ext | sha | content
---|---|---
py | 7dfeaa7f8be502956858f88ccc8dbbda43c2bc4e | """Example code for the virtual Tkinter driver."""
from time import sleep
from PIL import Image
from einkd.drivers.virtual import TkinterDriver
images = [
'examples/img/880x528-b.png',
'examples/img/880x528-w.png',
'examples/img/880x528-r.png',
]
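# Note: the image paths above are relative, so this example presumably expects to be run from the repository root.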
if __name__ == "__main__":
with TkinterDriver((800, 528)) as epd:
epd.clear()
sleep(5)
for img_name in images:
img = Image.open(img_name)
epd.show(img)
epd.refresh()
sleep(5)
|
py | 7dfeaa8851c1663f2602409873311eb2e34d5711 | '''
Register on Alyx the set of tracked traces (after histology) for a given mouse.
All your tracks should be in a single folder, and the file names should follow the nomenclature
{yyyy-mm-dd}_{SubjectName}_{SessionNumber}_{ProbeLabel}_pts.csv
Edit the variable 'path_tracks' (set it either to your local directory [example given here]
or to the Google folder if synced to your machine).
To check if the registration worked, go to the admin interface
> experiments > trajectory estimates > search for the subject
If you want to test first, use ALYX_URL = "https://dev.alyx.internationalbrainlab.org"
And check the data appears on:
https://dev.alyx.internationalbrainlab.org/admin/experiments/trajectoryestimate/?
When you feel confident you can upload without error,
set EXAMPLE_OVERWRITE = False,
change ALYX_URL to "https://alyx.internationalbrainlab.org",
and re-run.
With EXAMPLE_OVERWRITE = True, the script downloads an example dataset and runs
the registration (used for automatic testing of the example).
'''
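# --- Illustrative sketch (not part of the original script) ---
# A minimal check of the file-name nomenclature described above. The exact widths
# of the date and session-number fields are assumptions, and the example file name
# below is hypothetical.
import re

_TRACK_FILE_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}_.+_\d+_.+_pts\.csv$")
assert _TRACK_FILE_PATTERN.match("2021-03-01_SubjectName_001_probe00_pts.csv")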
# Author: Olivier, Gaelle
from ibllib.pipes import histology
from oneibl.one import ONE
from pathlib import Path
# ======== EDIT FOR USERS ====
# Edit so as to reflect the directory containing your electrode tracks
path_tracks = "/Users/gaelle/Downloads/Flatiron/examples/00_to_add"
EXAMPLE_OVERWRITE = True  # Set to False when you want to run the script on your own data
ALYX_URL = "https://dev.alyx.internationalbrainlab.org" # FOR TESTING
# ALYX_URL = "https://alyx.internationalbrainlab.org" # UNCOMMENT WHEN READY
# ======== DO NOT EDIT BELOW ====
one = ONE(base_url=ALYX_URL)
if EXAMPLE_OVERWRITE:
    # TODO Olivier : Function to download examples folder
    cachepath = Path(one._par.CACHE_DIR)
    path_tracks = cachepath.joinpath('examples', 'histology', 'tracks_to_add')
histology.register_track_files(path_tracks=path_tracks, one=one, overwrite=True)
histology.detect_missing_histology_tracks(path_tracks=path_tracks, one=one)
|
py | 7dfeaafb626650365675879e8de4cf43f7d72188 | """
The :mod:`skrmt.covariance` module implements different methods
to estimate covariance matrices.
"""
from .estimator import sample_estimator
from .estimator import fsopt_estimator
from .estimator import linear_shrinkage_estimator
from .estimator import analytical_shrinkage_estimator
from .estimator import empirical_bayesian_estimator
from .estimator import minimax_estimator
from .metrics import loss_mv, loss_frobenius
from .metrics import prial_mv
__all__ = ["sample_estimator", "fsopt_estimator",
"linear_shrinkage_estimator",
"analytical_shrinkage_estimator",
"empirical_bayesian_estimator",
"minimax_estimator",
"loss_mv", "loss_frobenius",
"prial_mv"]
|
py | 7dfeab3902c0eae6ecd3c8c6f978b197dc66a78a | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import cirq
import cirq.testing
def test_deprecated():
with cirq.testing.assert_logs('cirq.to_valid_density_matrix', 'deprecated'):
_ = cirq.sim.to_valid_density_matrix(0, 1)
with cirq.testing.assert_logs('cirq.von_neumann_entropy', 'deprecated'):
_ = cirq.sim.von_neumann_entropy(np.eye(2) / 2)
def test_sample_density_matrix_big_endian():
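    # Qubit 0 is the most significant bit of x, so sampling in index order [2, 1, 0] yields the bits of x reversed.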
results = []
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
sample = cirq.sample_density_matrix(matrix, [2, 1, 0])
results.append(sample)
expecteds = [[list(reversed(x))] for x in
list(itertools.product([False, True], repeat=3))]
for result, expected in zip(results, expecteds):
np.testing.assert_equal(result, expected)
def test_sample_density_matrix_partial_indices():
for index in range(3):
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
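            # Sampling the single qubit at position index must reproduce bit (2 - index) of x (qubit 0 is the most significant bit).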
np.testing.assert_equal(cirq.sample_density_matrix(matrix, [index]),
[[bool(1 & (x >> (2 - index)))]])
def test_sample_density_matrix_partial_indices_order():
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
expected = [[bool(1 & (x >> 0)), bool(1 & (x >> 1))]]
np.testing.assert_equal(cirq.sample_density_matrix(matrix, [2, 1]),
expected)
def test_sample_density_matrix_partial_indices_all_orders():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
expected = [[bool(1 & (x >> (2 - p))) for p in perm]]
np.testing.assert_equal(cirq.sample_density_matrix(matrix, perm),
expected)
def test_sample_density_matrix():
state = np.zeros(8, dtype=np.complex64)
state[0] = 1 / np.sqrt(2)
state[2] = 1 / np.sqrt(2)
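    # The pure state (|000> + |010>)/sqrt(2): only qubit 1 is undetermined, qubits 0 and 2 always read False.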
matrix = cirq.to_valid_density_matrix(state, num_qubits=3)
for _ in range(10):
sample = cirq.sample_density_matrix(matrix, [2, 1, 0])
assert (np.array_equal(sample, [[False, False, False]])
or np.array_equal(sample, [[False, True, False]]))
# Partial sample is correct.
for _ in range(10):
np.testing.assert_equal(cirq.sample_density_matrix(matrix, [2]),
[[False]])
np.testing.assert_equal(cirq.sample_density_matrix(matrix, [0]),
[[False]])
def test_sample_density_matrix_seed():
density_matrix = 0.5 * np.eye(2)
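    # A maximally mixed single-qubit state: each sample is a fair coin flip, made reproducible by the seed.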
samples = cirq.sample_density_matrix(density_matrix, [0],
repetitions=10,
seed=1234)
assert np.array_equal(samples, [[False], [True], [False], [True], [True],
[False], [False], [True], [True], [True]])
samples = cirq.sample_density_matrix(density_matrix, [0],
repetitions=10,
seed=np.random.RandomState(1234))
assert np.array_equal(samples, [[False], [True], [False], [True], [True],
[False], [False], [True], [True], [True]])
def test_sample_empty_density_matrix():
matrix = np.zeros(shape=())
np.testing.assert_almost_equal(cirq.sample_density_matrix(matrix, []), [[]])
def test_sample_density_matrix_no_repetitions():
matrix = cirq.to_valid_density_matrix(0, 3)
np.testing.assert_almost_equal(
cirq.sample_density_matrix(matrix, [1], repetitions=0),
np.zeros(shape=(0, 1)))
np.testing.assert_almost_equal(
cirq.sample_density_matrix(matrix, [0, 1], repetitions=0),
np.zeros(shape=(0, 2)))
def test_sample_density_matrix_repetitions():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
expected = [[bool(1 & (x >> (2 - p))) for p in perm]] * 3
result = cirq.sample_density_matrix(matrix, perm, repetitions=3)
np.testing.assert_equal(result, expected)
def test_sample_density_matrix_negative_repetitions():
matrix = cirq.to_valid_density_matrix(0, 3)
with pytest.raises(ValueError, match='-1'):
cirq.sample_density_matrix(matrix, [1], repetitions=-1)
def test_sample_density_matrix_not_square():
with pytest.raises(ValueError, match='not square'):
cirq.sample_density_matrix(np.array([1, 0, 0]), [1])
def test_sample_density_matrix_not_power_of_two():
with pytest.raises(ValueError, match='power of two'):
cirq.sample_density_matrix(np.ones((3, 3)) / 3, [1])
with pytest.raises(ValueError, match='power of two'):
cirq.sample_density_matrix(np.ones((2, 3, 2, 3)) / 6, [1])
def test_sample_density_matrix_higher_powers_of_two():
with pytest.raises(ValueError, match='powers of two'):
cirq.sample_density_matrix(np.ones((2, 4, 2, 4)) / 8, [1])
def test_sample_density_matrix_out_of_range():
matrix = cirq.to_valid_density_matrix(0, 3)
with pytest.raises(IndexError, match='-2'):
cirq.sample_density_matrix(matrix, [-2])
with pytest.raises(IndexError, match='3'):
cirq.sample_density_matrix(matrix, [3])
def test_sample_density_matrix_no_indices():
matrix = cirq.to_valid_density_matrix(0, 3)
bits = cirq.sample_density_matrix(matrix, [])
np.testing.assert_almost_equal(bits, np.zeros(shape=(1, 0)))
def test_sample_density_matrix_validate_qid_shape():
matrix = cirq.to_valid_density_matrix(0, 3)
cirq.sample_density_matrix(matrix, [], qid_shape=(2, 2, 2))
with pytest.raises(ValueError,
match='Matrix size does not match qid shape'):
cirq.sample_density_matrix(matrix, [], qid_shape=(2, 2, 1))
matrix2 = cirq.to_valid_density_matrix(0, qid_shape=(1, 2, 3))
cirq.sample_density_matrix(matrix2, [], qid_shape=(1, 2, 3))
with pytest.raises(ValueError,
match='Matrix size does not match qid shape'):
cirq.sample_density_matrix(matrix2, [], qid_shape=(2, 2, 2))
def test_measure_density_matrix_computational_basis():
results = []
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, [2, 1, 0])
results.append(bits)
np.testing.assert_almost_equal(out_matrix, matrix)
expected = [list(reversed(x)) for x in
list(itertools.product([False, True], repeat=3))]
assert results == expected
def test_measure_density_matrix_computational_basis_reversed():
results = []
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, [0, 1, 2])
results.append(bits)
np.testing.assert_almost_equal(out_matrix, matrix)
expected = [list(x) for x in
list(itertools.product([False, True], repeat=3))]
assert results == expected
def test_measure_density_matrix_computational_basis_reshaped():
results = []
for x in range(8):
matrix = np.reshape(cirq.to_valid_density_matrix(x, 3), (2,) * 6)
bits, out_matrix = cirq.measure_density_matrix(matrix, [2, 1, 0])
results.append(bits)
np.testing.assert_almost_equal(out_matrix, matrix)
expected = [list(reversed(x)) for x in
list(itertools.product([False, True], repeat=3))]
assert results == expected
def test_measure_density_matrix_partial_indices():
for index in range(3):
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, [index])
np.testing.assert_almost_equal(out_matrix, matrix)
assert bits == [bool(1 & (x >> (2 - index)))]
def test_measure_density_matrix_partial_indices_all_orders():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
matrix = cirq.to_valid_density_matrix(x, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, perm)
np.testing.assert_almost_equal(matrix, out_matrix)
assert bits == [bool(1 & (x >> (2 - p))) for p in perm]
def matrix_000_plus_010():
state = np.zeros(8, dtype=np.complex64)
state[0] = 1 / np.sqrt(2)
state[2] = 1j / np.sqrt(2)
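    # Pure state (|000> + i|010>)/sqrt(2); index 2 corresponds to |010> in big-endian qubit order.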
return cirq.to_valid_density_matrix(state, num_qubits=3)
def test_measure_density_matrix_collapse():
matrix = matrix_000_plus_010()
for _ in range(10):
bits, out_matrix = cirq.measure_density_matrix(matrix, [2, 1, 0])
assert bits in [[False, False, False], [False, True, False]]
expected = np.zeros(8, dtype=np.complex64)
if bits[1]:
expected[2] = 1j
else:
expected[0] = 1
expected_matrix = np.outer(np.conj(expected), expected)
np.testing.assert_almost_equal(out_matrix, expected_matrix)
assert out_matrix is not matrix
# Partial sample is correct.
for _ in range(10):
bits, out_matrix = cirq.measure_density_matrix(matrix, [2])
np.testing.assert_almost_equal(out_matrix, matrix)
assert bits == [False]
bits, out_matrix = cirq.measure_density_matrix(matrix, [0])
np.testing.assert_almost_equal(out_matrix, matrix)
assert bits == [False]
def test_measure_density_matrix_seed():
n = 5
matrix = np.eye(2**n) / 2**n
bits, out_matrix1 = cirq.measure_density_matrix(matrix, range(n), seed=1234)
assert bits == [False, False, True, True, False]
bits, out_matrix2 = cirq.measure_density_matrix(
matrix, range(n), seed=np.random.RandomState(1234))
assert bits == [False, False, True, True, False]
np.testing.assert_allclose(out_matrix1, out_matrix2)
def test_measure_density_matrix_out_is_matrix():
matrix = matrix_000_plus_010()
bits, out_matrix = cirq.measure_density_matrix(matrix, [2, 1, 0],
out=matrix)
expected_state = np.zeros(8, dtype=np.complex64)
expected_state[2 if bits[1] else 0] = 1.0
expected_matrix = np.outer(np.conj(expected_state), expected_state)
np.testing.assert_array_almost_equal(out_matrix, expected_matrix)
assert out_matrix is matrix
def test_measure_state_out_is_not_matrix():
matrix = matrix_000_plus_010()
out = np.zeros_like(matrix)
_, out_matrix = cirq.measure_density_matrix(matrix, [2, 1, 0], out=out)
assert out is not matrix
assert out is out_matrix
def test_measure_density_matrix_not_square():
with pytest.raises(ValueError, match='not square'):
cirq.measure_density_matrix(np.array([1, 0, 0]), [1])
with pytest.raises(ValueError, match='not square'):
cirq.measure_density_matrix(np.array([1, 0, 0, 0]).reshape((2, 1, 2)),
[1],
qid_shape=(2, 1))
def test_measure_density_matrix_not_power_of_two():
with pytest.raises(ValueError, match='power of two'):
cirq.measure_density_matrix(np.ones((3, 3)) / 3, [1])
with pytest.raises(ValueError, match='power of two'):
cirq.measure_density_matrix(np.ones((2, 3, 2, 3)) / 6, [1])
def test_measure_density_matrix_higher_powers_of_two():
with pytest.raises(ValueError, match='powers of two'):
cirq.measure_density_matrix(np.ones((2, 4, 2, 4)) / 8, [1])
def test_measure_density_matrix_tensor_different_left_right_shape():
with pytest.raises(ValueError, match='not equal'):
cirq.measure_density_matrix(np.array([1, 0, 0, 0]).reshape(
(2, 2, 1, 1)), [1],
qid_shape=(2, 1))
def test_measure_density_matrix_out_of_range():
matrix = cirq.to_valid_density_matrix(0, 3)
with pytest.raises(IndexError, match='-2'):
cirq.measure_density_matrix(matrix, [-2])
with pytest.raises(IndexError, match='3'):
cirq.measure_density_matrix(matrix, [3])
def test_measure_state_no_indices():
matrix = cirq.to_valid_density_matrix(0, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, [])
assert [] == bits
np.testing.assert_almost_equal(out_matrix, matrix)
def test_measure_state_no_indices_out_is_matrix():
matrix = cirq.to_valid_density_matrix(0, 3)
bits, out_matrix = cirq.measure_density_matrix(matrix, [], out=matrix)
assert [] == bits
np.testing.assert_almost_equal(out_matrix, matrix)
assert out_matrix is matrix
def test_measure_state_no_indices_out_is_not_matrix():
matrix = cirq.to_valid_density_matrix(0, 3)
out = np.zeros_like(matrix)
bits, out_matrix = cirq.measure_density_matrix(matrix, [], out=out)
assert [] == bits
np.testing.assert_almost_equal(out_matrix, matrix)
assert out is out_matrix
assert out is not matrix
def test_measure_state_empty_density_matrix():
matrix = np.zeros(shape=())
bits, out_matrix = cirq.measure_density_matrix(matrix, [])
assert [] == bits
np.testing.assert_almost_equal(matrix, out_matrix)
@pytest.mark.parametrize('seed', [17, 35, 48])
def test_to_valid_density_matrix_on_simulator_output(seed):
circuit = cirq.testing.random_circuit(qubits=5,
n_moments=20,
op_density=0.9,
random_state=seed)
simulator = cirq.DensityMatrixSimulator()
result = simulator.simulate(circuit)
_ = cirq.to_valid_density_matrix(result.final_density_matrix, num_qubits=5)
|
py | 7dfeab69e93e1d1efe96dc4ab4b6a1b782467800 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
def _requires_from_file(filename):
    with open(filename) as f:
        return f.read().splitlines()
setuptools.setup(
name="galois_field",
version="2.1.4",
author="Sakoda Takuya",
author_email="[email protected]",
description="Galois Field: GF(p^n)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/syakoo/galois_field",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=_requires_from_file('requirements.txt'),
setup_requires=["pytest-runner"],
    tests_require=["pytest"],
python_requires='>=3.7',
)
|
py | 7dfeaba07e9a5c4b2b2355180929311fb1d425dc | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
# import tensorflow as tf
#mwm 1, 3
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()
# tf.compat.v1.placeholder()  # stray call with no arguments; placeholder() requires a dtype, so it is disabled here
import keras
#mwm 1, 1
# import tensorflow.keras as keras
#from tensorflow.keras import backend as K
import keras.backend as K
import keras.layers as KL
#from keras import engine as KE
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
    Batch normalization has a negative effect on training if batches are small,
    so this layer is often frozen (via a setting in the Config class) and functions
    as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
############################################################
# Resnet Graph
############################################################
# Code adapted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer in the main path uses strides=(2, 2),
    and the shortcut should use strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.math.log(x) / tf.math.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
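        # Equivalently: level = clip(4 + round(log2(sqrt(roi_area_in_pixels) / 224)), 2, 5).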
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_overlaps)[1], 0),
true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
# indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
#mwm 1,1
indices = tf.stack([tf.range(tf.shape(probs)[0]), class_ids], axis = 1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
    # Bounding box refinement. [batch, H, W, anchors per location * depth]
    # where depth is [dy, dx, log(dh), log(dw)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
#mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
#mwm 1, 4
# if s[1] == None:
# mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
# else:
# mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
#mwm 4,1 (again)
mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Random horizontal flips.
# TODO: will be removed in a future update in favor of augmentation
if augment:
logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
mask = mask.astype(bool)  # np.bool is removed in newer NumPy; plain bool is equivalent
# Note that some boxes might be all zeros if the corresponding mask got cropped out.
# and here is to filter them out
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
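# Illustrative usage sketch (not executed here). `dataset` is assumed to be a
# prepared utils.Dataset subclass instance and `config` a Config instance; neither
# name is defined at module level.
#   image, image_meta, class_ids, bbox, mask = load_image_gt(
#       dataset, config, dataset.image_ids[0],
#       use_mini_mask=config.USE_MINI_MASK)
#   # image: [H, W, 3], bbox: [num_instances, 4], mask: [h, w, num_instances]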
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
# Place the mini mask in the image-sized placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
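# Illustrative sketch (debug/visualization path only, not executed here). The
# proposals would typically come from generate_random_rois() defined below, and
# the gt_* arrays from load_image_gt() with the dtypes asserted above.
#   rois, roi_class_ids, roi_bboxes, roi_masks = build_detection_targets(
#       rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)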
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
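# Worked example of the delta computation above (plain NumPy, illustrative values):
#   a  = np.array([10., 10., 50., 50.])    # anchor (y1, x1, y2, x2)
#   gt = np.array([12., 14., 52., 46.])    # matched GT box
#   a_h, a_w   = a[2] - a[0], a[3] - a[1]          # 40, 40
#   gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]      # 40, 32
#   dy = ((gt[0] + 0.5 * gt_h) - (a[0] + 0.5 * a_h)) / a_h    # 0.05
#   dx = ((gt[1] + 0.5 * gt_w) - (a[1] + 0.5 * a_w)) / a_w    # 0.0
#   dh, dw = np.log(gt_h / a_h), np.log(gt_w / a_w)           # 0.0, log(0.8)
#   # [dy, dx, dh, dw] is then divided by config.RPN_BBOX_STD_DEV, as above.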
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
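# Usage sketch (illustrative): `image`, `gt_class_ids` and `gt_boxes` would come
# from load_image_gt() above; none of these names exist at module level.
#   rpn_rois = generate_random_rois(image.shape, 256, gt_class_ids, gt_boxes)
#   # rpn_rois: [256, (y1, x1, y2, x2)] in pixels, ~90% of them around GT boxes.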
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
random_rois=0, batch_size=1, detection_targets=False,
no_augmentation_sources=None):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training, detection targets are generated by DetectionTargetLayer.
no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
defined in the Dataset class.
Returns a Python generator. Upon calling next() on it, the
generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
no_augmentation_sources = no_augmentation_sources or []
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
# If the image source is not to be augmented pass None as augmentation
if dataset.image_info[image_id]['source'] in no_augmentation_sources:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=None,
use_mini_mask=config.USE_MINI_MASK)
else:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=augmentation,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(batch_size, gt_masks.shape[0], gt_masks.shape[1],
config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
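# Usage sketch (illustrative; `train_dataset` and `config` are assumed to be a
# prepared Dataset and Config instance, not defined in this module):
#   gen = data_generator(train_dataset, config, shuffle=True,
#                        batch_size=config.BATCH_SIZE)
#   inputs, outputs = next(gen)
#   # inputs[0]: molded images [batch, H, W, C]; outputs is [] in normal training.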
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
# Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# Stage 5 is created here (stage5=True); C1 is not needed by the FPN, so it is discarded with '_'.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
# anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
#mwm 1 line, 2 lines
anchor_layer = AnchorsLayer(name="anchors")
anchors = anchor_layer(anchors)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
# Conditional import to support versions of Keras before 2.2
# TODO: remove in about 6 months (end of 2018)
try:
from keras.engine import saving
except ImportError:
# Keras before 2.2 used the 'topology' namespace.
from keras.engine import topology as saving
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
#if by_name:
# saving.load_weights_from_hdf5_group_by_name(f, layers)
#else:
# saving.load_weights_from_hdf5_group(f, layers)
#mwm 4, 2
# print("keras_model.load_weights(filepath, by_name={})".format(by_name))
keras_model.load_weights(filepath, by_name=by_name, skip_mismatch=by_name)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
#mwm 0,2
# self.keras_model._losses = []
# self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
# loss = (
# tf.reduce_mean(layer.output, keepdims=True)
# * self.config.LOSS_WEIGHTS.get(name, 1.))
# self.keras_model.add_loss(loss)
#mwm 4,3
loss = tf.reduce_mean(input_tensor=layer.output, keepdims=True)
print("loss: {}".format(loss))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
#mwm 0,1
# Create the metrics list once; resetting it on every iteration would keep only the last loss
self.keras_model.metrics_tensors = getattr(self.keras_model, "metrics_tensors", [])
self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
call.
layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
            with the keras fit method. Must be a list of keras.callbacks.Callback instances.
no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE,
no_augmentation_sources=no_augmentation_sources)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':  # compare with ==; 'is' on a string literal is unreliable
workers = 0
else:
workers = multiprocessing.cpu_count()
#mwm 2
# workers = 1
# use_multiprocessing = False
# self.keras_model.fit_generator(
# train_generator,
# initial_epoch=self.epoch,
# epochs=epochs,
# # steps_per_epoch=self.config.steps_per_epoch,
# steps_per_epoch=self.config.STEPS_PER_EPOCH,
# callbacks=callbacks,
# validation_data=val_generator,
# # validation_steps=self.config.validation_steps,
# validation_steps=self.config.VALIDATION_STEPS,
# max_queue_size=100,
# workers=workers,
# # use_multiprocessing=True,
# #mwm 1,1
# use_multiprocessing=False,
# )
#mwm 12,21
self.keras_model.fit(
x=train_generator,
y=None,
batch_size=None,
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_split=0.0,
validation_data=val_generator,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=self.epoch,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
validation_steps=self.config.VALIDATION_STEPS,
validation_freq=1,
max_queue_size=100,
workers=workers,
use_multiprocessing=False,
)
self.epoch = max(self.epoch, epochs)
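# Usage sketch (illustrative; `config`, `train_dataset` and `val_dataset` stand in
# for user-defined Config/Dataset subclass instances and are not defined here):
#   model = MaskRCNN(mode="training", config=config, model_dir="./logs")
#   model.load_weights(model.get_imagenet_weights(), by_name=True)
#   model.train(train_dataset, val_dataset,
#               learning_rate=config.LEARNING_RATE, epochs=40, layers="heads")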
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
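# Usage sketch (illustrative; `inference_config` is an assumed Config subclass
# instance with BATCH_SIZE == 1, and `image` is any RGB numpy array, e.g. loaded
# with skimage.io.imread, which is not imported in this module):
#   model = MaskRCNN(mode="inference", config=inference_config, model_dir="./logs")
#   model.load_weights(weights_path, by_name=True)   # weights_path: trained .h5 file
#   r = model.detect([image], verbose=1)[0]
#   # r["rois"], r["class_ids"], r["scores"], r["masks"]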
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
            # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
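    # A minimal usage sketch (comments only; the variable names here are assumptions):
    # with an inference-mode MaskRCNN instance `model`, repeated calls with the same
    # image shape hit the cache above and return the same normalized anchors:
    #
    #   anchors = model.get_anchors(model.config.IMAGE_SHAPE)
    #   # anchors: [num_anchors, (y1, x1, y2, x2)] in normalized coordinates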
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                  # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
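# Worked example (illustrative values, assuming a 3-class dataset): the meta vector has
# length 1 + 3 + 3 + 4 + 1 + num_classes, e.g.
#
#   meta = compose_image_meta(0, (480, 640, 3), (1024, 1024, 3),
#                             (128, 0, 896, 1024), 1.6, [1, 1, 1])
#
# so meta[0] is the image id, meta[1:4] the original shape, meta[4:7] the molded shape,
# meta[7:11] the window, meta[11] the scale and meta[12:] the active class ids,
# matching the slicing in parse_image_meta() below.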
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
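# Worked example (illustrative): for boxes [[0, 0, 0, 0], [1, 2, 3, 4]] the per-row sums of
# absolute values are [0, 10], so non_zeros is [False, True] and only the second box is kept.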
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
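# Worked example (illustrative): with x of shape [2, 3] and counts = [1, 3], the result
# concatenates x[0, :1] and x[1, :3] into a single 1-D tensor of length 4.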
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
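# Worked example (illustrative): with shape (1024, 1024), scale is 1023 and shift is
# (0, 0, 1, 1), so norm_boxes_graph maps the pixel box (0, 0, 1024, 1024) to (0, 0, 1, 1)
# and denorm_boxes_graph maps it back (up to rounding), consistent with (y2, x2) lying
# outside the box in pixel coordinates.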
class AnchorsLayer(tf.keras.layers.Layer):
def __init__(self, name="anchors", **kwargs):
super(AnchorsLayer, self).__init__(name=name, **kwargs)
def call(self, anchor):
return anchor
def get_config(self) :
config = super(AnchorsLayer, self).get_config()
return config
|
py | 7dfeabacf303ae4fba03326cd91c2bc98c639e5c | import os
import tabnanny
import subprocess
from ast import parse
from typing import Text
import multiprocessing as mp
from laziest.analyzer import Analyzer
from laziest.strings import test_method_prefix
from laziest.codegen import generate_test_file_content
from laziest.conf.config import init_config, default_settings
from laziest.walker import PathWalker, FilteredPaths
from codegraph.core import CodeGraph
tabnanny.verbose = True
q = mp.Queue()
def dump_to_file(path: Text, tf_content: Text, tests_dir: Text) -> Text:
test_file_name = f'test_{os.path.basename(path)}'
test_file_path = os.path.join(tests_dir, test_file_name)
with open(test_file_path, 'w+') as test_file:
test_file.write(tf_content)
return test_file_path
def run_laziest(args: dict):
""" main method with call steps to run laziest """
config_args = {arg: args[arg] for arg in args if arg in default_settings and args[arg] is not None}
# init config
cfg = init_config(config_args)
path = args['path']
if not os.path.exists(path):
raise Exception(f'Path {path} not exists')
fp = FilteredPaths(cfg.use_ignore_files)
pw = PathWalker(path, fp, cfg.recursive)
# get paths to python modules
paths = [x for x in pw.python_files if '__init__' not in x]
# TODO: need to add codegraph
# create code graph, iterate on bunch of functions, not files
# 1st level (with no dependencies)
    # 2nd level (with deps on functions/classes of first level), etc.
# if not config_args.get('overwrite', False):
# append = True
# run differ, to collect existed file names and methods
# pass
generate_bunch_of_test_files(paths, args['debug'])
exit(0)
def generate_bunch_of_test_files(python_paths, debug):
num_cores = mp.cpu_count()
pool = mp.Pool(num_cores)
jobs = []
for python_file in python_paths:
jobs.append(pool.apply_async(tests_generator_per_file, (python_file, debug)))
# wait for all jobs to finish
for job in list(jobs):
job.get()
while not q.empty():
proc = subprocess.Popen(f'black -l {79} {q.get()}', shell=True)
proc.wait()
proc.kill()
pool.close()
pool.join()
def tests_generator_per_file(python_file, debug):
print(f'Run test generation for {python_file}')
# run TestSetCreator to get list of expected test files
append = False
    # validate '.py' file - check indentation
tabnanny.check(python_file)
# run analyzer
with open(python_file, "r") as source:
source_massive = source.read()
tree = parse(source_massive)
an = Analyzer(source_massive, debug)
an.visit(tree)
an.report()
cg = CodeGraph(paths=[python_file])
    # data with start and end line numbers for each entity
code_lines = cg.get_lines_numbers()[python_file]
# to get diff with existed tests
signatures_list = {'classes': [], 'def': []}
for class_ in an.tree['classes']:
signatures_list['classes'].append(test_method_prefix + class_['name'])
# run test file generator
tf_content = generate_test_file_content(an, python_file, code_lines, debug)
if append:
# append new tests to tf
# if new method in test case for class - insert
pass
# TODO: need to change on getting prefix from command line and config
prefix = 'tests'
if prefix in python_file:
tests_dir = os.path.join(python_file.split(prefix)[0], prefix)
else:
tests_dir = os.path.join(os.path.dirname(os.path.dirname(python_file)), 'tests')
os.makedirs(tests_dir, exist_ok=True)
test_file_path = dump_to_file(python_file, tf_content, tests_dir)
q.put(test_file_path)
|
py | 7dfead9b1cc5490701c2f53936f47eb7b62289e7 | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from abc import ABCMeta, abstractmethod
from math import log, floor
from google.protobuf.json_format import MessageToJson, Parse
from pyqrllib.pyqrllib import bin2hstr, QRLHelper, XmssFast
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.core.misc import logger
from qrl.core.txs import build_tx
from qrl.crypto.misc import sha256
from qrl.generated import qrl_pb2
CODEMAP = {
'transfer': 1,
'coinbase': 2,
'latticePK': 3,
'message': 4,
'token': 5,
'transfer_token': 6,
'slave': 7
}
class Transaction(object, metaclass=ABCMeta):
"""
Abstract Base class to be derived by all other transactions
"""
def __init__(self, protobuf_transaction=None):
        self._data = protobuf_transaction  # This object contains persistable data
if protobuf_transaction is None:
self._data = qrl_pb2.Transaction()
def __lt__(self, tx):
if self.fee < tx.fee:
return True
return False
def __gt__(self, tx):
if self.fee > tx.fee:
return True
return False
@property
def size(self):
return self._data.ByteSize()
@property
def pbdata(self):
"""
Returns a protobuf object that contains persistable data representing this object
:return: A protobuf Transaction object
:rtype: qrl_pb2.Transaction
"""
return self._data
@property
def type(self):
return self._data.WhichOneof('transactionType')
@property
def fee(self):
return self._data.fee
@property
def nonce(self):
return self._data.nonce
@property
def master_addr(self):
return self._data.master_addr
@property
def addr_from(self):
if self.master_addr:
return self.master_addr
return bytes(QRLHelper.getAddress(self.PK))
@property
def ots_key(self):
return self.get_ots_from_signature(self.signature)
@staticmethod
def get_ots_from_signature(signature):
try:
return int(bin2hstr(signature)[0:8], 16)
except ValueError:
raise ValueError('OTS Key Index: First 4 bytes of signature are invalid')
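    # Illustrative note: the OTS index is taken from the first 4 bytes (8 hex characters)
    # of the XMSS signature, e.g. a signature beginning with bytes 00 00 00 05 yields
    # int("00000005", 16) == 5.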
@staticmethod
def calc_allowed_decimals(value):
if not isinstance(value, int):
raise ValueError('value should be of integer type')
if value == 0:
return 19
# floor value could be negative, so return 0 when the floor value is negative
return max(floor(19 - log(value, 10)), 0)
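    # Worked example (illustrative): calc_allowed_decimals(1000) == max(floor(19 - log10(1000)), 0) == 16,
    # while calc_allowed_decimals(0) is capped at 19.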
@property
def PK(self):
return self._data.public_key
@property
def signature(self):
return self._data.signature
@staticmethod
def from_pbdata(pbdata: qrl_pb2.Transaction):
return build_tx(pbdata.WhichOneof('transactionType'), pbdata)
@staticmethod
def from_json(json_data):
pbdata = qrl_pb2.Transaction()
Parse(json_data, pbdata)
return Transaction.from_pbdata(pbdata)
@staticmethod
def get_slave(tx):
addr_from_pk = bytes(QRLHelper.getAddress(tx.PK))
if addr_from_pk != tx.addr_from:
return addr_from_pk
return None
@property
def txhash(self) -> bytes:
return self._data.transaction_hash
def update_txhash(self):
self._data.transaction_hash = self.generate_txhash()
def generate_txhash(self):
return sha256(
self.get_data_hash() +
self.signature +
self.PK
)
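    # Illustrative note: the transaction hash commits to the payload, the signature and the
    # public key, i.e. txhash == sha256(sha256(data_bytes) + signature + PK), which is why
    # sign() below calls update_txhash() after setting the signature.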
def get_data_bytes(self) -> bytes:
"""
        This method returns the essential bytes that represent the transaction and will later be signed
:return:
"""
raise NotImplementedError
def get_data_hash(self) -> bytes:
"""
This method returns the hashes of the transaction data.
:return:
"""
return sha256(self.get_data_bytes())
def sign(self, object_with_sign_method):
self._data.signature = object_with_sign_method.sign(self.get_data_hash())
self.update_txhash()
@abstractmethod
def apply_state_changes(self, addresses_state):
"""
        This method applies the changes on the state caused by txn.
:return:
"""
raise NotImplementedError
def _apply_state_changes_for_PK(self, addresses_state: dict):
addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
if addr_from_pk in addresses_state:
if self.addr_from != addr_from_pk:
addresses_state[addr_from_pk].transaction_hashes.append(self.txhash)
addresses_state[addr_from_pk].increase_nonce()
addresses_state[addr_from_pk].set_ots_key(self.ots_key)
@abstractmethod
def revert_state_changes(self, addresses_state, chain_manager):
"""
This method reverts the changes on the state caused by txn.
:return:
"""
raise NotImplementedError
def _revert_state_changes_for_PK(self, addresses_state, chain_manager):
addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
if addr_from_pk in addresses_state:
if self.addr_from != addr_from_pk:
addresses_state[addr_from_pk].transaction_hashes.remove(self.txhash)
addresses_state[addr_from_pk].decrease_nonce()
addresses_state[addr_from_pk].unset_ots_key(self.ots_key, chain_manager)
def set_affected_address(self, addresses_set: set):
addresses_set.add(self.addr_from)
addresses_set.add(bytes(QRLHelper.getAddress(self.PK)))
@abstractmethod
def _validate_custom(self) -> bool:
"""
This is an extension point for derived classes validation
If derived classes need additional field validation they should override this member
"""
raise NotImplementedError
def validate_transaction_pool(self, transaction_pool):
for tx_set in transaction_pool:
txn = tx_set[1].transaction
if txn.txhash == self.txhash:
continue
if self.PK != txn.PK:
continue
if txn.ots_key == self.ots_key:
logger.info('State validation failed for %s because: OTS Public key re-use detected',
bin2hstr(self.txhash))
logger.info('Subtype %s', type(self))
return False
return True
def validate(self, verify_signature=True) -> bool:
"""
This method calls validate_or_raise, logs any failure and returns True or False accordingly
The main purpose is to avoid exceptions and accommodate legacy code
        :return: True if the transaction is valid
:rtype: bool
"""
try:
self.validate_or_raise(verify_signature)
except ValueError as e:
logger.info('[%s] failed validate_tx', bin2hstr(self.txhash))
logger.warning(str(e))
return False
except Exception as e:
logger.exception(e)
return False
return True
def _coinbase_filter(self):
if config.dev.coinbase_address in [bytes(QRLHelper.getAddress(self.PK)), self.master_addr]:
raise ValueError('Coinbase Address only allowed to do Coinbase Transaction')
def _get_allowed_access_types(self):
return [0]
def _get_master_address(self):
return self.addr_from
def validate_or_raise(self, verify_signature=True) -> bool:
"""
This method will validate a transaction and raise exception if problems are found
        :return: True if the transaction is valid, raises an exception otherwise
:rtype: bool
"""
if not self._validate_custom():
raise ValueError("Custom validation failed")
self._coinbase_filter()
expected_transaction_hash = self.generate_txhash()
if verify_signature and self.txhash != expected_transaction_hash:
logger.warning('Invalid Transaction hash')
logger.warning('Expected Transaction hash %s', bin2hstr(expected_transaction_hash))
            logger.warning('Found Transaction hash %s', bin2hstr(self.txhash))
raise ValueError("Invalid Transaction Hash")
if verify_signature and not XmssFast.verify(self.get_data_hash(),
self.signature,
self.PK):
raise ValueError("Invalid xmss signature")
return True
def validate_slave(self, addr_from_state: AddressState, addr_from_pk_state: AddressState):
addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
master_address = self._get_master_address()
allowed_access_types = self._get_allowed_access_types()
if self.master_addr == addr_from_pk:
logger.warning('Matching master_addr field and address from PK')
return False
if addr_from_pk != master_address:
if str(self.PK) not in addr_from_state.slave_pks_access_type:
logger.warning("Public key and address don't match")
return False
access_type = addr_from_pk_state.slave_pks_access_type[str(self.PK)]
if access_type not in allowed_access_types:
logger.warning('Access Type %s', access_type)
                logger.warning("Slave Address doesn't have sufficient permission")
return False
return True
def get_message_hash(self):
# FIXME: refactor, review that things are not recalculated too often, cache, etc.
return self.txhash
def to_json(self):
# FIXME: Remove once we move completely to protobuf
return MessageToJson(self._data, sort_keys=True)
def serialize(self) -> str:
return self._data.SerializeToString()
@staticmethod
def deserialize(data):
pbdata = qrl_pb2.Transaction()
pbdata.ParseFromString(bytes(data))
tx = Transaction(pbdata)
return tx
|
py | 7dfeae90c026f08e5853be0059507d5cd8f47f1d | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
from optparse import OptionParser
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pyverilog
import pyverilog.utils.util as util
from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
from pyverilog.controlflow.active_range import VerilogActiveAnalyzer
def main():
INFO = "Active condition analyzer (Obsoluted)"
VERSION = pyverilog.__version__
USAGE = "Usage: python example_active_range.py -t TOPMODULE file ..."
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option("-v", "--version", action="store_true", dest="showversion",
default=False, help="Show the version")
optparser.add_option("-t", "--top", dest="topmodule",
default="TOP", help="Top module, Default=TOP")
optparser.add_option("-s", "--search", dest="searchtarget", action="append",
default=[], help="Search Target Signal")
(options, args) = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f):
raise IOError("file not found: " + f)
if len(filelist) == 0:
showVersion()
analyzer = VerilogDataflowAnalyzer(filelist, options.topmodule)
analyzer.generate()
directives = analyzer.get_directives()
terms = analyzer.getTerms()
binddict = analyzer.getBinddict()
optimizer = VerilogDataflowOptimizer(terms, binddict)
optimizer.resolveConstant()
resolved_terms = optimizer.getResolvedTerms()
resolved_binddict = optimizer.getResolvedBinddict()
constlist = optimizer.getConstlist()
aanalyzer = VerilogActiveAnalyzer(options.topmodule, terms, binddict,
resolved_terms, resolved_binddict, constlist)
for target in options.searchtarget:
signal = util.toTermname(target)
print('Active Conditions: %s' % signal)
active_conditions = aanalyzer.getActiveConditions(signal)
print(sorted(active_conditions, key=lambda x: str(x)))
print('Changed Conditions')
changed_conditions = aanalyzer.getChangedConditions(signal)
print(sorted(changed_conditions, key=lambda x: str(x)))
print('Changed Condition Dict')
changed_conditiondict = aanalyzer.getChangedConditionsWithAssignments(signal)
print(sorted(changed_conditiondict.items(), key=lambda x: str(x[0])))
print('Unchanged Conditions')
unchanged_conditions = aanalyzer.getUnchangedConditions(signal)
print(sorted(unchanged_conditions, key=lambda x: str(x)))
if __name__ == '__main__':
main()
|
py | 7dfeaefa2f756d0d0f945c499cb27995b2aeba67 | import streamlit as st
from fpdf import FPDF
import base64
report_text = st.text_input("Report Text")
export_as_pdf = st.button("Export Report")
def create_download_link(val, filename):
b64 = base64.b64encode(val) # val looks like b'...'
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="{filename}.pdf">Download file</a>'
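# Illustrative note (hypothetical values): the PDF bytes are embedded in a base64 data URI,
# so nothing is written to disk on the server, e.g.
#   create_download_link(b"%PDF-1.4 ...", "report")
# returns an <a> tag whose href the browser decodes when the link is clicked.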
if export_as_pdf:
pdf = FPDF()
pdf.add_page()
pdf.set_font('Arial', 'B', 16)
pdf.cell(40, 10, report_text)
html = create_download_link(pdf.output(dest="S").encode("latin-1"), "test")
st.markdown(html, unsafe_allow_html=True) |
py | 7dfeaf3601a944907b2f5efc7409d7a13234bfd8 | class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
c = collections.Counter(nums)
for i in range(c[0]):
nums[i] = 0
for i in range(c[0], c[0] + c[1]):
nums[i] = 1
for i in range(c[0] + c[1], c[0] + c[1] + c[2]):
nums[i] = 2
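        # Worked example (illustrative): for nums = [2, 0, 2, 1, 1, 0] the counter is
        # {0: 2, 1: 2, 2: 2}, so the array is rewritten in place as [0, 0, 1, 1, 2, 2].
        # This is a counting sort; a single-pass in-place alternative would be the
        # Dutch national flag three-pointer partition.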
|
py | 7dfeaf9d97459abe6ddd6674a0082809af8dbc3b | """Convert Freesurfer *.gii triangular mesh surface to Wavefront .obj file."""
import os
import numpy as np
import nibabel as nb
import bvbabel
import timeit
FILE = "/home/faruk/Documents/temp_bv_fsaverage/white_left.gii"
# -----------------------------------------------------------------------------
# Load data
gii = nb.load(FILE)
basename = FILE.split(os.extsep, 1)[0]
def compute_vertex_normals(verts, faces):
""""Compute vertex normals.
Parameters
----------
verts: 2d numpy array, shape [nvertices, 3]
Coordinates of vertices
faces: 2d numpy array [nfaces, 3]
Vertex indices forming triangles.
Returns
-------
normals: 2d numpy array, shape [nvertices, 3]
Unit vector vertex normals.
Reference
---------
https://sites.google.com/site/dlampetest/python/calculating-normals-of-a-triangle-mesh-using-numpy
"""
def normalize_v3(arr):
"""Normalize a numpy array of 3 component vectors shape=(n, 3)."""
lens = np.sqrt(arr[:, 0]**2. + arr[:, 1]**2. + arr[:, 2]**2.)
arr[:, 0] /= lens
arr[:, 1] /= lens
arr[:, 2] /= lens
return arr
norm = np.zeros(verts.shape, dtype=verts.dtype)
# Create an indexed view into the vertex array
tris = verts[faces]
# Calculate the normals (cross product of the vectors v1-v0 & v2-v0)
n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
# Normalize weights in each normal equally.
n = normalize_v3(n)
# Convert face normals to vertex normals and normalize again
norm[faces[:, 0]] += n
norm[faces[:, 1]] += n
norm[faces[:, 2]] += n
return normalize_v3(norm)
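# Minimal usage sketch (assuming `verts` is an (N, 3) float array and `faces` an (M, 3)
# integer index array, as extracted from the GIFTI file below):
#
#   normals = compute_vertex_normals(verts, faces)
#   # normals has shape (N, 3) with unit-length rows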
# =============================================================================
# Extract vertices and faces
verts = gii.darrays[0].data
faces = gii.darrays[1].data
faces = faces[:, [0, 2, 1]]
norms = compute_vertex_normals(verts, faces)
nr_verts = verts.shape[0]
nr_faces = faces.shape[0]
# -----------------------------------------------------------------------------
# Save OBJ
basename = FILE.split(os.extsep, 1)[0]
outname = "{}_bvbabel.obj".format(basename)
print("Writing OBJ file...")
bvbabel.obj.write_obj(outname, verts, norms, faces)
print("Finished.")
|
py | 7dfeafb4762d6c11e434f923ad0e9181519205e5 | import typing as T
from datetime import datetime
from random import choice, randint
from string import ascii_lowercase, digits
from cumulusci.tests.util import random_sha
date_format = "%Y-%m-%dT%H:%M:%SZ"
class GithubApiTestMixin:
"""Mixin that provide common values and mocked http responses for tests of code that talks to the Github API"""
def init_github(self):
self.repo_api_url = "https://api.github.com/repos/TestOwner/TestRepo"
self.github_info = {
"github_owner": "TestOwner",
"github_repo": "TestRepo",
"github_username": "TestUser",
"github_password": "TestPass",
"prefix_beta": "beta/",
"prefix_prod": "release/",
"default_branch": "main",
}
def _get_expected_user(self, name):
user_url = "https://api.github.com/users/{}".format(name)
return {
"id": 1234567892,
"login": name,
"url": user_url,
"type": "User",
"site_admin": False,
"avatar_url": "https://avatars2.githubusercontent.com/u/42554011?v=4",
"gravatar_id": "",
"html_url": "https://github.com/{}".format(name),
"followers_url": user_url + "/followers",
"following_url": user_url + "/following{/other_user}",
"gists_url": user_url + "/gists{/gist_id}",
"starred_url": user_url + "/starred{/owner}{/repo}",
"subscriptions_url": user_url + "/subscriptions",
"organizations_url": user_url + "/orgs",
"repos_url": user_url + "/repos",
"events_url": user_url + "/events{/privacy}",
"received_events_url": user_url + "/received_events",
}
def _get_expected_repo(self, owner, name):
html_url = "https://github.com/{}/{}".format(owner, name)
url = "https://api.github.com/repos/{}/{}".format(owner, name)
owner_url = "https://api.github.com/users/{}".format(owner)
now = datetime.now().isoformat()
response_body = {
"id": 1234567890,
"name": name,
"description": "",
"archived": False,
"created_at": now,
"default_branch": "main",
"fork": False,
"forks_count": 1,
"full_name": "{}/{}".format(owner, name),
"git_url": "git://github.com/{}/{}.git".format(owner, name),
"has_downloads": False,
"has_issues": False,
"has_pages": False,
"has_projects": False,
"has_wiki": False,
"homepage": "",
"language": "Python",
"mirror_url": None,
"network_count": 0,
"open_issues_count": 0,
"owner": {
"id": 1234567891,
"login": owner,
"url": owner_url,
"type": "Organization",
"site_admin": False,
"avatar_url": "https://avatars2.githubusercontent.com/u/42554011?v=4",
"gravatar_id": "",
"html_url": "https://github.com/{}".format(owner),
"followers_url": owner_url + "/followers",
"following_url": owner_url + "/following{/other_user}",
"gists_url": owner_url + "/gists{/gist_id}",
"starred_url": owner_url + "/starred{/owner}{/repo}",
"subscriptions_url": owner_url + "/subscriptions",
"organizations_url": owner_url + "/orgs",
"repos_url": owner_url + "/repos",
"events_url": owner_url + "/events{/privacy}",
"received_events_url": owner_url + "/received_events",
},
"pushed_at": now,
"private": False,
"size": 1,
"ssh_url": "[email protected]:{}/{}.git".format(owner, name),
"stargazers_count": 0,
"subscribers_count": 0,
"svn_url": html_url,
"updated_at": now,
"watchers_count": 0,
"archive_url": url + "/{archive_format}{/ref}",
"assignees_url": url + "/assignees{/user}",
"blobs_url": url + "/git/blobs{/sha}",
"branches_url": url + "/branches{/branch}",
"clone_url": html_url + ".git",
"collaborators_url": url + "/collaborators{/collaborator}",
"comments_url": url + "/comments{/number}",
"commits_url": url + "/commits{/sha}",
"compare_url": url + "/compare/{base}...{head}",
"contents_url": url + "/contents/{+path}",
"contributors_url": url + "/CumulusCI/contributors",
"deployments_url": url + "/CumulusCI/deployments",
"downloads_url": url + "/downloads",
"events_url": url + "/events",
"forks_url": url + "/forks",
"git_commits_url": url + "/git/commits{/sha}",
"git_refs_url": url + "/git/refs{/sha}",
"git_tags_url": url + "/git/tags{/sha}",
"hooks_url": url + "/hooks",
"html_url": html_url,
"issue_comment_url": url + "/issues/comments{/number}",
"issue_events_url": url + "/issues/events{/number}",
"issues_url": url + "/issues{/number}",
"keys_url": url + "/keys{/key_id}",
"labels_url": url + "/labels{/name}",
"languages_url": url + "/languages",
"merges_url": url + "/merges",
"milestones_url": url + "/milestones{/number}",
"notifications_url": url + "/notifications{?since,all,participating}",
"pulls_url": url + "/pulls{/number}",
"releases_url": url + "/releases{/id}",
"stargazers_url": url + "/stargazers",
"statuses_url": url + "/statuses/{sha}",
"subscribers_url": url + "/subscribers",
"subscription_url": url + "/subscription",
"tags_url": url + "/tags",
"teams_url": url + "/teams",
"trees_url": url + "/git/trees{/sha}",
"url": url,
}
return response_body
def _get_expected_branch(self, branch, commit=None):
if not commit:
commit = self._random_sha()
response_body = {
"id": 1234567890,
"name": branch,
"commit": {
"sha": commit,
"url": "",
"author": None,
"comments_url": "",
"commit": {
"url": "",
"author": {},
"committer": {},
"message": "",
"tree": {"sha": "", "url": ""},
},
"committer": {},
"html_url": "",
"parents": [],
},
"_links": {},
"protected": False,
"protection": {},
"protection_url": "",
}
return response_body
def _get_expected_tag(
self, name, commit_sha, tag_sha=None, tag_date=None, message=None
):
if tag_sha is None:
tag_sha = self._random_sha()
if not tag_date:
tag_date = datetime.utcnow()
tag_date = datetime.strftime(tag_date, date_format)
return {
"sha": tag_sha,
"url": "",
"message": message or "",
"object": {"url": "", "sha": commit_sha, "type": "commit"},
"tag": name,
"tagger": {"date": tag_date},
}
def _get_expected_tag_ref(self, tag, sha):
return {
"ref": f"refs/tags/{tag}",
"object": {"type": "tag", "sha": sha, "url": ""},
"name": tag,
"url": "",
}
def _get_expected_ref(self, ref, sha, type="commit"):
return {
"ref": f"refs/{ref}",
"object": {"type": "commit", "sha": sha, "url": ""},
"name": ref,
"url": "",
}
def _get_expected_repo_tag(self, tag, sha):
return {
"name": tag,
"commit": {"sha": sha, "url": ""},
"tarball_url": "",
"zipball_url": "",
}
def _get_expected_commit(self, sha):
return {
"url": f"{self.repo_api_url}/commits/{sha}",
"sha": sha,
"files": [],
"stats": {},
}
def _get_expected_branches(self, branches=None):
if not branches:
branches = []
response_body = []
for branch in branches:
response_body.append(self._get_expected_branch(**branch))
return response_body
def _get_expected_compare(self, base, head, files=None):
if not files:
files = []
response_body = {
"base_commit": {
"url": "{}/commits/{}".format(self.repo_api_url, base),
"sha": base,
"author": None,
"comments_url": "",
"commit": {
"sha": "",
"url": "",
"author": {},
"committer": {},
"message": "",
"tree": {"sha": "", "url": ""},
},
"committer": {},
"html_url": "",
"parents": [],
},
"merge_base_commit": {
"url": "{}/commits/{}".format(self.repo_api_url, head),
"sha": head,
"author": None,
"comments_url": "",
"commit": {
"sha": "",
"url": "",
"author": {},
"committer": {},
"message": "",
"tree": {"sha": "", "url": ""},
},
"committer": {},
"html_url": "",
"parents": [],
},
"ahead_by": 0,
"behind_by": len(files),
"commits": [],
"diff_url": "",
"files": files,
"html_url": "",
"patch_url": "",
"permalink_url": "",
"status": "",
"total_commits": 0,
"url": "",
}
return response_body
def _get_expected_merge(self, conflict=None):
if conflict:
return {"message": "Merge Conflict"}
new_commit = self._random_sha()
response_body = {
"sha": new_commit,
"merged": True,
"message": "Merged",
"url": "",
"author": None,
"comments_url": "",
"commit": {
"sha": "",
"url": "",
"author": {},
"committer": {},
"message": "",
"tree": {"sha": "", "url": ""},
},
"committer": {},
"html_url": "",
"parents": [],
}
return response_body
def _get_expected_pull_request(
self, pull_id, issue_number, body=None, merged_date=None, **kw
):
if merged_date:
state = "closed"
merged_date = datetime.strftime(merged_date, date_format)
else:
state = "open"
commit_sha = self._random_sha()
merge_sha = None
if merged_date:
merge_sha = self._random_sha()
base_repo = self._get_expected_repo("TestOwner", "TestRepo")
if hasattr(self, "project_config"):
default_branch = self.project_config.project__git__default_branch
else:
default_branch = "main"
pr = {
"active_lock_reason": "too heated",
"additions": [],
"assignee": None,
"assignees": [],
"auto_merge": False,
"author_association": None,
"base": {
"ref": default_branch,
"sha": commit_sha,
"label": "",
"repo": base_repo,
},
"body": body or "testing",
"body_html": "testing",
"body_text": "testing",
"closed_at": merged_date,
"comments": [],
"comments_url": "",
"commits": [],
"commits_url": "",
"created_at": merged_date,
"deletions": [],
"diff_url": "",
"draft": False,
"head": {"ref": "some-other-branch", "sha": commit_sha, "label": ""},
"html_url": "https://github.com/TestOwner/TestRepo/pulls/{}".format(
issue_number
),
"id": pull_id,
"issue_url": "{}/issues/{}".format(self.repo_api_url, issue_number),
"_links": {},
"locked": True,
"merge_commit_sha": merge_sha,
"mergeable": not merged_date,
"mergeable_state": "clean",
"merged_at": merged_date,
"merged": merged_date is not None,
"merged_by": None,
"number": issue_number,
"patch_url": "",
"requested_teams": "",
"requested_reviewers": "",
"review_comment_url": "",
"review_comments": [],
"review_comments_url": "",
"state": state,
"statuses_url": "",
"title": "Pull Request #{}".format(issue_number),
"updated_at": merged_date,
"url": "https://github.com/TestOwner/TestRepo/pulls/{}".format(
issue_number
),
"user": base_repo["owner"],
}
pr.update(kw)
return pr
def _get_expected_pull_requests(self, num_pull_requests):
return [self._get_expected_pull_request(i, i) for i in range(num_pull_requests)]
def _get_expected_issue(self, issue_number, owner=None, repo=None, labels=None):
if owner is None:
owner = "TestOwner"
if repo is None:
repo = "TestRepo"
now = datetime.now().isoformat()
response_body = {
"assignee": None,
"assignees": [],
"body": "I'm having a problem with this.",
"body_html": "",
"body_text": "",
"closed_at": None,
"closed_by": None,
"comments": [],
"comments_url": "",
"created_at": now,
"events_url": "",
"html_url": "https://github.com/{}/{}/issues/{}".format(
owner, repo, issue_number
),
"id": issue_number,
"labels": labels or [],
"labels_url": "",
"locked": False,
"milestone": None,
"number": issue_number,
"state": "open",
"title": "Found a bug",
"updated_at": now,
"url": "https://api.github.com/repos/{}/{}/issues/{}".format(
owner, repo, issue_number
),
"user": self._get_expected_user("user"),
}
return response_body
def _get_expected_issue_comment(self, body):
now = datetime.now().isoformat()
return {
"author_association": "",
"body": body,
"body_html": "",
"body_text": "",
"created_at": now,
"url": "",
"html_url": "",
"id": 0,
"issue_url": "",
"updated_at": now,
"user": self._get_expected_user("user"),
}
def _get_expected_releases(
self, owner: str, repo: str, release_names: T.List[str]
) -> T.List[T.Dict]:
releases = []
for release in release_names:
releases.append(
{
"url": f"https://api.github.com/repos/{owner}/{repo}/releases/{release}",
"html_url": "https://github.com/octocat/Hello-World/releases/v1.0.0",
"assets_url": "https://api.github.com/repos/octocat/Hello-World/releases/1/assets",
"upload_url": "https://uploads.github.com/repos/octocat/Hello-World/releases/1/assets{?name,label}",
"tarball_url": "https://api.github.com/repos/octocat/Hello-World/tarball/v1.0.0",
"zipball_url": "https://api.github.com/repos/octocat/Hello-World/zipball/v1.0.0",
"id": 1,
"node_id": "MDc6UmVsZWFzZTE=",
"tag_name": release,
"target_commitish": "master",
"name": release,
"body": "Description of the release",
"draft": False,
"prerelease": False,
"created_at": "2013-02-27T19:35:32Z",
"published_at": "2013-02-27T19:35:32Z",
"author": {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://api.github.com/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://api.github.com/users/octocat/followers",
"following_url": "https://api.github.com/users/octocat/following{/other_user}",
"gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
"organizations_url": "https://api.github.com/users/octocat/orgs",
"repos_url": "https://api.github.com/users/octocat/repos",
"events_url": "https://api.github.com/users/octocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/octocat/received_events",
"type": "User",
"site_admin": False,
},
"assets": [
{
"url": f"https://api.github.com/repos/{owner}/{repo}/releases/assets/1",
"browser_download_url": "https://github.com/octocat/Hello-World/releases/download/v1.0.0/example.zip",
"id": 1,
"node_id": "MDEyOlJlbGVhc2VBc3NldDE=",
"name": "example.zip",
"label": "short description",
"state": "uploaded",
"content_type": "application/zip",
"size": 1024,
"download_count": 42,
"created_at": "2013-02-27T19:35:32Z",
"updated_at": "2013-02-27T19:35:32Z",
"uploader": {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://api.github.com/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://api.github.com/users/octocat/followers",
"following_url": "https://api.github.com/users/octocat/following{/other_user}",
"gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
"organizations_url": "https://api.github.com/users/octocat/orgs",
"repos_url": "https://api.github.com/users/octocat/repos",
"events_url": "https://api.github.com/users/octocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/octocat/received_events",
"type": "User",
"site_admin": False,
},
}
],
}
)
return releases
def _get_expected_release(self, tag_name, **kw):
now = datetime.now().isoformat()
release = {
"url": "https://release",
"assets": [],
"assets_url": "",
"author": self._get_expected_user("author"),
"body": "",
"created_at": now,
"draft": False,
"html_url": "",
"id": 1,
"name": "1.0",
"prerelease": False,
"published_at": now,
"tag_name": tag_name,
"tarball_url": "",
"target_commitish": "",
"upload_url": "",
"zipball_url": "",
}
release.update(kw)
return release
def _get_expected_prerelease_tag_gql(self, tag_name):
return {
"data": {"repository": {"releases": {"nodes": [{"tagName": tag_name}]}}}
}
def _random_sha(self):
return random_sha()
def _get_expected_not_found(self):
return {
"message": "Not Found",
"documentation_url": "https://developer.github.com/v3",
}
def _get_expected_label(self, name=None, desc=None):
return {
"id": randint(100000000, 999999999),
"node_id": "MDU6TGFiZWwyMDgwNDU5NDY=",
"url": "https://api.github.com/repos/octocat/Hello-World/labels/bug",
"name": name or "Test Label",
"description": desc or "Test label description.",
"color": "f29513",
"default": False,
}
def _get_expected_labels(self, labels):
return [self._get_expected_label(name=label) for label in labels]
def _get_expected_gist(self, description, files, public=False):
"""Gist creationg returns 201 on success"""
gh_id = self.create_id(20)
gist_files = {}
for filename, content in files.items():
gist_files[filename] = {
"filename": filename,
"type": "text/plain",
"language": "text",
"raw_url": f"https://gist.githubusercontent.com/octocat/{gh_id}/raw/99c1bf3a345505c2e6195198d5f8c36267de570b/hello_world.py",
"size": 199,
"truncated": False,
"content": content,
}
expected_gist = {
"url": f"https://api.github.com/gists/{gh_id}",
"forks_url": f"https://api.github.com/gists/{gh_id}/forks",
"commits_url": f"https://api.github.com/gists/{gh_id}/commits",
"id": gh_id,
"node_id": "MDQ6R2lzdGFhNWEzMTVkNjFhZTk0MzhiMThk",
"git_pull_url": f"https://gist.github.com/{gh_id}.git",
"git_push_url": f"https://gist.github.com/{gh_id}.git",
"html_url": f"https://gist.github.com/{gh_id}",
"files": gist_files,
"public": public,
"created_at": "2010-04-14T02:15:15Z",
"updated_at": "2011-06-20T11:34:15Z",
"description": "Hello World Examples",
"comments": 0,
"user": None,
"comments_url": f"https://api.github.com/gists/{gh_id}/comments/",
"owner": {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://api.github.com/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://api.github.com/users/octocat/followers",
"following_url": "https://api.github.com/users/octocat/following{/other_user}",
"gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
"organizations_url": "https://api.github.com/users/octocat/orgs",
"repos_url": "https://api.github.com/users/octocat/repos",
"events_url": "https://api.github.com/users/octocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/octocat/received_events",
"type": "User",
"site_admin": False,
},
"truncated": False,
"forks": [],
"history": [
{
"url": "https://api.github.com/gists/aa5a315d61ae9438b18d/57a7f021a713b1c5a6a199b54cc514735d2d462f",
"version": "57a7f021a713b1c5a6a199b54cc514735d2d462f",
"user": {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://api.github.com/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://api.github.com/users/octocat/followers",
"following_url": "https://api.github.com/users/octocat/following{/other_user}",
"gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
"organizations_url": "https://api.github.com/users/octocat/orgs",
"repos_url": "https://api.github.com/users/octocat/repos",
"events_url": "https://api.github.com/users/octocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/octocat/received_events",
"type": "User",
"site_admin": False,
},
"change_status": {"deletions": 0, "additions": 180, "total": 180},
"committed_at": "2010-04-14T02:15:15Z",
}
],
}
return expected_gist
def create_id(self, length):
characters = [*digits, *ascii_lowercase]
return "".join([choice(characters) for i in range(length)])
|
py | 7dfeb000df6cbcbc0ca90ae15e888b32930e38aa | # coding: UTF-8
import os
import torch
import numpy as np
import pickle as pkl
from tqdm import tqdm  # Python progress bar
import time
from datetime import timedelta
MAX_VOCAB_SIZE = 10000  # vocabulary size limit
UNK, PAD = '<UNK>', '<PAD>'  # unknown token, padding token
def build_vocab(file_path, tokenizer, max_size, min_freq):
    # vocab: {word: num} in word-frequency order; <UNK> is the unknown token, <PAD> the padding token
    # Should stop words be removed here?
    # min_freq is at least 1 and can be left at the default
vocab_dic = {}
with open(file_path, 'r', encoding='UTF-8') as f:
for line in tqdm(f):
lin = line.strip()
if not lin:
continue
content = lin.split('\t')[0]
for word in tokenizer(content):
vocab_dic[word] = vocab_dic.get(word, 0) + 1
vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[:max_size]
vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
return vocab_dic
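# Illustrative example (hypothetical corpus): for tab-separated lines whose texts are only
# "ab" and "ba", character-level tokenization produces a vocab such as
#   {'a': 0, 'b': 1, '<UNK>': 2, '<PAD>': 3}
# with indices in descending frequency order and the two special tokens appended last.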
def build_dataset(config, ues_word):
if ues_word:
        tokenizer = lambda x: x.split(' ')  # split on spaces, word-level
else:
tokenizer = lambda x: [y for y in x] # char-level
if os.path.exists(config.vocab_path):
vocab = pkl.load(open(config.vocab_path, 'rb'))
else:
vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
pkl.dump(vocab, open(config.vocab_path, 'wb'))
print(f"Vocab size: {len(vocab)}")
def load_dataset(path, pad_size=32):
contents = []
with open(path, 'r', encoding='UTF-8') as f:
for line in tqdm(f):
lin = line.strip()
if not lin:
continue
content, label = lin.split('\t')
words_line = []
token = tokenizer(content)
seq_len = len(token)
if pad_size:
if len(token) < pad_size:
token.extend([PAD] * (pad_size - len(token)))
else:
token = token[:pad_size]
seq_len = pad_size
# word to id
for word in token:
words_line.append(vocab.get(word, vocab.get(UNK)))
contents.append((words_line, int(label), seq_len))
return contents # [([...], 0), ([...], 1), ...]
train = load_dataset(config.train_path, config.pad_size)
dev = load_dataset(config.dev_path, config.pad_size)
test = load_dataset(config.test_path, config.pad_size)
return vocab, train, dev, test
class DatasetIterater(object):
def __init__(self, batches, batch_size, device):
self.batch_size = batch_size
self.batches = batches
self.n_batches = len(batches) // batch_size
        self.residue = False  # True if the batches do not divide evenly (a leftover partial batch exists)
if len(batches) % self.n_batches != 0:
self.residue = True
self.index = 0
self.device = device
def _to_tensor(self, datas):
x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        # sequence length before padding (capped at pad_size)
seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
return (x, seq_len), y
def __next__(self):
if self.residue and self.index == self.n_batches:
batches = self.batches[self.index * self.batch_size: len(self.batches)]
self.index += 1
batches = self._to_tensor(batches)
return batches
elif self.index >= self.n_batches:
self.index = 0
raise StopIteration
else:
batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
self.index += 1
batches = self._to_tensor(batches)
return batches
def __iter__(self):
return self
def __len__(self):
if self.residue:
return self.n_batches + 1
else:
return self.n_batches
def build_iterator(dataset, config):
iter = DatasetIterater(dataset, config.batch_size, config.device)
return iter
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
if __name__ == "__main__":
    '''Extract pretrained word vectors'''
    # Change the directories and file names below as needed.
train_dir = "./THUCNews/data/train.txt"
vocab_dir = "./THUCNews/data/vocab.pkl"
pretrain_dir = "./THUCNews/data/sgns.sogou.char"
emb_dim = 300
filename_trimmed_dir = "./THUCNews/data/embedding_SougouNews"
if os.path.exists(vocab_dir):
word_to_id = pkl.load(open(vocab_dir, 'rb'))
else:
        # tokenizer = lambda x: x.split(' ')  # build the vocabulary at word level (words in the dataset are separated by spaces)
        tokenizer = lambda x: [y for y in x]  # build the vocabulary at character level
word_to_id = build_vocab(train_dir, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
pkl.dump(word_to_id, open(vocab_dir, 'wb'))
# embeddings = np.random.rand(len(word_to_id), emb_dim)
# f = open(pretrain_dir, "r", encoding='UTF-8')
# for i, line in enumerate(f.readlines()):
# # if i == 0: # 若第一行是标题,则跳过
# # continue
# lin = line.strip().split(" ")
# if lin[0] in word_to_id:
# idx = word_to_id[lin[0]]
# emb = [float(x) for x in lin[1:301]]
# embeddings[idx] = np.asarray(emb, dtype='float32')
# f.close()
# np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
train = load_dataset(config.train_path, config.pad_size)
|
py | 7dfeb0211b478d27fad8d8e45c78adee341f130e | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import re
import queue
from .proto import keras, is_tf_keras
from .proto.tfcompat import tensorflow as tf
from .proto.tfcompat import is_tf2
from .common import k2o_logger
from .topology import Topology
from .funcbook import get_converter, set_converter
from ._consts import TYPES
from ._tf_ops import pass_thru_converter
from ._parser_tf import (infer_variable_type, LayerInfo, is_placeholder_node,
tsname_to_node, on_parsing_keras_layer_v2, adjust_input_batch_size as _adjust_input_batch_size,
adjust_input_output_size as _adjust_input_output_size)
from ._parser_1x import (extract_inbound_nodes,
list_input_tensors, list_input_mask, list_output_mask,
list_output_tensors, list_input_shapes, list_output_shapes, on_parsing_keras_layer)
def _find_node(nodes, name):
try:
opname = tsname_to_node(name)
return next(n_ for n_ in nodes if n_.name == opname)
except StopIteration:
return None
def _locate_inputs_by_node(node_list, varset):
inputs = {}
for n_ in node_list:
assert n_ in node_list
for i_ in n_.inputs:
op = i_.op
if op.name[0] == '^':
continue
if (not is_placeholder_node(op)) and op in node_list:
continue
if i_ not in inputs:
v0 = varset.get_local_variable_or_declare_one(i_.name, infer_variable_type(i_, varset.target_opset))
inputs[i_] = v0
return list(inputs.values()), list(inputs.keys())
def _locate_outputs(node_list, varset):
var_output = []
nodes = []
for n_ in varset.variable_name_mapping.keys():
node = _find_node(node_list, n_)
if node is not None and (not is_placeholder_node(node)):
nodes.append(node)
assert nodes
for n0_ in nodes:
for n_ in n0_.outputs:
var_output.append(
varset.get_local_variable_or_declare_one(n_.name, infer_variable_type(n_, varset.target_opset)))
return var_output
def _is_relevant_keras_node(model, node):
# type: (keras.Model, object) -> bool
if not hasattr(model, '_nodes_by_depth'):
return True # 'Sequential' object has no attribute '_nodes_by_depth' in the legacy keras version.
for v in model._nodes_by_depth.values():
if node in v:
return True
return False
def _on_parsing_time_distributed_layer(graph, node_list, layer, model, varset, prefix=None):
"""
This conversion supports timedistributed wrapper partially where the layer itself can be converted by onnx.
"""
inputs = []
ishapes = []
outputs = []
oshapes = []
num_relevant_keras_node = 0
for nb_ in extract_inbound_nodes(layer):
if _is_relevant_keras_node(model, nb_):
inputs += list_input_tensors(nb_)
ishapes += list_input_shapes(nb_)
outputs += list_output_tensors(nb_)
oshapes += list_output_shapes(nb_)
num_relevant_keras_node = num_relevant_keras_node + 1
assert num_relevant_keras_node == 1
prefix = prefix or ''
i_ = inputs[0]
iname = prefix + i_.name
k2o_logger().debug('td_layer input: ' + iname)
i0 = varset.get_local_variable_or_declare_one(iname, infer_variable_type(i_, varset.target_opset))
o1_reshape_shape = (-1,) + oshapes[0][2:]
i0_reshape_name = i_.op.name + '_reshape_0:0'
i0_reshape = varset.declare_local_variable(i0_reshape_name, infer_variable_type(i_, varset.target_opset))
i0_reshape_shape = (-1,) + ishapes[0][2:]
i0_reshape.type.shape = i0_reshape_shape
operator_reshape_0 = varset.declare_local_operator(TYPES.TD_Reshape,
op_name=layer.name + '_reshape_0', target_shape=i0_reshape_shape)
operator_reshape_0.add_input(i0)
operator_reshape_0.add_output(i0_reshape)
o_ = outputs[0]
oname = prefix + o_.name
k2o_logger().debug('td_layer output: ' + oname)
o1 = varset.get_local_variable_or_declare_one(oname, infer_variable_type(o_, varset.target_opset))
oshapes1 = [-1 if s_ is None else s_ for s_ in oshapes[0]]
operator_reshape_1 = varset.declare_local_operator(TYPES.TD_Reshape,
op_name=layer.name + '_reshape_1', target_shape=oshapes1)
operator_reshape_1.add_output(o1)
o1_reshape_name = o_.op.name + '_reshape_1:0'
o1_reshape = varset.declare_local_variable(o1_reshape_name, infer_variable_type(o_, varset.target_opset))
o1_reshape.type.shape = o1_reshape_shape
operator_reshape_1.add_input(o1_reshape)
if isinstance(layer.layer, keras.Model):
kenode = extract_inbound_nodes(layer.layer)[0]
intop = varset.declare_local_operator(TYPES.Identity)
intop.add_input(i0_reshape)
intop.add_output(varset.get_local_variable_or_declare_one(list_input_tensors(kenode)[0].name))
_on_parsing_model_layer(layer.layer, graph, kenode, varset)
intop = varset.declare_local_operator(TYPES.Identity)
intop.add_input(varset.get_local_variable_or_declare_one(list_output_tensors(kenode)[0].name))
intop.add_output(o1_reshape)
else:
operator = varset.declare_local_operator(type(layer.layer), raw_model=layer.layer, op_name=layer.name)
operator.nodelist = node_list
operator.add_input(i0_reshape)
operator.add_output(o1_reshape)
cvt = get_converter(type(layer.layer))
if cvt is not None and hasattr(cvt, 'shape_infer'):
operator.shape_infer = cvt.shape_infer
def _check_layer_converter_availability(sub_model):
for l_ in sub_model.layers:
if isinstance(l_, keras.Model):
exist = _check_layer_converter_availability(l_)
else:
layer_type = type(l_)
exist = get_converter(layer_type) or \
layer_type in [keras.layers.InputLayer, keras.layers.wrappers.TimeDistributed]
if not exist:
k2o_logger().info("The layer {} doesn't have a specific converter, fall back.".format(str(l_)))
break
else:
return True
return False
def _create_identity(ts_from, ts_to, varset):
ty_ = infer_variable_type(ts_from, varset.target_opset)
var0 = varset.get_local_variable_or_declare_one(ts_from.name, ty_)
var1 = varset.get_local_variable_or_declare_one(ts_to.name, ty_)
op = varset.declare_local_operator(TYPES.Identity, op_name=ts_to.name)
op.add_input(var0)
op.add_output(var1)
return op
def _create_model_input_mapping_operators(ts_from, ts_to, prefix, subprefix, varset):
ty_ = infer_variable_type(ts_from, varset.target_opset)
# type(_infer_variable_type(ts_to, varset.target_opset) and ...
# ... type(ty_) can be different which is resolved by implicit cast.
var0 = varset.get_local_variable_or_declare_one(subprefix + ts_from.name, ty_)
var1 = varset.get_local_variable_or_declare_one(prefix + ts_to.name, ty_)
op = varset.declare_local_operator(TYPES.Identity, op_name=prefix + ts_to.name)
op.add_input(var0)
op.add_output(var1)
k2o_logger().debug(
"mapping: %s -> %s (%s -> %s)" % (ts_from.name, ts_to.name, subprefix + ts_from.name, prefix + ts_to.name))
return op
def _find_kenode_by_output_tensor(inbound_nodes, output_name):
def find_ts_name(tensors, name):
return next((ts_ for ts_ in tensors if ts_.name.find(name) == 0), None)
return next((n_ for n_ in inbound_nodes if find_ts_name(list_output_tensors(n_), output_name) is not None), None)
def _is_template_tensors(tensors, templ_tensors):
for t_, tt_ in zip(tensors, templ_tensors):
# t_.shape and tt_.shape can be different if the input shape is different.
if t_.name.find(tt_.name) < 0:
return False
return True
def _on_parsing_model_layer(sub_model, graph, target_kenode, varset, top_kenode=None, upper_prefix=None):
ts_inputs = []
ts_outputs = []
upper_prefix = upper_prefix if upper_prefix else ''
prefix = ''
# mapping input/output nodes for the sub_model.
inbound_nodes = extract_inbound_nodes(sub_model)
sub_model_node_idx = 0
if len(inbound_nodes) > 1 and inbound_nodes[0] is not target_kenode:
# Assumption: the first node in the inbound node list is always the one used in the keras layers.
curr_node = target_kenode
assert curr_node is not None
found = False
base_node = inbound_nodes[0]
for nodes_ in sub_model._nodes_by_depth.values():
for nd_ in nodes_:
if _is_template_tensors(list_output_tensors(curr_node), list_output_tensors(nd_)):
found = True
base_node = nd_
break
else:
sub_model_node_idx += 1
if found:
break
else:
assert False, "Cannot find the node for the model layer {}".format(sub_model.name)
bn_name_list = [bn_.name for bn_ in list_output_tensors(base_node)]
prefix_found = False
for idx_, out_ in enumerate(list_output_tensors(curr_node)):
if not prefix_found:
name_match_len = -1
for bn_name_ in bn_name_list:
cur_match_len = out_.name.find(bn_name_)
if cur_match_len > -1:
name_match_len = cur_match_len
break
assert name_match_len > 0
prefix = out_.name[0:name_match_len]
prefix_found = True
ts_outputs.append(out_)
if top_kenode is None:
top_kenode = curr_node
# the input node needs to be mapped to the outmost inbound keras node.
for idx_, in_ in enumerate(list_input_tensors(top_kenode)):
_create_model_input_mapping_operators(in_, list_input_tensors(inbound_nodes[0])[idx_],
upper_prefix + prefix, upper_prefix,
varset)
ts_inputs.append(in_)
k2o_logger().debug("prefix_beg: %s" % prefix)
for i_ in range(sub_model_node_idx, len(sub_model._nodes_by_depth)):
nodes_ = sub_model._nodes_by_depth[i_]
for n_ in nodes_:
layer = n_.outbound_layer
if isinstance(layer, keras.layers.InputLayer):
continue
elif isinstance(layer, keras.layers.wrappers.TimeDistributed):
_on_parsing_time_distributed_layer(graph, [], layer, sub_model, varset, upper_prefix + prefix)
elif isinstance(layer, keras.Model):
k2o_logger().debug("Processing a keras sub model - %s" % layer.name)
cur_kenode = _find_kenode_by_output_tensor(extract_inbound_nodes(layer), sub_model.outputs[0].name)
_on_parsing_model_layer(layer, graph, n_, varset, cur_kenode, upper_prefix + prefix)
else:
on_parsing_keras_layer(graph, [], layer, n_, sub_model, varset, upper_prefix + prefix)
k2o_logger().debug("prefix_end: - %s" % prefix)
return ts_inputs, ts_outputs
def _check_tfnode_converter_availability(graph, node):
var_assign_map = {'VarHandleOp': 'AssignVariableOp', 'VariableV2': 'Assign'}
if node.type in var_assign_map:
if is_tf2:
v_output = node.outputs[0].name
for graph_node_name in graph._nodes_by_name:
graph_op = graph._nodes_by_name[graph_node_name]
if graph_op.type == var_assign_map[node.type] and len(graph_op.inputs) > 1 and v_output == \
graph_op.inputs[0].name:
cur_i = graph_op.inputs[1].op
if cur_i.type == 'Const' and cur_i.get_attr('value').tensor_content != b'':
return True
return False
else:
return True
else:
cvt = get_converter(node.type)
return cvt is not None
def _check_tfnodes_converter_availability(graph, nodelist, debug_mode):
status = True
for n_ in nodelist:
if not _check_tfnode_converter_availability(graph, n_):
k2o_logger().warning(
"WARN: No corresponding ONNX op matches the tf.op node {} of type {}".format(n_.name, n_.type) +
"\n The generated ONNX model needs run with the custom op supports.")
status = False
return status
def _on_parsing_tf_nodes(graph, nodelist, varset, debug_mode):
_check_tfnodes_converter_availability(graph, nodelist, debug_mode)
for node_ in nodelist:
k2o_logger().debug("Processing a tf node - %s" % node_.name)
operator = varset.declare_local_operator(node_.type, raw_model=node_, op_name=node_.name)
for o_ in node_.outputs:
oname = o_.name
k2o_logger().debug('\toutput: ' + oname)
out0 = varset.get_local_variable_or_declare_one(oname, infer_variable_type(o_, varset.target_opset))
operator.add_output(out0)
for i_ in node_.inputs:
k2o_logger().debug('\tinput : ' + i_.name)
var_type = infer_variable_type(i_, varset.target_opset)
i0 = varset.get_local_variable_or_declare_one(i_.name, var_type)
operator.add_input(i0)
cvt = get_converter(operator.type)
if cvt is None:
assert isinstance(operator.type, str), \
"Only tf-op can be pass_thru conversion, type: {}".format(type(operator.type))
set_converter(operator.type, pass_thru_converter)
elif hasattr(cvt, 'shape_infer'):
operator.shape_infer = cvt.shape_infer
def _infer_graph_shape(topology, top_level, varset):
raw_model_container = topology.raw_model
var_queue = queue.Queue()
for i_ in raw_model_container.input_names:
var_queue.put_nowait(top_level.get_local_variable_or_declare_one(i_))
visited = set()
while not var_queue.empty():
var = var_queue.get_nowait()
k2o_logger().debug("var: " + var.full_name)
for oop in var.op_to:
if oop in visited:
continue
visited.add(oop)
if isinstance(oop.raw_operator, (keras.layers.Layer, tf.Operation)):
assert oop.outputs
elif oop.raw_operator:
oop.outputs = _locate_outputs(oop.raw_operator, varset)
else:
assert oop.outputs
for o_ in oop.outputs:
o_.op_from = oop
si = oop.shape_infer
if si is not None:
# let operator to build its own shape if it can't be deduced from the tf.graph.
si(oop)
for o_ in oop.outputs:
var_queue.put_nowait(o_)
def _create_link_node(var_ts, top_level, varset, reversed_io=False, adjust_batch_size=False):
if adjust_batch_size:
ty_ = _adjust_input_batch_size(infer_variable_type(var_ts, varset.target_opset))
else:
ty_ = infer_variable_type(var_ts, varset.target_opset)
var0 = top_level.get_local_variable_or_declare_one(var_ts.name, ty_)
var1 = varset.get_local_variable_or_declare_one(var_ts.name, ty_)
op = varset.declare_local_operator(TYPES.Identity)
if reversed_io:
var0, var1 = var1, var0
op.add_input(var1)
op.add_output(var0)
return op
def _build_inference_nodeset(graph, outputs):
nodes_to_keep = set()
node_inputs = outputs[:]
while node_inputs:
nd_ = node_inputs[0]
del node_inputs[0]
if nd_ in nodes_to_keep:
continue
nodes_to_keep.add(nd_)
node_inputs.extend(in_.op for in_ in nd_.inputs)
return nodes_to_keep
def _create_keras_nodelist(layer, inference_nodeset, out_node=None):
newly = list()
ts_end = set() # the input tensor set of the whole layer/model.
for node_ in extract_inbound_nodes(layer):
if out_node is not None and out_node.name not in \
[tsname_to_node(ts_.name) for ts_ in list_output_tensors(node_)]:
continue # this layer could be reused several times in the whole graph.
for ts_ in list_output_tensors(node_):
if ts_.op in inference_nodeset:
newly.extend([ts_.op for ts_ in list_output_tensors(node_)])
ts_end |= set(list_input_tensors(node_))
for ts_ in list_input_mask(layer):
ts_end.add(ts_)
for ts_ in list_output_mask(layer):
newly.append(ts_.op)
visited = set()
nodelist = list() # keep the node list order.
while newly:
visited.update(newly)
nodelist.extend(newly)
newly.clear()
for n_ in visited:
for i_ in n_.inputs:
if i_ in ts_end or i_.op in visited or i_.op not in inference_nodeset:
continue
if isinstance(layer, keras.Model): # ugly fixing for the shared layer.
if i_.name.startswith(layer.name):
pass
elif i_.name.startswith('^' + layer.name):
pass
else:
continue
newly.append(i_.op)
return nodelist
def _general_nodelist_closure(node, nodeset, keras_nodeset):
nodes = set()
visited = set()
def is_stop_node(nd):
return is_placeholder_node(nd) or nd in keras_nodeset
node_added = [node]
updated = True
while updated:
updated = False
while node_added:
nd_ = node_added[0]
del node_added[0]
if nd_ not in visited:
visited.add(nd_)
if not is_stop_node(nd_) and nd_ not in nodes:
nodes.add(nd_)
updated = True
node_added.extend(in_.op for in_ in nd_.inputs if not is_stop_node(in_.op))
node_added = []
for nd_ in nodeset:
if any(in_.op in nodes for in_ in nd_.inputs):
node_added.append(nd_)
return nodes
def _build_keras_nodeset(inference_nodeset, keras_node_dict):
nodes = set()
for layer_, _ in keras_node_dict.values():
nodes.update(_create_keras_nodelist(layer_, inference_nodeset))
return nodes
def _get_output_nodes(node_list, node):
nodes_has_children = set()
    for nd_ in node_list:
        if nd_:
            for input_tensor in nd_.inputs:
                nodes_has_children.add(input_tensor.op)
return [n_ for n_ in node_list if n_ not in nodes_has_children] # need to keep the order.
def _filter_out_input(node_name):
# tf.keras BN layer sometimes create a placeholder node 'scale' in tf 2.x.
# It creates 'cond/input' since tf 2.2.
# Given bn layer will be converted in a whole layer, it's fine to just filter this node out.
filter_patterns = [r"batch_normalization_\d+\/scale$", r"batch_normalization_\d+\/cond/input"]
filter_out = False
for pattern_ in filter_patterns:
filter_out = filter_out or re.match(pattern_, node_name)
return filter_out
def _advance_by_input(cur_node, layer_nodes, subgraph, inputs, graph_inputs, q_overall):
for input_ in cur_node.inputs:
predecessor = input_.op
if is_placeholder_node(predecessor) and not _filter_out_input(predecessor.name):
inputs.add(predecessor)
graph_inputs.add(predecessor)
continue
if predecessor in layer_nodes or len(layer_nodes) == 0:
subgraph.append(predecessor)
else:
inputs.add(predecessor)
q_overall.put_nowait(predecessor)
def _visit_nodelist(activated_keras_nodes, input_nodes, layer_key,
keras_node_dict, node, nodelist, q_overall, visited):
subgraph = list()
i_subgraph = set()
for ot_ in (_get_output_nodes(activated_keras_nodes, node) if activated_keras_nodes else [node]):
if ot_ not in nodelist:
visited.add(ot_)
nodelist.append(ot_)
_advance_by_input(ot_, activated_keras_nodes, subgraph, i_subgraph, input_nodes, q_overall)
while subgraph:
int_node = subgraph.pop(0)
if int_node in input_nodes or int_node in visited or int_node in keras_node_dict:
continue
visited.add(int_node)
nodelist.append(int_node)
_advance_by_input(int_node, activated_keras_nodes, subgraph, i_subgraph, input_nodes, q_overall)
return i_subgraph
def _parse_nodes(graph, inference_nodeset, graph_inputs, keras_node_dict, keras_nodeset, node, nodelist, varset,
visited, q_overall):
layer_key_, model_ = (None, None)
if node.name in keras_node_dict:
layer_key_, model_ = keras_node_dict[node.name]
if isinstance(layer_key_, keras.Model) and \
_check_layer_converter_availability(layer_key_):
k2o_logger().debug("Processing a keras sub model - %s" % layer_key_.name)
kenode = _find_kenode_by_output_tensor(extract_inbound_nodes(layer_key_), node.name)
ts_in, ts_out = _on_parsing_model_layer(layer_key_, graph, kenode, varset)
for ts_ in ts_in:
if is_placeholder_node(ts_.op):
graph_inputs.add(ts_.op)
else:
q_overall.put_nowait(ts_.op)
visited.update(ts_.op for ts_ in ts_out)
return layer_key_, model_
activated_keras_nodes = _create_keras_nodelist(layer_key_, inference_nodeset, node)
else:
activated_keras_nodes = _general_nodelist_closure(node, inference_nodeset, keras_nodeset)
_visit_nodelist(activated_keras_nodes, graph_inputs, layer_key_,
keras_node_dict, node, nodelist, q_overall, visited)
return layer_key_, model_
def _parse_graph_core(graph, keras_node_dict, topology, top_scope, output_names):
"""
travel the tensor Graph and build the corresponding intermediate operation objects.
:param graph: the tensorflow session graph of the Keras mode.
:param keras_node_dict: the mapping of operation node to keras layer output.
:param topology: The whole topology of the intermediate objects.
:param top_scope: The top varset
:param output_names: the output names of the TF graph
:return: The whole topology of the intermediate objects.
"""
input_nodes = set()
# build the node in the working scope.
varset = topology.declare_scope('curr_', top_scope)
model_outputs = []
for name in output_names:
var_ts = graph.get_operation_by_name(tsname_to_node(name)).outputs[0]
_create_link_node(var_ts, top_scope, varset, adjust_batch_size=True)
model_outputs.append(var_ts.op)
# starting from the output node.
q_overall = queue.Queue()
for n_ in model_outputs:
q_overall.put_nowait(n_)
visited = set() # since the output could be shared among the successor nodes.
inference_nodeset = _build_inference_nodeset(graph, model_outputs)
keras_nodeset = _build_keras_nodeset(inference_nodeset, keras_node_dict)
while not q_overall.empty():
node = q_overall.get_nowait()
if node in input_nodes or node in visited or node not in inference_nodeset:
continue
nodes = []
layer_key_, model_ = _parse_nodes(graph, inference_nodeset, input_nodes, keras_node_dict, keras_nodeset,
node, nodes, varset, visited, q_overall)
if not nodes: # already processed by the _parse_nodes
continue
k2o_logger().debug('Processing a keras layer - (%s: %s)' % (layer_key_.name, type(layer_key_)) if
layer_key_ else (nodes[0].name, "Custom_Layer"))
if isinstance(layer_key_, keras.layers.TimeDistributed):
_on_parsing_time_distributed_layer(graph, nodes, layer_key_, model_, varset)
elif layer_key_ is None or get_converter(type(layer_key_)) is None:
_on_parsing_tf_nodes(graph, nodes, varset, topology.debug_mode)
else:
kenode = _find_kenode_by_output_tensor(extract_inbound_nodes(layer_key_), nodes[0].name)
on_parsing_keras_layer(graph, nodes, layer_key_, kenode, model_, varset)
for nd_ in input_nodes:
var_ts = nd_.outputs[0] # since it's placeholder node, safely claim there is only one output.
_create_link_node(var_ts, top_scope, varset, True)
_infer_graph_shape(topology, top_scope, varset)
topology.root_names = [variable.onnx_name for variable in top_scope.variables.values()]
return topology
def _sorted_inputs(nodelist, outputs, inputs_set):
inputs = []
node_set = frozenset(nodelist)
visited = set()
def travel(node):
for in_ts_ in node.inputs:
op_node = in_ts_.op
if op_node in visited:
continue
visited.add(op_node)
if (op_node in inputs_set) and (op_node not in inputs):
inputs.append(op_node)
elif op_node in node_set:
travel(op_node)
for ts_ in outputs:
travel(ts_.op)
return inputs
def _parse_nodes_v2(graph, inference_nodeset, graph_inputs, keras_node_dict, node, varset, visited, q_overall):
layer_key, model_ = (None, None)
current_layer_outputs = {}
if node.name in keras_node_dict:
layer_key, model_ = keras_node_dict[node.name]
else:
ts_out = node.outputs[0]
kh_ = getattr(ts_out, '_keras_history', None)
if kh_ is not None:
layer_key = kh_.layer
kenode = extract_inbound_nodes(layer_key)[kh_.node_index]
current_layer_outputs.update({ts_.op.name: (layer_key, None) for ts_ in list_output_tensors(kenode)})
if layer_key is None:
layer_info = LayerInfo.create_single_node(node, visited)
else:
if isinstance(layer_key, keras.Model):
k2o_logger().debug("Processing a keras model layer - %s" % layer_key.name)
kenode = _find_kenode_by_output_tensor(extract_inbound_nodes(layer_key), node.outputs[0].name)
for ts_ in list_output_tensors(kenode):
_create_identity(ts_.op.inputs[0], ts_, varset)
visited.add(ts_.op)
_advance_by_input(ts_.op, [ts_.op], list(), set(), graph_inputs, q_overall)
return None, model_
else:
layer_info = LayerInfo.create(node, layer_key,
{**keras_node_dict, **current_layer_outputs}, inference_nodeset)
nodelist = []
layer_inputs = _visit_nodelist(layer_info.nodelist, graph_inputs, None, keras_node_dict, node, nodelist,
q_overall, visited)
sorted_inputs = _sorted_inputs(layer_info.nodelist, layer_info.outputs, layer_inputs)
for input_ in sorted_inputs:
layer_info.inputs.extend(input_.outputs)
layer_info.nodelist = [n_ for n_ in layer_info.nodelist if not is_placeholder_node(n_)]
return layer_info, model_
def _parse_graph_core_v2(graph, keras_node_dict, topology, top_scope, output_names):
"""
travel the tensor Graph and build the corresponding intermediate operation objects.
:param graph: the tensorflow session graph of the Keras mode.
:param keras_node_dict: the mapping of operation node to keras layer output.
:param topology: The whole topology of the intermediate objects.
:param top_scope: The top varset
:param output_names: the output names of the TF graph
:return: The whole topology of the intermediate objects.
"""
input_nodes = set()
# build the node in the working scope.
varset = topology.declare_scope('curr_', top_scope)
model_outputs = []
for name in output_names:
var_ts = graph.get_operation_by_name(tsname_to_node(name)).outputs[0]
_create_link_node(var_ts, top_scope, varset, adjust_batch_size=True)
model_outputs.append(var_ts.op)
# starting from the output node.
q_overall = queue.Queue()
for n_ in model_outputs:
q_overall.put_nowait(n_)
visited = set() # since the output could be shared among the successor nodes.
# Some complicated layer may have some nodes which cannot be visited from the graph output...
# ..., so the layer outputs are added into visit graph to avoid missing nodes.
layer_outputs = [graph.get_operation_by_name(nm_) for nm_ in keras_node_dict]
inference_nodeset = _build_inference_nodeset(graph, model_outputs + layer_outputs)
while not q_overall.empty():
node = q_overall.get_nowait()
if node in input_nodes or node in visited or node not in inference_nodeset:
continue
layer_info, model_ = _parse_nodes_v2(graph, inference_nodeset, input_nodes, keras_node_dict, node,
varset, visited, q_overall)
if not layer_info: # already processed by the _parse_nodes_v2
continue
k2o_logger().debug('Processing a keras layer - (%s: %s)' % (layer_info.layer.name, type(layer_info.layer)) if
layer_info.layer else (layer_info.nodelist[0].name, "Custom_Layer"))
if layer_info.layer and isinstance(layer_info.layer, keras.layers.TimeDistributed):
_on_parsing_time_distributed_layer(graph, layer_info.nodelist, layer_info.layer, model_, varset)
elif layer_info.layer and get_converter(type(layer_info.layer)):
on_parsing_keras_layer_v2(graph, layer_info, varset)
else:
_on_parsing_tf_nodes(graph, layer_info.nodelist, varset, topology.debug_mode)
for nd_ in input_nodes:
var_ts = nd_.outputs[0] # since it's placeholder node, safely claim there is only one output.
_create_link_node(var_ts, top_scope, varset, True)
_infer_graph_shape(topology, top_scope, varset)
topology.root_names = [variable.onnx_name for variable in top_scope.variables.values()]
return topology
def parse_graph_modeless(topo, graph, target_opset, input_names, output_names, keras_node_dict):
top_level = topo.declare_scope('__root')
input_tensors = [graph.get_tensor_by_name(n_) for n_ in input_names]
output_tensors = [graph.get_tensor_by_name(n_) for n_ in output_names]
for ts_i_ in input_tensors:
var_type = _adjust_input_batch_size(infer_variable_type(ts_i_, target_opset))
if ts_i_.name.endswith(':0'):
str_value = ts_i_.name[:-2]
op = top_level.declare_local_operator(TYPES.Identity)
var0 = top_level.get_local_variable_or_declare_one(str_value, var_type)
var1 = top_level.get_local_variable_or_declare_one(ts_i_.name, var_type)
op.add_input(var0)
op.add_output(var1)
else:
str_value = ts_i_.name
top_level.get_local_variable_or_declare_one(str_value, var_type)
topo.raw_model.add_input_name(str_value)
for ts_o_ in output_tensors:
var_type = _adjust_input_batch_size(infer_variable_type(ts_o_, target_opset))
str_value = ts_o_.name
top_level.get_local_variable_or_declare_one(str_value, var_type)
topo.raw_model.add_output_name(str_value)
return _parse_graph_core_v2(
graph, keras_node_dict, topo, top_level, output_names
)
def parse_graph(topo, graph, target_opset, output_names, keras_node_dict):
# type: (Topology, tf.Graph, int, [], []) -> Topology
"""
Build the node-layer mapper and parse the whole TF graph of Keras Model.
"""
top_level = topo.declare_scope('__root')
dim_variable_counter = 0
# Create the onnx model input name before parsing to keep ...
# ... the model input names are identical to the original Keras model.
for idx_ in range(len(topo.raw_model.model.inputs)):
op = top_level.declare_local_operator(TYPES.Identity)
idx_key = idx_
if isinstance(topo.raw_model.model.inputs, dict):
idx_key = list(topo.raw_model.model.inputs.keys())[idx_]
input_ts = topo.raw_model.model.inputs[idx_key]
var_type = _adjust_input_batch_size(infer_variable_type(input_ts, target_opset))
dim_variable_counter = _adjust_input_output_size(var_type, dim_variable_counter)
str_value = input_ts.name
var0 = None
if hasattr(topo.raw_model.model, 'input_names'):
str_value = topo.raw_model.model.input_names[idx_]
elif input_ts.name.endswith(':0'):
str_value = input_ts.name[:-2]
else:
# if there is no difference between input tensor name and model input name,
# skip it.
var0 = top_level.get_local_variable_or_declare_one(str_value, var_type)
if not var0:
var0 = top_level.get_local_variable_or_declare_one(str_value, var_type)
var1 = top_level.get_local_variable_or_declare_one(input_ts.name, var_type)
op.add_input(var0)
op.add_output(var1)
topo.raw_model.add_input_name(str_value)
output_name_dict = {}
output_tensors = topo.raw_model.model.outputs
if output_names:
output_tensors = [graph.get_tensor_by_name(n_) for n_ in output_names]
for idx_, ts_ in enumerate(output_tensors):
op = top_level.declare_local_operator(TYPES.Identity)
var_type = _adjust_input_batch_size(infer_variable_type(ts_, target_opset))
dim_variable_counter = _adjust_input_output_size(var_type, dim_variable_counter)
str_value = ts_.name
use_ts_name = False
if hasattr(topo.raw_model.model, 'output_names'):
str_value = topo.raw_model.model.output_names[idx_]
elif ts_.name.endswith(':0'):
str_value = tsname_to_node(ts_.name)
else:
# if there is no difference between output tensor name and model output name
# skip it.
use_ts_name = True
if str_value in output_name_dict:
cur_count = output_name_dict[str_value]
output_name_dict[str_value] = cur_count + 1
str_value = str_value + ':' + str(cur_count)
else:
output_name_dict[str_value] = 1
if not use_ts_name:
var0 = top_level.get_local_variable_or_declare_one(str_value, var_type)
var1 = top_level.get_local_variable_or_declare_one(ts_.name, var_type)
op.add_input(var1)
op.add_output(var0)
topo.raw_model.add_output_name(str_value)
return _parse_graph_core_v2(
graph, keras_node_dict, topo, top_level, output_names
) if is_tf2 and is_tf_keras else _parse_graph_core(
graph, keras_node_dict, topo, top_level, output_names)
|
py | 7dfeb2493d33b99bf89a73977eea12feadbbd431 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 12:00:11 2017
@author: prmiles
"""
import numpy as np
import sys
from .utilities.progressbar import progress_bar
from .utilities.general import check_settings
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors as mplcolor
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.interpolate import interp1d
def calculate_intervals(chain, results, data, model, s2chain=None,
nsample=500, waitbar=True, sstype=0):
'''
Calculate distribution of model response to form propagation intervals
Samples values from chain, performs forward model evaluation, and
tabulates credible and prediction intervals (if obs. error var. included).
Args:
* **chain** (:class:`~numpy.ndarray`): Parameter chains, expect
shape=(nsimu, npar).
* **results** (:py:class:`dict`): Results dictionary generated by
pymcmcstat.
* **data** (:class:`~.DataStructure`): Data
* **model**: User defined function. Note, if your model outputs
multiple quantities of interest (QoI) at the same time in a
multi-dimensional array, then make sure it is returned as a
(N, p) array where N is the number of evaluation points and
p is the number of QoI.
Kwargs:
* **s2chain** (:py:class:`float`, :class:`~numpy.ndarray`, or None):
Observation error variance chain.
* **nsample** (:py:class:`int`): No. of samples drawn from posteriors.
* **waitbar** (:py:class:`bool`): Flag to display progress bar.
* **sstype** (:py:class:`int`): Sum-of-squares type. Can be 0 (normal),
1 (sqrt), or 2 (log).
Returns:
* :py:class:`dict` with two elements: 1) `credible` and 2) `prediction`
'''
parind = results['parind']
q = results['theta']
nsimu, npar = chain.shape
s2chain = check_s2chain(s2chain, nsimu)
iisample, nsample = define_sample_points(nsample, nsimu)
if waitbar is True:
__wbarstatus = progress_bar(iters=int(nsample))
ci = []
pi = []
multiple = False
for kk, isa in enumerate(iisample):
# progress bar
if waitbar is True:
__wbarstatus.update(kk)
# extract chain set
        q[parind] = chain[isa, :]
# evaluate model
y = model(q, data)
# check model output
if y.ndim == 2:
nrow, ncol = y.shape
if nrow != y.size and ncol != y.size:
multiple = True
if multiple is False:
# store model prediction in credible intervals
ci.append(y.reshape(y.size,)) # store model output
if s2chain is None:
continue
else:
# estimate prediction intervals
                s2 = s2chain[isa]
obs = observation_sample(s2, y, sstype)
pi.append(obs.reshape(obs.size,))
else:
# Model output contains multiple QoI
# Expect ncol = No. of QoI
if kk == 0:
cis = []
pis = []
for jj in range(ncol):
cis.append([])
pis.append([])
for jj in range(ncol):
# store model prediction in credible intervals
cis[jj].append(y[:, jj]) # store model output
if s2chain is None:
continue
else:
# estimate prediction intervals
                    if s2chain.ndim == 2:
                        if s2chain.shape[1] == ncol:
                            s2 = s2chain[isa, jj]
                        else:
                            s2 = s2chain[isa]
                    else:
                        s2 = s2chain[isa]
obs = observation_sample(s2, y[:, jj], sstype)
pis[jj].append(obs.reshape(obs.size,))
if multiple is False:
# Setup output
credible = np.array(ci)
if s2chain is None:
prediction = None
else:
prediction = np.array(pi)
return dict(credible=credible,
prediction=prediction)
else:
# Setup output for multiple QoI
out = []
for jj in range(ncol):
credible = np.array(cis[jj])
if s2chain is None:
prediction = None
else:
prediction = np.array(pis[jj])
out.append(dict(credible=credible,
prediction=prediction))
return out
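# Illustrative usage sketch (hypothetical names): `chain`, `s2chain`, `results`
# and `data` are assumed to come from a prior pymcmcstat run, and
# `model_fun(q, data)` is a user-defined forward model returning an (N,) array.
#
#     intervals = calculate_intervals(chain, results, data, model_fun,
#                                     s2chain=s2chain, nsample=400,
#                                     waitbar=False, sstype=0)
#     fig, ax = plot_intervals(intervals, time=data.xdata[0],
#                              ydata=data.ydata[0], limits=[50, 90, 95])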
# --------------------------------------------
def plot_intervals(intervals, time, ydata=None, xdata=None,
limits=[95],
adddata=None, addmodel=True, addlegend=True,
addcredible=True, addprediction=True,
data_display={}, model_display={}, interval_display={},
fig=None, figsize=None, legloc='upper left',
ciset=None, piset=None,
return_settings=False):
'''
Plot propagation intervals in 2-D
This routine takes the model distributions generated using the
:func:`~calculate_intervals` method and then plots specific
quantiles. The user can plot just the intervals, or also include the
median model response and/or observations. Specific settings for
credible intervals are controlled by defining the `ciset` dictionary.
Likewise, for prediction intervals, settings are defined using `piset`.
The setting options available for each interval are as follows:
- `limits`: This should be a list of numbers between 0 and 100, e.g.,
`limits=[50, 90]` will result in 50% and 90% intervals.
- `cmap`: The program is designed to "try" to choose colors that
are visually distinct. The user can specify the colormap to choose
from.
- `colors`: The user can specify the color they would like for each
interval in a list, e.g., ['r', 'g', 'b']. This list should have
the same number of elements as `limits` or the code will revert
back to its default behavior.
Args:
* **intervals** (:py:class:`dict`): Interval dictionary generated
using :meth:`calculate_intervals` method.
* **time** (:class:`~numpy.ndarray`): Independent variable, i.e.,
x-axis of plot
Kwargs:
* **ydata** (:class:`~numpy.ndarray` or None): Observations, expect
1-D array if defined.
* **xdata** (:class:`~numpy.ndarray` or None): Independent values
corresponding to observations. This is required if the observations
do not align with your times of generating the model response.
* **limits** (:py:class:`list`): Quantile limits that correspond to
percentage size of desired intervals. Note, this is the default
limits, but specific limits can be defined using the `ciset` and
`piset` dictionaries.
* **adddata** (:py:class:`bool`): Flag to include data
* **addmodel** (:py:class:`bool`): Flag to include median model
response
* **addlegend** (:py:class:`bool`): Flag to include legend
* **addcredible** (:py:class:`bool`): Flag to include credible
intervals
* **addprediction** (:py:class:`bool`): Flag to include prediction
intervals
* **model_display** (:py:class:`dict`): Display settings for median
model response
* **data_display** (:py:class:`dict`): Display settings for data
* **interval_display** (:py:class:`dict`): General display settings
for intervals.
* **fig**: Handle of previously created figure object
* **figsize** (:py:class:`tuple`): (width, height) in inches
* **legloc** (:py:class:`str`): Legend location - matplotlib help for
details.
* **ciset** (:py:class:`dict`): Settings for credible intervals
* **piset** (:py:class:`dict`): Settings for prediction intervals
* **return_settings** (:py:class:`bool`): Flag to return ciset and
piset along with fig and ax.
Returns:
* (:py:class:`tuple`) with elements
1) Figure handle
2) Axes handle
3) Dictionary with `ciset` and `piset` inside (only
outputted if `return_settings=True`)
'''
# unpack dictionary
credible = intervals['credible']
prediction = intervals['prediction']
# Check user-defined settings
ciset = __setup_iset(ciset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
piset = __setup_iset(piset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
# Check limits
ciset['limits'] = _check_limits(ciset['limits'], limits)
piset['limits'] = _check_limits(piset['limits'], limits)
# convert limits to ranges
ciset['quantiles'] = _convert_limits(ciset['limits'])
piset['quantiles'] = _convert_limits(piset['limits'])
# setup display settings
interval_display, model_display, data_display = setup_display_settings(
interval_display, model_display, data_display)
# Define colors
ciset['colors'] = setup_interval_colors(ciset, inttype='ci')
piset['colors'] = setup_interval_colors(piset, inttype='pi')
# Define labels
ciset['labels'] = _setup_labels(ciset['limits'], inttype='CI')
piset['labels'] = _setup_labels(piset['limits'], inttype='PI')
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.gca()
time = time.reshape(time.size,)
# add prediction intervals
if addprediction is True:
for ii, quantile in enumerate(piset['quantiles']):
pi = generate_quantiles(prediction, np.array(quantile))
ax.fill_between(time, pi[0], pi[1], facecolor=piset['colors'][ii],
label=piset['labels'][ii], **interval_display)
# add credible intervals
if addcredible is True:
for ii, quantile in enumerate(ciset['quantiles']):
ci = generate_quantiles(credible, np.array(quantile))
ax.fill_between(time, ci[0], ci[1], facecolor=ciset['colors'][ii],
label=ciset['labels'][ii], **interval_display)
# add model (median model response)
if addmodel is True:
ci = generate_quantiles(credible, np.array(0.5))
ax.plot(time, ci, **model_display)
# add data to plot
if ydata is not None and adddata is None:
adddata = True
if adddata is True and ydata is not None:
if xdata is None:
ax.plot(time, ydata, **data_display)
else:
ax.plot(xdata, ydata, **data_display)
# add legend
if addlegend is True:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc=legloc)
if return_settings is True:
return fig, ax, dict(ciset=ciset, piset=piset)
else:
return fig, ax
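# Illustrative sketch (assumed names) of interval-specific settings: `ciset`
# and `piset` override the default `limits`, colormap and colors per interval
# type, as parsed above via __setup_iset and _check_limits.
#
#     ciset = dict(limits=[50, 90], cmap=cm.Blues)
#     piset = dict(limits=[95], colors=['0.8'])
#     fig, ax = plot_intervals(intervals, time, ciset=ciset, piset=piset)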
# --------------------------------------------
def plot_3d_intervals(intervals, time, ydata=None, xdata=None,
limits=[95],
adddata=False, addlegend=True,
addmodel=True, figsize=None, model_display={},
data_display={}, interval_display={},
addcredible=True, addprediction=True,
fig=None, legloc='upper left',
ciset=None, piset=None,
return_settings=False):
'''
Plot propagation intervals in 3-D
This routine takes the model distributions generated using the
:func:`~calculate_intervals` method and then plots specific
quantiles. The user can plot just the intervals, or also include the
median model response and/or observations. Specific settings for
credible intervals are controlled by defining the `ciset` dictionary.
Likewise, for prediction intervals, settings are defined using `piset`.
The setting options available for each interval are as follows:
- `limits`: This should be a list of numbers between 0 and 100, e.g.,
`limits=[50, 90]` will result in 50% and 90% intervals.
- `cmap`: The program is designed to "try" to choose colors that
are visually distinct. The user can specify the colormap to choose
from.
- `colors`: The user can specify the color they would like for each
interval in a list, e.g., ['r', 'g', 'b']. This list should have
the same number of elements as `limits` or the code will revert
back to its default behavior.
Args:
* **intervals** (:py:class:`dict`): Interval dictionary generated
using :meth:`calculate_intervals` method.
* **time** (:class:`~numpy.ndarray`): Independent variable, i.e.,
x- and y-axes of plot. Note, it must be a 2-D array with
shape=(N, 2), where N is the number of evaluation points.
Kwargs:
* **ydata** (:class:`~numpy.ndarray` or None): Observations, expect
1-D array if defined.
* **xdata** (:class:`~numpy.ndarray` or None): Independent values
corresponding to observations. This is required if the observations
do not align with your times of generating the model response.
* **limits** (:py:class:`list`): Quantile limits that correspond to
percentage size of desired intervals. Note, this is the default
limits, but specific limits can be defined using the `ciset` and
`piset` dictionaries.
* **adddata** (:py:class:`bool`): Flag to include data
* **addmodel** (:py:class:`bool`): Flag to include median model
response
* **addlegend** (:py:class:`bool`): Flag to include legend
* **addcredible** (:py:class:`bool`): Flag to include credible
intervals
* **addprediction** (:py:class:`bool`): Flag to include prediction
intervals
* **model_display** (:py:class:`dict`): Display settings for median
model response
* **data_display** (:py:class:`dict`): Display settings for data
* **interval_display** (:py:class:`dict`): General display settings
for intervals.
* **fig**: Handle of previously created figure object
* **figsize** (:py:class:`tuple`): (width, height) in inches
* **legloc** (:py:class:`str`): Legend location - matplotlib help for
details.
* **ciset** (:py:class:`dict`): Settings for credible intervals
* **piset** (:py:class:`dict`): Settings for prediction intervals
* **return_settings** (:py:class:`bool`): Flag to return ciset and
piset along with fig and ax.
Returns:
* (:py:class:`tuple`) with elements
1) Figure handle
2) Axes handle
3) Dictionary with `ciset` and `piset` inside (only
outputted if `return_settings=True`)
'''
# unpack dictionary
credible = intervals['credible']
prediction = intervals['prediction']
# Check user-defined settings
ciset = __setup_iset(ciset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
piset = __setup_iset(piset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
# Check limits
ciset['limits'] = _check_limits(ciset['limits'], limits)
piset['limits'] = _check_limits(piset['limits'], limits)
# convert limits to ranges
ciset['quantiles'] = _convert_limits(ciset['limits'])
piset['quantiles'] = _convert_limits(piset['limits'])
# setup display settings
interval_display, model_display, data_display = setup_display_settings(
interval_display, model_display, data_display)
# Define colors
ciset['colors'] = setup_interval_colors(ciset, inttype='ci')
piset['colors'] = setup_interval_colors(piset, inttype='pi')
# Define labels
ciset['labels'] = _setup_labels(ciset['limits'], inttype='CI')
piset['labels'] = _setup_labels(piset['limits'], inttype='PI')
if fig is None:
fig = plt.figure(figsize=figsize)
ax = Axes3D(fig)
ax = fig.gca()
time1 = time[:, 0]
time2 = time[:, 1]
# add prediction intervals
if addprediction is True:
for ii, quantile in enumerate(piset['quantiles']):
pi = generate_quantiles(prediction, np.array(quantile))
# Add a polygon instead of fill_between
rev = np.arange(time1.size - 1, -1, -1)
x = np.concatenate((time1, time1[rev]))
y = np.concatenate((time2, time2[rev]))
z = np.concatenate((pi[0], pi[1][rev]))
verts = [list(zip(x, y, z))]
surf = Poly3DCollection(verts,
color=piset['colors'][ii],
label=piset['labels'][ii])
# Add fix for legend compatibility
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
ax.add_collection3d(surf)
# add credible intervals
if addcredible is True:
for ii, quantile in enumerate(ciset['quantiles']):
ci = generate_quantiles(credible, np.array(quantile))
# Add a polygon instead of fill_between
rev = np.arange(time1.size - 1, -1, -1)
x = np.concatenate((time1, time1[rev]))
y = np.concatenate((time2, time2[rev]))
z = np.concatenate((ci[0], ci[1][rev]))
verts = [list(zip(x, y, z))]
surf = Poly3DCollection(verts,
color=ciset['colors'][ii],
label=ciset['labels'][ii])
# Add fix for legend compatibility
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
ax.add_collection3d(surf)
# add model (median model response)
if addmodel is True:
ci = generate_quantiles(credible, np.array(0.5))
ax.plot(time1, time2, ci, **model_display)
# add data to plot
if ydata is not None and adddata is None:
adddata = True
if adddata is True:
if xdata is None:
ax.plot(time1, time2, ydata.reshape(time1.shape), **data_display)
else: # User provided xdata array for observation points
ax.plot(xdata[:, 0], xdata[:, 1],
ydata.reshape(time1.shape), **data_display)
# add legend
if addlegend is True:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc=legloc)
if return_settings is True:
return fig, ax, dict(ciset=ciset, piset=piset)
else:
return fig, ax
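# Hedged sketch for the 3-D case (names are illustrative): `time` must be an
# (N, 2) array of evaluation points, e.g. built from two coordinate vectors.
#
#     tgrid = np.column_stack((t1, t2))
#     fig, ax = plot_3d_intervals(intervals3d, tgrid, limits=[90, 95])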
def check_s2chain(s2chain, nsimu):
'''
Check size of s2chain
Args:
* **s2chain** (:py:class:`float`, :class:`~numpy.ndarray`, or `None`):
Observation error variance chain or value
* **nsimu** (:py:class:`int`): No. of elements in chain
Returns:
* **s2chain** (:class:`~numpy.ndarray` or `None`)
'''
if s2chain is None:
return None
else:
if isinstance(s2chain, float):
s2chain = np.ones((nsimu,))*s2chain
if s2chain.ndim == 2:
if s2chain.shape[0] != nsimu:
s2chain = s2chain * np.ones((nsimu, s2chain.size))
else:
if s2chain.size != nsimu: # scalars provided for multiple QoI
s2chain = s2chain * np.ones((nsimu, s2chain.size))
return s2chain
# --------------------------------------------
def observation_sample(s2, y, sstype):
'''
Calculate model response with observation errors.
Args:
* **s2** (:class:`~numpy.ndarray`): Observation error(s).
* **y** (:class:`~numpy.ndarray`): Model responses.
* **sstype** (:py:class:`int`): Flag to specify sstype.
Returns:
* **opred** (:class:`~numpy.ndarray`): Model responses with observation errors.
'''
if sstype == 0:
opred = y + np.random.standard_normal(y.shape) * np.sqrt(s2)
elif sstype == 1: # sqrt
opred = (np.sqrt(y) + np.random.standard_normal(y.shape) * np.sqrt(s2))**2
elif sstype == 2: # log
opred = y*np.exp(np.random.standard_normal(y.shape) * np.sqrt(s2))
else:
sys.exit('Unknown sstype')
return opred
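# Worked example of the sstype=0 (Gaussian) branch above: with s2 = 0.25, each
# model response y_i is perturbed by a draw from N(0, 0.25), i.e.
#     opred = y + 0.5 * np.random.standard_normal(y.shape)
# since np.sqrt(0.25) == 0.5.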
# --------------------------------------------
def define_sample_points(nsample, nsimu):
'''
Define indices to sample from posteriors.
Args:
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
* **nsimu** (:py:class:`int`): Number of MCMC simulations.
Returns:
* **iisample** (:class:`~numpy.ndarray`): Array of indices in posterior set.
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
'''
# define sample points
if nsample >= nsimu:
iisample = range(nsimu) # sample all points from chain
nsample = nsimu
else:
# randomly sample from chain
iisample = np.ceil(np.random.rand(nsample)*nsimu) - 1
iisample = iisample.astype(int)
return iisample, nsample
# --------------------------------------------
def generate_quantiles(x, p=np.array([0.25, 0.5, 0.75])):
'''
Calculate empirical quantiles.
Args:
* **x** (:class:`~numpy.ndarray`): Observations from which to generate quantile.
* **p** (:class:`~numpy.ndarray`): Quantile limits.
Returns:
* (:class:`~numpy.ndarray`): Interpolated quantiles.
'''
# extract number of rows/cols from np.array
n = x.shape[0]
# define vector valued interpolation function
xpoints = np.arange(0, n, 1)
interpfun = interp1d(xpoints, np.sort(x, 0), axis=0)
# evaluation points
itpoints = (n - 1)*p
return interpfun(itpoints)
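# Worked example: for x = np.arange(1, 6).reshape(5, 1) and the default
# p = [0.25, 0.5, 0.75], the evaluation points are (5 - 1)*p = [1, 2, 3],
# so generate_quantiles(x) returns [[2.], [3.], [4.]].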
def setup_display_settings(interval_display, model_display, data_display):
'''
Compare user defined display settings with defaults and merge.
Args:
* **interval_display** (:py:class:`dict`): User defined settings for interval display.
* **model_display** (:py:class:`dict`): User defined settings for model display.
* **data_display** (:py:class:`dict`): User defined settings for data display.
Returns:
* **interval_display** (:py:class:`dict`): Settings for interval display.
* **model_display** (:py:class:`dict`): Settings for model display.
* **data_display** (:py:class:`dict`): Settings for data display.
'''
# Setup interval display
default_interval_display = dict(
linestyle=':',
linewidth=1,
alpha=1.0,
edgecolor='k')
interval_display = check_settings(default_interval_display, interval_display)
# Setup model display
default_model_display = dict(
linestyle='-',
color='k',
marker='',
linewidth=2,
markersize=5,
label='Model')
model_display = check_settings(default_model_display, model_display)
# Setup data display
default_data_display = dict(
linestyle='',
color='b',
marker='.',
linewidth=1,
markersize=5,
label='Data')
data_display = check_settings(default_data_display, data_display)
return interval_display, model_display, data_display
def setup_interval_colors(iset, inttype='CI'):
'''
Setup colors for empirical intervals
This routine attempts to distribute the color of the UQ intervals
based on a normalize color map. Or, it will assign user-defined
colors; however, this only happens if the correct number of colors
are specified.
Args:
* **iset** (:py:class:`dict`): This dictionary should contain the
following keys - `limits`, `cmap`, and `colors`.
Kwargs:
* **inttype** (:py:class:`str`): Type of uncertainty interval
Returns:
* **ic** (:py:class:`list`): List containing color for each interval
'''
limits, cmap, colors = iset['limits'], iset['cmap'], iset['colors']
norm = __setup_cmap_norm(limits)
cmap = __setup_default_cmap(cmap, inttype)
# assign colors using color map or using colors defined by user
ic = []
if colors is None: # No user defined colors
        for limit in limits:
            ic.append(cmap(norm(limit)))
else:
if len(colors) == len(limits): # correct number of colors defined
for color in colors:
ic.append(color)
else: # User defined the wrong number of colors
print('Note, user-defined colors were ignored. Using color map. '
+ 'Expected a list of length {}, but received {}'.format(
len(limits), len(colors)))
            for limit in limits:
                ic.append(cmap(norm(limit)))
return ic
# --------------------------------------------
def _setup_labels(limits, inttype='CI'):
'''
Setup labels for prediction/credible intervals.
'''
labels = []
for limit in limits:
labels.append(str('{}% {}'.format(limit, inttype)))
return labels
def _check_limits(limits, default_limits):
if limits is None:
limits = default_limits
limits.sort(reverse=True)
return limits
def _convert_limits(limits):
rng = []
for limit in limits:
limit = limit/100
rng.append([0.5 - limit/2, 0.5 + limit/2])
return rng
def __setup_iset(iset, default_iset):
'''
Setup interval settings by comparing user input to default
'''
if iset is None:
iset = {}
iset = check_settings(default_iset, iset)
return iset
def __setup_cmap_norm(limits):
if len(limits) == 1:
norm = mplcolor.Normalize(vmin=0, vmax=100)
else:
norm = mplcolor.Normalize(vmin=min(limits), vmax=max(limits))
return norm
def __setup_default_cmap(cmap, inttype):
if cmap is None:
if inttype.upper() == 'CI':
cmap = cm.autumn
else:
cmap = cm.winter
return cmap
|
py | 7dfeb3846b3c1e387f71b5244f435e9940cd4a2e | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil, sys;
""" Copy File.
This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""
def Main(src, dst):
return shutil.copyfile(src, dst)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1], sys.argv[2]))
|
py | 7dfeb3f4f7c1d389612e800109c9ba39c35ceb93 | input = """
%#maxint=10.
%number(0..10).
q(C) :- number(C).
a | b :- p(A,B), q(C), r(X,A), q(X), not s(X), not t(C).
p(1,1).
r(3,1).
s(5).
s(0).
t(0).
t(2).
"""
output = """
%#maxint=10.
%number(0..10).
q(C) :- number(C).
a | b :- p(A,B), q(C), r(X,A), q(X), not s(X), not t(C).
p(1,1).
r(3,1).
s(5).
s(0).
t(0).
t(2).
"""
|
py | 7dfeb490479e24d8548934ee8891e774440e26d1 | from django.urls import path
from . import views
urlpatterns = [
path("", views.IndexRedirectView.as_view()),
]
|
py | 7dfeb4c72f1db3152a850607476bdfe94ab9a4a0 | from typing import List
from ..object import Object
from pyrogram import raw, utils, types
import pyrogram
class Channel(Object):
def __init__(
self,
*,
client: "pyrogram.Client" = None,
id: int = None,
title: str = None,
is_forbidden: bool = None,
forbidden_until: int = None,
username: str = None,
photo: "types.ChatPhoto" = None,
is_creator: bool = None,
left: bool = None,
is_broadcast: bool = None,
is_verified: bool = None,
is_supergroup: bool = None,
is_restricted: bool = None,
signatures_enabled: bool = None,
min: bool = None,
is_scam: bool = None,
is_fake: bool = None,
has_private_join_link: bool = None,
has_geo: bool = None,
slow_mode: bool = None,
access_hash: int = None,
date: int = None,
version: int = None,
restrictions: List["types.Restriction"] = None,
admin_rights: "types.ChatAdminRights" = None,
banned_rights: "types.ChatPermissions" = None,
default_banned_rights: "types.ChatPermissions" = None,
members_count: int = None,
):
super().__init__(client)
self.id = id
self.title = title
self.is_forbidden = is_forbidden
self.forbidden_until = forbidden_until
self.username = username
self.photo = photo
self.is_creator = is_creator
self.left = left
self.is_broadcast = is_broadcast
self.is_verified = is_verified
self.is_supergroup = is_supergroup
self.is_restricted = is_restricted
self.signatures_enabled = signatures_enabled
self.min = min
self.is_scam = is_scam
self.is_fake = is_fake
self.has_private_join_link = has_private_join_link
self.has_geo = has_geo
self.slow_mode = slow_mode
self.access_hash = access_hash
self.date = date
self.version = version
self.restrictions = restrictions
self.admin_rights = admin_rights
self.banned_rights = banned_rights
self.default_banned_rights = default_banned_rights
self.members_count = members_count
@staticmethod
def _parse(client, channel):
if channel is None:
return None
peer_id = utils.get_channel_id(channel.id)
return Channel(
client=client,
id=peer_id,
title=getattr(channel, 'title', None),
is_forbidden=isinstance(channel, raw.types.ChannelForbidden),
forbidden_until=getattr(channel, 'until_date', None),
username=getattr(channel, 'username', None),
photo=types.ChatPhoto._parse(client, getattr(channel, "photo", None), peer_id,
getattr(channel, 'access_hash', 0)),
is_creator=getattr(channel, 'creator', None),
left=getattr(channel, 'left', None),
is_broadcast=getattr(channel, 'broadcast', None),
is_verified=getattr(channel, 'verified', None),
is_supergroup=getattr(channel, 'megagroup', None),
is_restricted=getattr(channel, 'restricted', None),
signatures_enabled=getattr(channel, 'signatures', None),
min=getattr(channel, 'min', None),
is_scam=getattr(channel, 'scam', None),
is_fake=getattr(channel, "fake", None),
has_private_join_link=getattr(channel, 'has_link', None),
has_geo=getattr(channel, 'has_geo', None),
slow_mode=getattr(channel, 'slowmode_enabled', None),
access_hash=getattr(channel, 'access_hash', None),
date=getattr(channel, 'date', None),
version=getattr(channel, 'version', None),
restrictions=types.List(
[types.Restriction._parse(r) for r in getattr(channel, 'restriction_reason', [])]) or None,
admin_rights=types.ChatAdminRights._parse(getattr(channel, 'admin_rights', None)),
banned_rights=types.ChatPermissions._parse(getattr(channel, 'banned_rights', None)),
default_banned_rights=types.ChatPermissions._parse(getattr(channel, 'default_banned_rights', None)),
members_count=getattr(channel, 'participants_count', None),
)
@staticmethod
def _parse_input_channel(client: "pyrogram.Client", input_channel: raw.types.InputChannel):
if input_channel is None:
return None
peer_id = utils.get_channel_id(input_channel.channel_id)
return Channel(
client=client,
id=peer_id,
access_hash=getattr(input_channel, 'access_hash', None),
)
|
py | 7dfeb5040afd6bfd883f33ab5353f730b58cd5b8 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mips.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 7dfeb50f443e313e7f311027daac78f3c9ae4c26 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build a block from the txs with the minimum hash values
"""
def buildGoodEblock(node, numTxInBlock, termNum = 0):
# ascending sort by hash
sortedEblock = sorted(node.epool, key=lambda tx: tx.sha256ToInt("%s" % (termNum)))
block = sortedEblock[:numTxInBlock]
return block
"""
Build a block by the using b (num of txs in a block) different hash functions
and taking the txs that minimize each hash
"""
def buildGoodEblockMultipleHashFunc(node, numTxInBlock):
# TODO finish this procedure
for i in range(numTxInBlock):
# ascending sort by hash
sortedEpool = sorted(node.epool, \
key=lambda tx: tx.sha256ToInt("%s" % (i)))
block = sortedEpool[:numTxInBlock]
return block
"""
Build a block by taking txs with hash value lower than some threshold
"""
def buildGoodEblockConstThreshold(node, numTxInBlock, threshold):
epool = node.epool
block = []
blockSize = 0
for tx in epool:
if tx.hashInt < threshold:
block.append(tx)
blockSize += 1
if blockSize >= numTxInBlock:
break
return block
|
py | 7dfeb54d24029a171db52f3fb2998342e2f19a63 | #Developer: JuanAstaiza
#Date: 20-Sep-2021
'''
Description: This is our first Python script.
This script creates a PyGame window with the title Hello world!
'''
#1. Import libraries / packages
import pygame
import sys
# 2. Initialize Pygame
pygame.init()
# 3. Set the window size (w x h) of the video game
# General window settings (WxH, title, color)
width=800
height=400
mywindow=pygame.display.set_mode((width,height))
pygame.display.set_caption("Number Race v 1.0")
#Load the image
icon = pygame.image.load('images/car.png')
#Set the window icon
pygame.display.set_icon(icon)
# Set colors R (red), G (green), B (blue) => hex
# RGB => 0-255
#First option
white = pygame.Color(255,255,255)
red = pygame.Color(255,0,0)
green = pygame.Color(0,255,0)
blue = pygame.Color(0,0,255)
x = pygame.Color(140,217,150)
y = pygame.Color(255,229,143)
#Second option
bgColor = (100,100,100)
#Shapes
##Rectangle
rect1=pygame.Rect(150,300,150,50) #x,y,w,h
rect2=pygame.Rect(350,230,150,50) #x,y,w,h
rect1.center = (width // 2,height // 2)
#The midpoint of 800 (floor division) - computes the screen size to find the center point
print(rect2.x)
print(rect2.y)
##Circle
#4. Keep the window visible / open on screen
while True:
for event in pygame.event.get():
        if event.type == pygame.QUIT: # Check whether the user pressed the close (x) button
            pygame.quit() # Close the window
            sys.exit() # Exit
mywindow.fill(x)
    pygame.draw.rect(mywindow,red,rect1) # context (window), color, rectangle
    pygame.draw.rect(mywindow,blue,rect2) # context (window), color, rectangle
pygame.draw.rect(mywindow,green,(50,50,50,50))
pygame.draw.line(mywindow,red,(10,10),(300,10),5)
pygame.draw.circle(mywindow,y,(400,200),100)
pygame.draw.polygon(mywindow, blue,((0,300), (100,200),(200,300)))
pygame.display.update() |
py | 7dfeb585ad3cee27b83a42be93794ce6ca904db5 | from django.apps import AppConfig
#registry of model streams
class policyEngineConfig(AppConfig):
name = 'policyengine'
def ready(self):
import policyengine.handlers
from actstream import registry
registry.register(self.get_model('GovernableAction'))
registry.register(self.get_model('GovernableActionBundle'))
registry.register(self.get_model('Policy'))
registry.register(self.get_model('CommunityUser'))
registry.register(self.get_model('CommunityRole'))
registry.register(self.get_model('BooleanVote'))
registry.register(self.get_model('NumberVote'))
registry.register(self.get_model('Proposal'))
registry.register(self.get_model('CommunityDoc'))
|
py | 7dfeb5a8aaa0321d95dbe55f28247e44bd5d83b0 | print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n")
word = input("Enter a word: ")
print(word[::-1]) |
py | 7dfeb5e6bde4f2ecf90b2479e18aa0aa9ee0bd6a | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Pretty-Print an Interface object as structured text (Yum)
This module provides a function, asStructuredText, for rendering an
interface as structured text.
"""
import zope.interface
def asStructuredText(I, munge=0):
""" Output structured text format. Note, this will whack any existing
'structured' format of the text. """
r = [I.getName()]
outp = r.append
level = 1
if I.getDoc():
outp(_justify_and_indent(_trim_doc_string(I.getDoc()), level))
bases = [base
for base in I.__bases__
if base is not zope.interface.Interface
]
if bases:
outp(_justify_and_indent("This interface extends:", level, munge))
level += 1
for b in bases:
item = "o %s" % b.getName()
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
level -= 1
namesAndDescriptions = sorted(I.namesAndDescriptions())
outp(_justify_and_indent("Attributes:", level, munge))
level += 1
for name, desc in namesAndDescriptions:
if not hasattr(desc, 'getSignatureString'): # ugh...
item = "%s -- %s" % (desc.getName(),
desc.getDoc() or 'no documentation')
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
level -= 1
outp(_justify_and_indent("Methods:", level, munge))
level += 1
for name, desc in namesAndDescriptions:
if hasattr(desc, 'getSignatureString'): # ugh...
item = "%s%s -- %s" % (desc.getName(),
desc.getSignatureString(),
desc.getDoc() or 'no documentation')
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
return "\n\n".join(r) + "\n\n"
def _trim_doc_string(text):
""" Trims a doc string to make it format
correctly with structured text. """
lines = text.replace('\r\n', '\n').split('\n')
nlines = [lines.pop(0)]
if lines:
min_indent = min([len(line) - len(line.lstrip())
for line in lines])
for line in lines:
nlines.append(line[min_indent:])
return '\n'.join(nlines)
def _justify_and_indent(text, level, munge=0, width=72):
""" indent and justify text, rejustify (munge) if specified """
indent = " " * level
if munge:
lines = []
line = indent
text = text.split()
for word in text:
line = ' '.join([line, word])
if len(line) > width:
lines.append(line)
line = indent
else:
lines.append(line)
return '\n'.join(lines)
else:
return indent + \
text.strip().replace("\r\n", "\n") .replace("\n", "\n" + indent)
|
py | 7dfeb5ecaaaee43220e0e4369bd828153d0545c5 | # -*- coding: utf-8 -*-
import json
from chanjo.cli import root
from chanjo.store.models import Sample
from chanjo.cli.calculate import dump_json
def test_mean(popexist_db, cli_runner):
    # GIVEN an existing database with one sample
assert Sample.query.count() == 1
# WHEN assessing the mean values metrics per sample
res = cli_runner.invoke(root, ['-d', popexist_db.uri, 'calculate', 'mean'])
# THEN the command should return JSON results
assert res.exit_code == 0
# ... returns some debug info to STDERR that we strip away
lines = res.output.strip().split('\n')
assert len(lines) == 1
# ... the last row (no incl. empty line) is JSON formatted
data = json.loads(lines[0].strip())
assert data['sample_id'] == 'sample'
assert isinstance(data['mean_coverage'], float)
def test_dump_json():
# GIVEN some dict
data = {'name': 'PT Anderson', 'age': 45}
# WHEN dumping to JSON with pretty-option enabled
json = dump_json(data, pretty=True)
# THEN the output is formatted over multiple lines
assert isinstance(json, str)
assert len(json.split('\n')) == 4
|
py | 7dfeb6cce093abab78fbdfca356c5d8f79edc874 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Thanks to https://devblogs.microsoft.com/cppblog/clear-functional-c-documentation-with-sphinx-breathe-doxygen-cmake/
# This calls doxygen when we are running RTD
# Doxygen XML output is saved in docs/doxygen_output/xml
# This is then parsed using breathe and sphinx, using output from docs/doxygen_output/xml
import subprocess, os
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
breathe_projects = {}
if read_the_docs_build:
subprocess.call('doxygen', shell=True)
breathe_projects['TaskTorrent'] = 'doxygen_output/xml/'
# -- Project information -----------------------------------------------------
project = 'TaskTorrent'
copyright = '2019-2020, Leopold Cambier'
author = 'Leopold Cambier'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ "breathe" ]
breathe_default_project = "TaskTorrent"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
py | 7dfeb845ef24d4653bf1b8a490d35b3a0f281fa9 | # Copyright 2020 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from re import T
from typing import Callable, Iterable, List, Union
from b4msa.textmodel import TextModel
from microtc.utils import load_model
from microtc import emoticons
from EvoMSA.utils import download
from collections import OrderedDict, defaultdict
from microtc.utils import Counter
from os.path import isfile, join, dirname
from microtc.textmodel import TextModel
from microtc.params import OPTION_DELETE, OPTION_NONE
from microtc.utils import tweet_iterator
from .place import BoundingBox, location
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, **kwargs):
return x
class Dataset(object):
"""
Self-supervised learning requires the automatic construction of a
labeled dataset. This class contains different methods to facilitate
the build of this type of dataset, starting from an unlabeled corpus.
>>> from text_models.dataset import Dataset
>>> dataset = Dataset()
>>> dataset.add(dataset.load_emojis())
>>> dataset.add(dataset.tm_words())
>>> result = dataset.klass("good morning Mexico")
>>> dataset.process("good morning Mexico")
['~', '~mexico~']
"""
def __init__(self, lang="En"):
self._lang = lang
self._map = dict()
@property
def textModel(self):
"Text model used to process the texts"
try:
return self._tm
except AttributeError:
self._tm = load_model(download("b4msa_%s.tm" % self._lang))
return self._tm
@staticmethod
def load_emojis():
def download(fname):
from urllib import request
import os
output = fname.split("/")[-1]
if os.path.isfile(output):
return output
request.urlretrieve(fname, output)
return output
if isfile(join(dirname(__file__), "data", "emojis.dict")):
return load_model(join(dirname(__file__), "data", "emojis.dict"))
data = "https://www.unicode.org/Public/emoji/12.1/emoji-data.txt"
sec = "https://www.unicode.org/Public/emoji/12.1/emoji-sequences.txt"
var ="https://www.unicode.org/Public/emoji/12.1/emoji-variation-sequences.txt"
zwj = "https://www.unicode.org/Public/emoji/12.1/emoji-zwj-sequences.txt"
emos = emoticons.read_emoji_standard(download(data))
emoticons.read_emoji_standard(download(sec), emos)
emoticons.read_emoji_standard(download(var), emos)
emoticons.read_emoji_standard(download(zwj), emos)
return {x: True for x in emos.keys()}
def tm_words(self):
"""
Text model words
:rtype: dict
"""
tm = self.textModel.text_transformations
emos = self.load_emojis()
words = [tm(k) for k in self.textModel.model.word2id.keys()
if k[:2] != "q:" and k.count("~") == 0 and k not in emos]
words.sort()
_ = OrderedDict([(w, True) for w in words])
return _
def aggress_words(self):
from EvoMSA import base
from microtc.utils import tweet_iterator
import os
lang = self._lang.lower()
fname = os.path.join(os.path.dirname(base.__file__), 'conf',
'aggressiveness.%s' % lang)
data = list(tweet_iterator(fname))[0]
tm = self.textModel.text_transformations
return {tm(x): True for x in data["words"]}
def affective_words(self):
from ConceptModelling import text_preprocessing as base
from microtc.utils import tweet_iterator
import os
lang = self._lang.lower()
fname = os.path.join(os.path.dirname(base.__file__), 'data',
'%s.affective.words.json' % lang)
tm = self.textModel.text_transformations
words = dict()
for data in tweet_iterator(fname):
words.update({tm(x): True for x in data["words"]})
return words
@property
def klasses(self):
"""Labels or words"""
try:
return self._words
except AttributeError:
self._words = OrderedDict()
return self._words
def add(self, data):
"""
Add words to the processor
:param data: words
:type data: dict
"""
self._map.update({k: v for k, v in data.items() if not isinstance(v, bool)})
words = self.klasses
words.update(data)
if hasattr(self, "_data_structure"):
del self._data_structure
@property
def data_structure(self):
try:
return self._data_structure
except AttributeError:
_ = emoticons.create_data_structure
self._data_structure = _(self.klasses)
return self._data_structure
def klass(self, text):
"""
Labels in a text
:param text:
:type text: str
:returns: The labels in the text
:rtype: set
"""
get = self._map.get
text = self.textModel.text_transformations(text)
lst = self.find_klass(text)
_ = [text[a:b] for a, b in lst]
return set([get(x, x) for x in _])
def find_klass(self, text):
"""Obtain the position of each label in the text
:param text: text
:type text: str
:return: list of pairs, init and end of the word
:rtype: list
"""
blocks = list()
init = i = end = 0
head = self.data_structure
current = head
text_length = len(text)
while i < text_length:
char = text[i]
try:
current = current[char]
i += 1
if "__end__" in current:
end = i
except KeyError:
current = head
if end > init:
blocks.append([init, end])
if (end - init) > 2 and text[end - 1] == '~':
init = i = end = (end - 1)
else:
init = i = end
elif i > init:
if (i - init) > 2 and text[i - 1] == '~':
init = end = i = (i - 1)
else:
init = end = i
else:
init += 1
i = end = init
if end > init:
blocks.append([init, end])
return blocks
def process(self, text, klass=None):
"""
Remove klass from text
:param text:
:type text: str
:param klass:
:type klass: str
:rtype: list
"""
text = self.textModel.text_transformations(text)
lst = self.find_klass(text)
if klass is not None:
lst = [[a, b] for a, b in lst if text[a:b] == klass]
lst.reverse()
init = 0
B = []
text_len = len(text)
while len(lst):
a, b = lst.pop()
if (b - a) > 2:
if a < text_len and text[a] == "~" and a > 0:
a += 1
if b > 0 and text[b-1] == "~" and b < text_len:
b -= 1
B.append(text[init:a])
init = b
if init < len(text):
B.append(text[init:])
return [x for x in B if len(x)]
def remove(self, klass):
"""
Remove label from the processor
:param klass:
:type klass: str
"""
del self.klasses[klass]
if hasattr(self, "_data_structure"):
del self._data_structure
class TokenCount(object):
"""Count frequency"""
def __init__(self, tokenizer: Callable[[Union[str, dict]], Iterable[str]]) -> None:
self._tokenizer = tokenizer
self._counter = Counter()
@property
def counter(self) -> Counter:
return self._counter
@property
def num_documents(self) -> int:
return self.counter.update_calls
def process(self, iterable: Iterable[Union[str, dict]]) -> None:
pl = self.process_line
[pl(line) for line in iterable]
def process_line(self, txt: Union[str, dict]) -> None:
self.counter.update(self._tokenizer(txt))
def clean(self) -> None:
counter = self.counter
min_value = 0.0001 * counter.update_calls
min_value = max(2, min_value)
keys = list(counter.keys())
for k in keys:
if counter[k] <= min_value:
del counter[k]
@staticmethod
def textModel(token_list) -> TextModel:
tm = TextModel(num_option=OPTION_DELETE, usr_option=OPTION_NONE,
url_option=OPTION_DELETE, emo_option=OPTION_NONE,
hashtag_option=OPTION_NONE,
del_dup=False, del_punc=True, token_list=token_list)
return tm
@classmethod
def bigrams(cls) -> "TokenCount":
tm = cls.textModel(token_list=[-2])
return cls(tokenizer=tm.tokenize)
@classmethod
def co_ocurrence(cls) -> "TokenCount":
tm = cls.textModel(token_list=[-1])
def co_ocurrence(txt):
tokens = tm.tokenize(txt)
for k, frst in enumerate(tokens[:-1]):
for scnd in tokens[k+1:]:
if frst == scnd:
yield frst
else:
_ = [frst, scnd]
_.sort()
yield "~".join(_)
return cls(tokenizer=co_ocurrence)
@classmethod
def single_co_ocurrence(cls) -> "TokenCount":
tm = cls.textModel(token_list=[-1])
def co_ocurrence(txt):
tokens = tm.tokenize(txt)
for k, frst in enumerate(tokens[:-1]):
for scnd in tokens[k+1:]:
if frst != scnd:
_ = [frst, scnd]
_.sort()
yield "~".join(_)
for x in tokens:
yield x
return cls(tokenizer=co_ocurrence)
class GeoFrequency(object):
def __init__(self, fnames: Union[list, str],
reader: Callable[[str], Iterable[dict]]=tweet_iterator) -> None:
self._fnames = fnames if isinstance(fnames, list) else [fnames]
self._reader = reader
self._label = BoundingBox().label
self._data = defaultdict(TokenCount.single_co_ocurrence)
_ = join(dirname(__file__), "data", "state.dict")
self._states = load_model(_)
@property
def data(self) -> defaultdict:
return self._data
def compute(self) -> None:
for fname in tqdm(self._fnames):
self.compute_file(fname)
def compute_file(self, fname: str) -> None:
label = self._label
states = self._states
data = self._data
for line in self._reader(fname):
try:
country, geo = None, None
country = line["place"]["country_code"]
geo = label(dict(position=location(line), country=country))
geo = states[geo]
except Exception:
pass
if geo is not None:
data[geo].process_line(line)
elif country is not None:
data[country].process_line(line)
else:
data["nogeo"].process_line(line)
def clean(self) -> None:
keys = list(self.data.keys())
data = self.data
max_value = max([x.num_documents for x in data.values()])
min_value = 0.0001 * max_value
min_value = max(2, min_value)
for key in keys:
data[key].clean()
if len(data[key].counter) == 0 or data[key].num_documents <= min_value:
del data[key] |
py | 7dfeb887ef361d3b25970046065979cd6edf54a6 | import sys
from grndiag.parser import Parser
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestParser(unittest.TestCase):
|
py | 7dfeb8ab7ba61e04844b1e80b5f16dce88285c03 | import requests
import json
from .helper import Helper
"""
- ERROR 403 v1/celery_tasks/last/
- ERROR 404 v1/celery_tasks/last/{org_pk}/
"""
class Celery_tasks(Helper):
def __init__(self, base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination):
super().__init__(base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination)
# TODO GET on /api/v1/celery_tasks/last/
def get_last_celery_task(self):
""" """
route = 'v1/celery_tasks/last/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_celery_tasks_list(self, page=1):
""" Get the list of celery tasks """
route = 'v1/celery_tasks/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response) |
py | 7dfeb95f803dbc59f20f65547a1017edaf039247 | # -*- coding: utf-8 -*-
"""
smash.models.verify_model_response
This file was automatically generated for SMASH by SMASH v2.0 ( https://smashlabs.io )
"""
class VerifyModelResponse(object):
"""Implementation of the 'Verify Model Response' model.
TODO: type model description here.
Attributes:
request (string): TODO: type description here.
to (string): TODO: type description here.
verified (string): TODO: type description here.
id (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"request" : "request",
"to" : "to",
"verified" : "verified",
"id" : "id"
}
def __init__(self,
request=None,
to=None,
verified=None,
id=None,
additional_properties = {}):
"""Constructor for the VerifyModelResponse class"""
# Initialize members of the class
self.request = request
self.to = to
self.verified = verified
self.id = id
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
request = dictionary.get("request")
to = dictionary.get("to")
verified = dictionary.get("verified")
id = dictionary.get("id")
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(request,
to,
verified,
id,
dictionary)
|
py | 7dfeba78bae3cc5f2942ef3697d3353d7339d799 | import pytest
from pandas.util import testing as tm
import ibis
from ibis.backends.base_file import FileDatabase
from ibis.backends.csv import CSVClient, CSVTable
@pytest.fixture
def transformed(csv):
# we need to cast to a timestamp type
# as we read in as strings
closes = csv.csv_dir.close
closes = closes.mutate(time=closes.time.cast('timestamp'))
opens = csv.csv_dir.open
opens = opens.mutate(time=opens.time.cast('timestamp'))
t = opens.inner_join(closes, ['time', 'ticker'])
t = t[opens, closes.close]
t = t.mutate(avg=(t.open + t.close) / 2)
t = t[['time', 'ticker', 'avg']]
return t
def test_client(tmpdir, file_backends_data):
# construct with a path to a file
csv = tmpdir
for k, v in file_backends_data.items():
f = csv / '{}.csv'.format(k)
v.to_csv(str(f), index=False)
c = CSVClient(csv / 'open.csv')
assert c.list_databases() == []
assert c.list_tables() == ['open']
c = CSVClient(csv / 'close.csv')
assert c.list_databases() == []
assert c.list_tables() == ['close']
def test_navigation(csv):
# directory navigation
assert isinstance(csv, FileDatabase)
result = dir(csv)
assert result == ['csv_dir']
prices = csv.csv_dir
assert isinstance(prices, FileDatabase)
result = dir(prices)
assert result == ['close', 'open']
result = prices.list_tables()
assert result == ['close', 'open']
opens = prices.open
assert isinstance(opens.op(), CSVTable)
closes = prices.close
assert isinstance(closes.op(), CSVTable)
def test_read(csv, file_backends_data):
closes = csv.csv_dir.close
assert str(closes) is not None
result = closes.execute()
expected = file_backends_data['close']
# csv's don't preserve dtypes
expected['time'] = expected['time'].astype(str)
tm.assert_frame_equal(result, expected)
result = closes.execute()
tm.assert_frame_equal(result, expected)
def test_read_with_projection(csv2, file_backends_data):
t = csv2.csv_dir2.df
result = t.execute()
assert 'close' in result.columns
assert 'open' in result.columns
t = t[['time', 'ticker', 'close']]
result = t.execute()
assert 'close' in result.columns
assert 'open' not in result.columns
def test_insert(transformed, tmpdir):
t = transformed
# csv's don't preserve dtypes
expected = t.execute()
expected['time'] = expected['time'].astype(str)
tpath = tmpdir / 'new_csv'
tpath.mkdir()
path = tpath / 'foo.csv'
assert not path.exists()
c = ibis.csv.connect(tpath)
c.insert('foo.csv', t)
assert path.exists()
# readback
t = CSVClient(str(tpath)).database()
result = t.list_tables()
assert result == ['foo']
result = t.foo.execute()
tm.assert_frame_equal(result, expected)
|
py | 7dfebab94a2f56763b3a46192868fb888f305971 | # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as glance_client
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def drop_resource(service, resource_id):
_glance_client = GlanceClient()
if service == 'image':
_glance_client.delete_image(resource_id)
class GlanceClient(object):
def __init__(self):
ks_loading.register_session_conf_options(CONF, "glance_client")
ks_loading.register_auth_conf_options(CONF, "glance_client")
self.auth = ks_loading.load_auth_from_conf_options(
CONF,
"glance_client")
self.session = ks_loading.load_session_from_conf_options(
CONF,
"glance_client",
auth=self.auth)
self.glance_client = glance_client.Client(
version='1',
session=self.session,
auth=self.auth)
def delete_image(self, image_id, region_name=None):
self.glance_client.images.delete(image_id)
|
py | 7dfebb0b60e95620e6d159c0a161d0e61a5f9d13 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_host
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower host.
description:
- Create, update, or destroy Ansible Tower hosts. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the host.
required: True
type: str
description:
description:
- The description to use for the host.
type: str
inventory:
description:
- Inventory the host should be made a member of.
required: True
type: str
enabled:
description:
- If the host should be enabled.
type: bool
default: 'yes'
variables:
description:
- Variables to use for the host. Use C(@) for a file.
type: str
state:
description:
- Desired state of the resource.
choices: ["present", "absent"]
default: "present"
type: str
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: Add tower host
tower_host:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
variables:
example_var: 123
'''
import os
from ..module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(),
inventory=dict(required=True),
enabled=dict(type='bool', default=True),
variables=dict(),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
description = module.params.get('description')
inventory = module.params.get('inventory')
enabled = module.params.get('enabled')
state = module.params.get('state')
variables = module.params.get('variables')
if variables:
if variables.startswith('@'):
filename = os.path.expanduser(variables[1:])
with open(filename, 'r') as f:
variables = f.read()
json_output = {'host': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
host = tower_cli.get_resource('host')
try:
inv_res = tower_cli.get_resource('inventory')
inv = inv_res.get(name=inventory)
if state == 'present':
result = host.modify(name=name, inventory=inv['id'], enabled=enabled,
variables=variables, description=description, create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = host.delete(name=name, inventory=inv['id'])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update host, inventory not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update host: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
py | 7dfebb0d3c654dd83dd83085ca7624687dc9e4da | # coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemainingCreditModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'child': 'RemainingCreditModelChild',
'reseller': 'RemainingCreditModelReseller'
}
attribute_map = {
'child': 'child',
'reseller': 'reseller'
}
def __init__(self, child=None, reseller=None): # noqa: E501
"""RemainingCreditModel - a model defined in Swagger""" # noqa: E501
self._child = None
self._reseller = None
self.discriminator = None
self.child = child
self.reseller = reseller
@property
def child(self):
"""Gets the child of this RemainingCreditModel. # noqa: E501
:return: The child of this RemainingCreditModel. # noqa: E501
:rtype: RemainingCreditModelChild
"""
return self._child
@child.setter
def child(self, child):
"""Sets the child of this RemainingCreditModel.
:param child: The child of this RemainingCreditModel. # noqa: E501
:type: RemainingCreditModelChild
"""
if child is None:
raise ValueError("Invalid value for `child`, must not be `None`") # noqa: E501
self._child = child
@property
def reseller(self):
"""Gets the reseller of this RemainingCreditModel. # noqa: E501
:return: The reseller of this RemainingCreditModel. # noqa: E501
:rtype: RemainingCreditModelReseller
"""
return self._reseller
@reseller.setter
def reseller(self, reseller):
"""Sets the reseller of this RemainingCreditModel.
:param reseller: The reseller of this RemainingCreditModel. # noqa: E501
:type: RemainingCreditModelReseller
"""
if reseller is None:
raise ValueError("Invalid value for `reseller`, must not be `None`") # noqa: E501
self._reseller = reseller
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemainingCreditModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemainingCreditModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 7dfebbc9bc1eb680e56c77fb96a186ab7f90e1be | import os, sys, re, json
from subprocess import Popen, PIPE, STDOUT
# By default use builds/ammo.js. Or the commandline argument can override that.
build = os.path.join('builds', 'temp.js')
if len(sys.argv) > 1:
build = sys.argv[1]
print 'Using build:', build
exec(open(os.path.expanduser('~/.emscripten'), 'r').read())
JS_ENGINE = SPIDERMONKEY_ENGINE
#JS_ENGINE = V8_ENGINE # Note: fails stress due to floating point differences
print
print '==================================='
print
def run(filename):
if JS_ENGINE == SPIDERMONKEY_ENGINE:
return Popen(JS_ENGINE + ['-e', 'gcparam("maxBytes", 1024*1024*1024); load("' + build + '"); load("' + os.path.join('tests', 'testutils.js') + '")', filename], stdout=PIPE).communicate()[0]
else:
return Popen(JS_ENGINE + [build, os.path.join('tests', 'testutils.js'), filename], stdout=PIPE).communicate()[0]
__counter = 0
def stage(text):
global __counter
print __counter, ':', text
__counter += 1
if len(sys.argv) != 3 or sys.argv[2] != 'benchmark':
stage('regression tests')
for test in ['basics', 'wrapping', '2', '3', 'constraint']:
name = test + '.js'
print ' ', name
fullname = os.path.join('tests', name)
output = run(fullname)
assert 'ok.' in output, output
stage('hello world')
output = run(os.path.join('examples', 'hello_world.js'))
#assert open(os.path.join('examples', 'hello_world.txt')).read() in output, output
assert 'ok.' in output, output
stage('stress')
output = run(os.path.join('tests', 'stress.js'))
#assert
'''
0 : 0.00,-56.00,0.00
1 : 13.03,-5.00,-5.24
2 : 2.43,-5.00,-3.49
3 : -6.77,-5.54,-0.77
4 : -2.55,-5.00,2.84
5 : 11.75,-5.00,11.25
6 : 2.35,-5.61,0.12
total time:''' in output#, output
assert 'ok.' in output, output
print ' stress benchmark: ' + output.split('\n')[-3]
print
print '==================================='
print
print 'ok.'
|
py | 7dfebbda1bdf7ecdbc7317ce0cc339d246de6c67 | """
Training the SNRM model.
Authors: Hamed Zamani ([email protected])
"""
import pymongo
import logging
import numpy as np
import tensorflow as tf
import json
from dictionary import Dictionary
from params import FLAGS
from snrm import SNRM
import time
from util import check_gpu_available, my_tokenize
import random
import os
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
# layer_size is a list containing the size of each layer. It can be set through the 'hiddein_x' arguments.
# layer_size = [FLAGS.emb_dim]
layer_size = []
for i in [FLAGS.hidden_1, FLAGS.hidden_2, FLAGS.hidden_3]: #, FLAGS.hidden_4]: # ,FLAGS.hidden_5]:
if i <= 0:
break
layer_size.append(i)
# Dictionary is a class containing terms and their IDs. The implemented class just load the terms from a Galago dump
# file. If you are not using Galago, you have to implement your own reader. See the 'dictionary.py' file.
dictionary = Dictionary()
print("loading dictionary...")
# dictionary.load_from_galago_dump(FLAGS.base_path + FLAGS.dict_file_name, FLAGS.dict_min_freq)
dictionary.load_my_dict(FLAGS.base_path + FLAGS.dict_file_name, FLAGS.dict_min_freq)
print("creating SNRM model...")
# The SNRM model.
snrm = SNRM(dictionary=dictionary,
pre_trained_embedding_file_name=FLAGS.base_path + FLAGS.pre_trained_embedding_file_name,
batch_size=FLAGS.batch_size,
max_q_len=FLAGS.max_q_len,
max_doc_len=FLAGS.max_doc_len,
emb_dim=FLAGS.emb_dim,
layer_size=layer_size,
dropout_parameter=FLAGS.dropout_parameter,
regularization_term=FLAGS.regularization_term,
learning_rate=FLAGS.learning_rate)
client = pymongo.MongoClient()
db = client.snrm
# doc_coll = db.docs_exp
doc_coll = db.docs2
all_doc_map = {}
print("loading all documents...")
for doc in doc_coll.find({}):
all_doc_map[doc["docNo"]] = doc["tokens"]
print("documents loaded.")
def tokens2vec(tokens, length):
data = [0] * length
count = 0
for token in tokens:
if token in dictionary.term_to_id.keys():
if count >= length:
break
data[count] = dictionary.term_to_id[token]
count += 1
return data
def get_tokens(docNo):
global all_doc_map
return all_doc_map[docNo]
def generate_batch(batch_size, mode='train'):
"""
Generating pairwise training or validation data for each batch. This function should be implemented.
Note: For unknown terms term ID should be set to zero. Please use the dictionary size for padding. In other
words, padding value should be |V|+1, where |V| is the vocabulary size.
Args:
batch_size (int): total number of training or validation data in each batch.
mode (str): should be either 'train' or 'valid'.
Returns:
batch_query (list): a 2D list of int containing query term IDs with size (batch_size * FLAGS.max_q_len).
batch_doc1 (list): a 2D list of int containing doc 1 term IDs with size (batch_size * FLAGS.max_doc_len).
batch_doc2 (list): a 2D list of int containing doc 2 term IDs with size (batch_size * FLAGS.max_doc_len).
batch_label (list): a 2D list of float within the range of [0, 1] with size (batch_size * 1).
Label shows the probability of doc1 being more relevant than doc2. This can simply be 0 or 1.
"""
# raise Exception('the generate_batch method is not implemented.')
global batch_index
batch_query = []
batch_doc1 = []
batch_doc2 = []
batch_label = []
if batch_index+batch_size >= data_size:
batch_index = batch_index + batch_size - data_size
batch_data = pair_wise_data[batch_index:batch_index+batch_size]
random.shuffle(batch_data) # shuffle the order
for data in batch_data:
q = tokens2vec(my_tokenize(data["q"]), FLAGS.max_q_len)
d1 = tokens2vec(get_tokens(data["d1_id"]), FLAGS.max_doc_len)
d2 = tokens2vec(get_tokens(data["d2_id"]), FLAGS.max_doc_len)
batch_query.append(q)
batch_doc1.append(d1)
batch_doc2.append(d2)
if data["label"] == -1:
batch_label.append(0.0)
else:
batch_label.append(1.0)
batch_index += batch_size
return batch_query, batch_doc1, batch_doc2, batch_label
batch_index = 0
print("loading pair wise dataset")
with open("../data/pair_wise_data.json", "r") as f:
pair_wise_data = json.load(f)
data_size = len(pair_wise_data)
print("dataset loaded, len:", data_size)
# # check the gpu availability
# while not check_gpu_available():
# time.sleep(1)
writer = tf.summary.FileWriter(FLAGS.base_path + FLAGS.log_path + FLAGS.run_name, graph=snrm.graph)
# Launch the graph
with tf.Session(graph=snrm.graph) as session:
session.run(snrm.init)
logging.info('Initialized')
# ckpt = tf.train.get_checkpoint_state(FLAGS.base_path + FLAGS.model_path)
# # print(FLAGS.base_path + FLAGS.model_path, "----", FLAGS.run_name + "164000")
# # print(ckpt)
# # exit()
# model_checkpoint_path = FLAGS.base_path + FLAGS.model_path + FLAGS.run_name + "162000"
# print(model_checkpoint_path)
# if os.path.exists(model_checkpoint_path + ".meta"):
# logging.info(model_checkpoint_path)
# snrm.saver.restore(session, model_checkpoint_path) # restore all variables
# logging.info('Load model from {:s}'.format(model_checkpoint_path))
# training
if not FLAGS.experiment_mode:
num_steps = FLAGS.num_train_steps
average_loss = 0
for step in range(num_steps):
query, doc1, doc2, labels = generate_batch(FLAGS.batch_size, 'train')
labels = np.array(labels)
labels = np.concatenate(
[labels.reshape(FLAGS.batch_size, 1), 1.-labels.reshape(FLAGS.batch_size, 1)], axis=1)
feed_dict = {snrm.query_pl: query,
snrm.doc1_pl: doc1,
snrm.doc2_pl: doc2,
snrm.labels_pl: labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val, summary = session.run([snrm.optimizer, snrm.loss, snrm.summary_op], feed_dict=feed_dict)
writer.add_summary(summary, step)
if step % 10 == 0:
print(step, batch_index, time.strftime("%Y-%m-%d %H:%M:%S"))
if (step % FLAGS.validate_every_n_steps == 0) and (step != 0):
valid_coss = 0.
valid_id = 0
doc_total_len = 0
doc_count = 0
q_total_len = 0
q_count = 0
all_zero_d_count = 0
all_zero_q_count = 0
for valid_step in range(FLAGS.num_valid_steps):
query, doc1, doc2, labels = generate_batch(FLAGS.batch_size, 'valid')
labels = np.array(labels)
labels = np.concatenate(
[labels.reshape(FLAGS.batch_size, 1), 1. - labels.reshape(FLAGS.batch_size, 1)], axis=1)
# print(labels)
feed_dict = {snrm.query_pl: query,
snrm.doc1_pl: doc1,
snrm.doc2_pl: doc2,
snrm.labels_pl: labels}
cost_val, doc_repr1, q_repr = session.run([snrm.cost,
snrm.d1_repr,
snrm.q_repr],
feed_dict=feed_dict)
# doc_repr1 = session.run(snrm.doc_representation, feed_dict={snrm.doc_pl: doc1})
# q_repr = session.run(snrm.query_representation, feed_dict={snrm.query_pl: query})
for i in range(FLAGS.batch_size):
                        d_c = 0  # count of non-zero entries in this document representation
for j in range(len(doc_repr1[i])):
if doc_repr1[i][j] > 0.:
d_c += 1
if d_c == 0:
all_zero_d_count += 1
doc_total_len += d_c
doc_count += FLAGS.batch_size
for i in range(FLAGS.batch_size):
                        q_c = 0  # count of non-zero entries in this query representation
for j in range(len(q_repr[i])):
if q_repr[i][j] > 0.:
q_c += 1
if q_c == 0:
all_zero_q_count += 1
q_total_len += q_c
q_count += FLAGS.batch_size
valid_coss += cost_val
valid_coss /= FLAGS.num_valid_steps
print('Average cost on validation set at step ', step, ': ', valid_coss)
print('Doc Avg Length at step ', step, ': ', doc_total_len / doc_count,
", zc-->", all_zero_d_count)
print('Query Avg Length at step ', step, ': ', q_total_len / q_count,
", zc-->", all_zero_q_count)
if step > 0 and step % FLAGS.save_snapshot_every_n_steps == 0:
save_path = snrm.saver.save(session, FLAGS.base_path + FLAGS.model_path + FLAGS.run_name + str(step))
print('Model saved in file: %s' % save_path)
save_path = snrm.saver.save(session, FLAGS.base_path + FLAGS.model_path + FLAGS.run_name)
print('Model saved in file: %s' % save_path)
else:
print('Experiment Mode is ON!')
# inference should be done. You should implement it. It's easy. Please refer to the paper. You should just
# construct the inverted index from the learned representations. Then the query should fed to the network and
# the documents that contain the "query latent terms" should be scored and ranked. If you have any question,
# please do not hesitate to contact me ([email protected]).
|
py | 7dfebcac1d2d7648fabe3af050b382c3b43ee100 | import sys
import threading
import logging
from collections import deque
from inspect import isawaitable
from asyncio import get_event_loop
from pulsar.api import create_future, ensure_future, AsyncObject
from .utils import wait, GreenletWorker, getcurrent
_DEFAULT_WORKERS = 100
_MAX_WORKERS = 1000
class _DONE:
pass
class GreenPool(AsyncObject):
"""A pool of running greenlets.
This pool maintains a group of greenlets to perform asynchronous
tasks via the :meth:`submit` method.
"""
worker_name = 'exec'
def __init__(self, max_workers=None, loop=None):
self._loop = loop or get_event_loop()
self._max_workers = min(max_workers or _DEFAULT_WORKERS, _MAX_WORKERS)
self._greenlets = set()
self._available = set()
self._queue = deque()
self._shutdown = False
self._waiter = None
self._logger = logging.getLogger('pulsar.greenpool')
self._shutdown_lock = threading.Lock()
self.wait = wait
@property
def max_workers(self):
return self._max_workers
@max_workers.setter
def max_workers(self, value):
value = int(value)
assert value > 0
self._max_workers = value
@property
def in_green_worker(self):
"""True if the current greenlet is a green pool worker
"""
return isinstance(getcurrent(), GreenletWorker)
@property
def closed(self):
"""True if this pool is closed and no task can queued
"""
return self._shutdown
def submit(self, func, *args, **kwargs):
"""Equivalent to ``func(*args, **kwargs)``.
        This method creates a new task for function ``func`` and adds it to
the queue.
Return a :class:`~asyncio.Future` called back once the task
has finished.
"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError(
'cannot schedule new futures after shutdown')
if self.in_green_worker:
return wait(func(*args, **kwargs))
else:
future = create_future(self._loop)
self._put((future, func, args, kwargs))
return future
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._put(None)
if wait:
self._waiter = create_future(self._loop)
return self._waiter
def getcurrent(self):
return getcurrent()
# INTERNALS
def _adjust_greenlet_count(self):
if (not self._shutdown and not self._available and
len(self._greenlets) < self._max_workers):
green = GreenletWorker(self._green_run)
self._greenlets.add(green)
self.logger.debug('Num greenlets: %d', len(self._greenlets))
green.switch()
return self._available
def _put(self, task):
        # Run in the main greenlet of the event-loop thread
self._queue.appendleft(task)
self._check_queue()
def _check_queue(self):
# Run in the main greenlet of the event-loop thread
if not self._adjust_greenlet_count():
self.logger.debug('No greenlet available')
return self._loop.call_soon(self._check_queue)
try:
task = self._queue.pop()
except IndexError:
return
ensure_future(self._green_task(self._available.pop(), task),
loop=self._loop)
async def _green_task(self, green, task):
        # Coroutine executing in the main greenlet
# This coroutine is executed for every task put into the queue
while task is not _DONE:
# switch to the greenlet to start the task
task = green.switch(task)
# if an asynchronous result is returned, await
while True:
try:
task = await task
except TypeError as exc:
if isawaitable(task):
task = self._dispach_error(green, exc)
else:
break
except Exception as exc:
task = self._dispach_error(green, exc)
def _dispach_error(self, green, exc):
exc_info = sys.exc_info()
if not exc_info[0]:
exc_info = (exc, None, None)
return green.throw(*exc_info)
def _green_run(self):
# The run method of a worker greenlet
task = True
while task:
green = getcurrent()
parent = green.parent
assert parent
# add greenlet in the available greenlets
self._available.add(green)
task = parent.switch(_DONE) # switch back to the main execution
if task:
future, func, args, kwargs = task
try:
try:
result = wait(func(*args, **kwargs), True)
except StopIteration as exc: # See PEP 479
raise RuntimeError('Unhandled StopIteration') from exc
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(result)
else: # Greenlet cleanup
self._greenlets.remove(green)
if self._greenlets:
self._put(None)
elif self._waiter:
self._waiter.set_result(None)
self._waiter = None
parent.switch(_DONE)
|
py | 7dfebcae6da8394e9ecb77f25177e0dcf10c6d01 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 11:03:00 2021
@author: ZiyaoHe
"""
from geopy import distance
import time
newport_ri = (41.49008, -71.312796)
cleveland_oh = (41.499498, -81.695391)
import timeit
start = timeit.default_timer()
d1=distance.distance(newport_ri, cleveland_oh).meters
stop = timeit.default_timer()
print("---Geopy %s seconds ---" % (stop - start))
#newport_ri1 = (41.49008, -71.312796,300)
#cleveland_oh1 = (41.499498, -81.695391,712.5)
#d2=distance.distance(newport_ri1, cleveland_oh1).meters
start = timeit.default_timer()
import math
#d3=math.sqrt(d1**2+412.5**2)
#print('d2 is:' + d2)
#print('d3 is:' + d3)
def d_2points(lat1,lng1,h1,lat2,lng2,h2):
R = 6378100 #radius of earth
lng1=math.radians(lng1)
lng2=math.radians(lng2)
lat1=math.radians(lat1)
lat2=math.radians(lat2)
dlng = lng1-lng2
dlat = lat1 - lat2
dh= h1-h2
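    # NOTE: dh (the altitude difference) is computed but not used below; only the
    # horizontal great-circle distance is returned.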
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng / 2)**2
c = math.asin(math.sqrt(a))
dis_horizontal = 2 *R * c
return dis_horizontal
d2= d_2points(41.49008, -71.312796,10,41.499498, -81.695391,12)
stop = timeit.default_timer()
print("---Haversine %s seconds ---" %( (stop - start)))
print('d1 is:' + str(d1))
print('d2 is:' + str(d2))
import decimal
a=decimal.Decimal('0.2285714285714285841168345671446461762700762068')
print(1/a)
print((1/a)%10)
|
py | 7dfebd38ea371481846c58e8a0110b7d11c79de5 | # Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import models_v2
from neutron.db import standard_attr
class SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2,
model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron security group."""
name = sa.Column(sa.String(attributes.NAME_MAX_LEN))
class DefaultSecurityGroup(model_base.BASEV2, model_base.HasProjectPrimaryKey):
__tablename__ = 'default_security_group'
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
security_group = orm.relationship(
SecurityGroup, lazy='joined',
backref=orm.backref('default_security_group', cascade='all,delete'),
primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id",
)
class SecurityGroupPortBinding(model_base.BASEV2):
"""Represents binding between neutron ports and security profiles."""
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id",
ondelete='CASCADE'),
primary_key=True)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id"),
primary_key=True)
revises_on_change = ('ports', )
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load security group bindings
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("security_groups",
lazy='joined', cascade='delete'))
class SecurityGroupRule(standard_attr.HasStandardAttributes, model_base.BASEV2,
model_base.HasId, model_base.HasProject):
"""Represents a v2 neutron security group rule."""
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
remote_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=True)
revises_on_change = ('security_group', )
direction = sa.Column(sa.Enum('ingress', 'egress',
name='securitygrouprules_direction'))
ethertype = sa.Column(sa.String(40))
protocol = sa.Column(sa.String(40))
port_range_min = sa.Column(sa.Integer)
port_range_max = sa.Column(sa.Integer)
remote_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete', lazy='joined'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,
backref=orm.backref('source_rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
|
py | 7dfebd88dd874008838686b0ac35b8c5fed739a3 | #!/usr/bin/env python3
PROXY = {}
def setProxy(proxy_string):
    global PROXY
# Create valid ProxyDict
if proxy_string != "-":
PROXY = {'http': proxy_string, 'https': proxy_string}
# No proxy if nothing is set in .ini
else:
PROXY = {} |
py | 7dfebe70101ef7eb4b885a2cbb59ceb29a3188ec | #!python
from linkedlist import LinkedList
# Implement LinkedQueue below, then change the assignment at the bottom
# to use this Queue implementation to verify it passes all tests
class LinkedQueue(object):
def __init__(self, iterable=None):
"""Initialize this queue and enqueue the given items, if any."""
# Initialize a new linked list to store the items
self.list = LinkedList()
if iterable is not None:
for item in iterable:
self.enqueue(item)
def __repr__(self):
"""Return a string representation of this queue."""
return 'Queue({} items, front={})'.format(self.length(), self.front())
def is_empty(self):
"""Return True if this queue is empty, or False otherwise."""
# TODO: Check if empty
return self.list.is_empty()
def length(self):
"""Return the number of items in this queue."""
# TODO: Count number of items
return self.list.length()
def enqueue(self, item):
"""Insert the given item at the back of this queue.
        Running time: O(1), assuming LinkedList.prepend only rewires the head reference."""
# TODO: Insert given item
self.list.prepend(item)
def front(self):
"""Return the item at the front of this queue without removing it,
or None if this queue is empty."""
# TODO: Return front item, if any
if self.list.is_empty():
return None
else:
return self.list.tail.data
def dequeue(self):
"""Remove and return the item at the front of this queue,
or raise ValueError if this queue is empty.
        Running time: O(n) worst case, assuming LinkedList.delete searches the list for the matching node."""
# TODO: Remove and return front item, if any
if self.list.is_empty():
raise ValueError("no data")
else:
front_data = self.list.tail.data
self.list.delete(front_data)
return front_data
# Implement ArrayQueue below, then change the assignment at the bottom
# to use this Queue implementation to verify it passes all tests
class ArrayQueue(object):
def __init__(self, iterable=None):
"""Initialize this queue and enqueue the given items, if any."""
# Initialize a new list (dynamic array) to store the items
self.list = list()
if iterable is not None:
for item in iterable:
self.enqueue(item)
def __repr__(self):
"""Return a string representation of this queue."""
return 'Queue({} items, front={})'.format(self.length(), self.front())
def is_empty(self):
"""Return True if this queue is empty, or False otherwise."""
# TODO: Check if empty
if len(self.list) == 0:
return True
else:
return False
def length(self):
"""Return the number of items in this queue."""
# TODO: Count number of items
return len(self.list)
def enqueue(self, item):
"""Insert the given item at the back of this queue.
        Running time: O(1) amortized, since list.append only occasionally resizes the underlying array."""
# TODO: Insert given item
self.list.append(item)
def front(self):
"""Return the item at the front of this queue without removing it,
or None if this queue is empty."""
# TODO: Return front item, if any
if self.is_empty():
return None
return self.list[0]
def dequeue(self):
"""Remove and return the item at the front of this queue,
or raise ValueError if this queue is empty.
        Running time: O(n) because list.pop(0) shifts every remaining item one slot to the left."""
# TODO: Remove and return front item, if any
if self.is_empty():
raise ValueError("Queue is empty")
front_item = self.list[0]
self.list.pop(0)
return front_item
# Implement LinkedQueue and ArrayQueue above, then change the assignment below
# to use each of your Queue implementations to verify they each pass all tests
Queue = LinkedQueue
# Queue = ArrayQueue
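# Usage sketch (illustrative):
#   q = Queue(['a', 'b'])
#   q.enqueue('c')
#   assert q.front() == 'a'
#   assert q.dequeue() == 'a'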
|
py | 7dfebf55973da03430259c0f28b8f918b2d8a0ff | import time
import random
import numpy as np
import pandas as pd
import cv2, os, serial, csv
import matplotlib.pyplot as plt
from . import clock as clock
from functions import cnn_to_raw
from img_serializer import serialize_image
from file_finder import get_new_filename
class SuironIO(clock.Action):
"""
Class which handles input output aspect of the suiron
- Reads inputs from webcam and normalizes them
    - Also reads serial input and writes it to file
"""
# Constructor
def __init__(self, id=1, width=72, height=48, depth=3, baudrate=57600):
# Image settings
self.width = int(width)
self.height = int(height)
self.depth = int(depth)
self.sz=self.width *self.height *self.depth
# Video IO
self.cap = cv2.VideoCapture(id) # Use first capture device
# Serial IO
self.outfile = None
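        # NOTE: the serial port itself (self.ser) is never opened here; the output
        # methods below (servo_write, motor_write_fixed, ...) assume something like
        # self.ser = serial.Serial(port, baudrate) has been set up elsewhere.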
# In-memory variable to record data
# to prevent too much I/O
self.frame_results = []
self.servo_results = []
self.motorspeed_results = []
""" Functions below are used for inputs (recording data) """
# Initialize settings before saving
def init_saving(self, folder='data', filename='output_', extension='.csv'):
fileoutname = get_new_filename(folder=folder, filename=filename, extension=extension)
# Filename to save serial data and image data
# Output file
print(fileoutname)
outfile = open(fileoutname, 'w') # Truncate file first
self.outfile = open(fileoutname, 'a')
def start(self, period):
thread=clock.Clock(self, period)
thread.start()
return thread
def run(self):
time.sleep(0.1)
# Saves both inputs
def record_inputs(self, s_inputs):
# Frame is just a numpy array
frame = self.get_frame()
        # Serial inputs arrive as a dict with keys 'servo' and 'motor'
        # If it's not in manual mode then proceed
# print("yeah")
# print("helllo {}".format(s_inputs))
if s_inputs:
servo = s_inputs['servo']
motor = s_inputs['motor']
# Append to memory
# tolist so it actually appends the entire thing
dat=serialize_image(frame)
# print(len(dat))
if (len(dat)==self.sz):
self.frame_results.append(dat)
# print(serialize_image(frame))
self.servo_results.append(servo)
self.motorspeed_results.append(motor)
# Gets frame
def get_frame(self):
ret, frame = self.cap.read()
# If we get a frame, save it
if not ret:
raise IOError('No image found!')
frame = self.normalize_frame(frame)
return frame
# Gets frame for prediction
def get_frame_prediction(self):
ret, frame = self.cap.read()
# if we get a frame
if not ret:
raise IOError('No image found!')
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
frame = frame.astype('uint8')
return frame
# Normalizes inputs so we don't have to worry about weird
# characters e.g. \r\n
def normalize_serial(self, line):
# Assuming that it receives
# servo, motor
# 'error' basically means that
# its in manual mode
try:
line = line.replace('\n', '').split(',')
line_dict = {'servo': int(line[0]), 'motor': int(line[1])}
return line_dict
except:
return None
# Normalizes frame so we don't have BGR as opposed to RGB
def normalize_frame(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
frame = frame.flatten()
frame = frame.astype('uint8')
return frame
# Saves files
def save_inputs(self):
raw_data = {
'image': self.frame_results,
'servo': self.servo_results,
'motor': self.motorspeed_results
}
df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])
df.to_csv(self.outfile)
""" Functions below are used for ouputs (controlling servo/motor) """
# Controls the servo given the numpy array outputted by
# the neural network
def servo_write(self, np_y):
servo_out = cnn_to_raw(np_y)
if (servo_out < 90):
servo_out *= 0.85
elif (servo_out > 90):
servo_out *= 1.15
self.ser.write('steer,' + str(servo_out) + '\n')
time.sleep(0.02)
# Sets the motor at a fixed speed
def motor_write_fixed(self):
self.ser.write('motor,80\n')
time.sleep(0.02)
# Stops motors
def motor_stop(self):
self.ser.write('motor,90\n')
time.sleep(0.02)
# Staightens servos
def servo_straighten(self):
self.ser.write('steer,90')
time.sleep(0.02)
def __del__(self):
if self.outfile:
self.outfile.close()
|
py | 7dfec05d4d18d64c9e8c4d71915995f929f7fc5a | import pandas
#filter the data based on the ten rules of investing by benjamin graham
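# NOTE: `fin_analysis` is assumed to be a pandas DataFrame loaded beforehand,
# e.g. (hypothetical file name, not part of the original script):
# fin_analysis = pandas.read_csv("financial_analysis.csv")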
fin_analysis = fin_analysis[fin_analysis["epsEstimate"] > 0 ]
fin_analysis = fin_analysis[(fin_analysis["price"] < 50) & (fin_analysis["price"] > 0)]
fin_analysis = fin_analysis[(fin_analysis["peRatio"] < 11) & (fin_analysis["peRatio"] != -1)]
fin_analysis = fin_analysis[fin_analysis["priceToBook"] < 10]
fin_analysis = fin_analysis[fin_analysis["priceToSales"] < 10 & (fin_analysis["priceToSales"] > 0)]
fin_analysis = fin_analysis[fin_analysis["debt"] > 0]
fin_analysis = fin_analysis[fin_analysis["day5ChangePercent"] > 0]
|
py | 7dfec28f465c66a48ea1ff642b59ce2913e633f6 | # -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2018, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
import mock
import unittest
from uuid import uuid4
def _run(coro):
return asyncio.run(coro)
class AsyncMock(mock.Mock):
def __call__(self, *args, **kwargs):
sup = super(AsyncMock, self)
async def coro():
return sup.__call__(*args, **kwargs)
return coro()
def __await__(self):
return self().__await__()
class testConsumerKeyRequest(unittest.TestCase):
def test_add_rules(self):
# Prepare
import asyncovh
m_client = AsyncMock()
ck = asyncovh.ConsumerKeyRequest(m_client)
# Test: No-op
self.assertEqual([], ck._access_rules)
ck._access_rules = []
# Test: allow one
ck.add_rule("GET", '/me')
self.assertEqual([
{'method': 'GET', 'path': '/me'},
], ck._access_rules)
ck._access_rules = []
# Test: allow safe methods on domain
ck.add_rules(asyncovh.API_READ_WRITE_SAFE, '/domains/test.com')
self.assertEqual([
{'method': 'GET', 'path': '/domains/test.com'},
{'method': 'POST', 'path': '/domains/test.com'},
{'method': 'PUT', 'path': '/domains/test.com'},
], ck._access_rules)
ck._access_rules = []
# Test: allow all sms, strips suffix
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/sms/*')
self.assertEqual([
{'method': 'GET', 'path': '/sms'},
{'method': 'POST', 'path': '/sms'},
{'method': 'PUT', 'path': '/sms'},
{'method': 'DELETE', 'path': '/sms'},
{'method': 'GET', 'path': '/sms/*'},
{'method': 'POST', 'path': '/sms/*'},
{'method': 'PUT', 'path': '/sms/*'},
{'method': 'DELETE', 'path': '/sms/*'},
], ck._access_rules)
ck._access_rules = []
# Test: allow all, does not insert the empty rule
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/')
self.assertEqual([
{'method': 'GET', 'path': '/*'},
{'method': 'POST', 'path': '/*'},
{'method': 'PUT', 'path': '/*'},
{'method': 'DELETE', 'path': '/*'},
], ck._access_rules)
ck._access_rules = []
# Test launch request
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/')
self.assertEqual(m_client.request_consumerkey.return_value, _run(ck.request()))
m_client.request_consumerkey.assert_called_once_with(ck._access_rules, None)
|
py | 7dfec3a6b8863495f8dfac89576357e5e5000a9c | import uuid
import ipaddress
from kubernetes import client
from games.enabled import get_game_by_id
NAMESPACE="default"
EXTERNAL_PREFIX="77.80.229.128/25"
def list(u_ip):
services = client.CoreV1Api().list_service_for_all_namespaces(
watch=False,
label_selector="app=gaas"
)
deployments = client.AppsV1Api().list_deployment_for_all_namespaces(
watch=False,
label_selector="app=gaas"
)
pods = client.CoreV1Api().list_pod_for_all_namespaces(
watch=False,
label_selector="app=gaas"
)
servers={}
for service in services.items:
uid=service.metadata.labels["server"]
candelete=False
ip=service.spec.external_i_ps[0]
game=service.metadata.labels["game"]
name=get_game_by_id(game).name
servers[uid] = {
"uid":service.metadata.labels["server"],
"game": game,
"gamename": name,
"ip": ip,
"ports": [
"{}/{}".format(port.protocol, port.port)
for port in service.spec.ports
]
}
for deploy in deployments.items:
uid=deploy.metadata.labels["server"]
if uid not in servers:
continue
if deploy.metadata.labels["creator"] == u_ip:
servers[uid]["candelete"] = "yes"
print(deploy.metadata.labels)
for container in deploy.spec.template.spec.containers:
if container.env:
servers[uid]["env"]={
env.name: env.value
for env in container.env
}
for pod in pods.items:
uid=pod.metadata.labels["server"]
if uid not in servers:
continue
servers[uid]["pods"]=[{
"ready": status.ready,
"image": status.image,
"restart_count": status.restart_count,
"state": status.state.waiting.reason if status.state.waiting is not None else None,
} for status in pod.status.container_statuses]
return servers
def delete(uid, ip):
deployment = client.AppsV1Api().read_namespaced_deployment_status(
name="gaas-{}".format(uid),
namespace=NAMESPACE,
)
if deployment.metadata.labels["creator"] != ip:
raise Exception("You did not create this job")
client.AppsV1Api().delete_namespaced_deployment(
name="gaas-{}".format(uid),
namespace=NAMESPACE,
)
client.CoreV1Api().delete_namespaced_service(
name="gaas-{}".format(uid),
namespace=NAMESPACE,
)
def add(ip, game_id, params):
game=get_game_by_id(game_id)
game.validate_params(params)
uid=uuid.uuid4().hex[:12]
name="gaas-{}".format(uid)
labels={
"app": "gaas",
"game": game_id,
"server": uid,
"creator": ip,
}
metadata=client.V1ObjectMeta(
labels=labels,
name=name,
)
ip_ext=alloc_ip()
extra_env=[client.V1EnvVar(
name="IP_ALLOC",
value=ip_ext
), client.V1EnvVar(
name="IP_CREATOR",
value=ip
)]
containers = game.make_deployment(params)
generic_ports = []
# TODO(bluecmd): Hack to work around that not all
# ports are routed to the VIP by default. This allows
# outgoing connections from inside the pod on the VIP.
for p in range(50000, 50016):
generic_ports.append(client.V1ServicePort(
name="internal-tcp-" + str(p), port=p, target_port=p, protocol="TCP"))
generic_ports.append(client.V1ServicePort(
name="internal-udp-" + str(p), port=p, target_port=p, protocol="UDP"))
for container in containers:
if container.env:
container.env.extend(extra_env)
else:
container.env = extra_env
if not container.resources:
container.resources=client.V1ResourceRequirements(
limits={
"cpu": "4",
"memory": "32G"
},
requests={
"cpu": "2",
"memory": "16G"
}
)
deployment=client.V1Deployment(
spec=client.V1DeploymentSpec(
replicas=1,
strategy=client.AppsV1beta1DeploymentStrategy(
rolling_update=client.AppsV1beta1RollingUpdateDeployment(
max_surge=0,
max_unavailable=1
)
),
selector=client.V1LabelSelector(
match_labels=labels,
),
template=client.V1PodTemplateSpec(
spec=client.V1PodSpec(
containers=containers,
termination_grace_period_seconds=0,
# TODO(bluecmd): Hack to work around that not all
# ports are routed to the VIP by default. This allows
# outgoing connections from inside the pod on the VIP.
security_context=client.V1PodSecurityContext(
sysctls=[client.V1Sysctl(
name='net.ipv4.ip_local_port_range',
value='50000 50015')]),
affinity=client.V1Affinity(
node_affinity=client.V1NodeAffinity(
required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
node_selector_terms=[
client.V1NodeSelectorTerm(
match_expressions=[
client.V1NodeSelectorRequirement(
key="kubernetes.io/role",
operator="NotIn",
values=["shared"]
)
]
)
]
)
)
)
)
)
)
)
service=client.V1Service(
spec=client.V1ServiceSpec(
type="ClusterIP",
selector=labels,
ports=game.make_service(params) + generic_ports,
external_i_ps=[ip_ext],
)
)
deployment.metadata=metadata
deployment.spec.template.metadata=metadata
service.metadata=metadata
service.metadata.annotations={"kube-router.io/service.dsr": "tunnel"}
client.AppsV1Api().create_namespaced_deployment(
namespace=NAMESPACE,
body=deployment,
)
service_resp = client.CoreV1Api().create_namespaced_service(
namespace=NAMESPACE,
body=service,
)
return {"uid": uid, "ip": ip}
def alloc_ip():
space=ipaddress.ip_network(EXTERNAL_PREFIX)
reserved=[]
services = client.CoreV1Api().list_service_for_all_namespaces(watch=False)
for service in services.items:
if service.spec.external_i_ps:
reserved.extend(service.spec.external_i_ps)
for ip in space:
if str(ip) not in reserved:
print("Alloc ip {}".format(ip))
return str(ip)
raise Exception("Cluster ran out of available IPs")
|
py | 7dfec3d299480f66dd4d424e8849c601044f4ace | import os
import logging
import ast
import sys
import importlib
from itertools import product, chain
logger = logging.getLogger('soil')
builtins = importlib.import_module('builtins')
def name(value, known_modules=[]):
'''Return a name that can be imported, to serialize/deserialize an object'''
if value is None:
return 'None'
if not isinstance(value, type): # Get the class name first
value = type(value)
tname = value.__name__
if hasattr(builtins, tname):
return tname
modname = value.__module__
if modname == '__main__':
return tname
if known_modules and modname in known_modules:
return tname
for kmod in known_modules:
if not kmod:
continue
module = importlib.import_module(kmod)
if hasattr(module, tname):
return tname
return '{}.{}'.format(modname, tname)
def serializer(type_):
if type_ != 'str' and hasattr(builtins, type_):
return repr
return lambda x: x
def serialize(v, known_modules=[]):
'''Get a text representation of an object.'''
tname = name(v, known_modules=known_modules)
func = serializer(tname)
return func(v), tname
def deserializer(type_, known_modules=[]):
if type(type_) != str: # Already deserialized
return type_
if type_ == 'str':
return lambda x='': x
if type_ == 'None':
return lambda x=None: None
if hasattr(builtins, type_): # Check if it's a builtin type
cls = getattr(builtins, type_)
return lambda x=None: ast.literal_eval(x) if x is not None else cls()
# Otherwise, see if we can find the module and the class
modules = known_modules or []
options = []
for mod in modules:
if mod:
options.append((mod, type_))
if '.' in type_: # Fully qualified module
module, type_ = type_.rsplit(".", 1)
options.append ((module, type_))
errors = []
for modname, tname in options:
try:
module = importlib.import_module(modname)
cls = getattr(module, tname)
return getattr(cls, 'deserialize', cls)
except (ImportError, AttributeError) as ex:
errors.append((modname, tname, ex))
raise Exception('Could not find type {}. Tried: {}'.format(type_, errors))
def deserialize(type_, value=None, **kwargs):
'''Get an object from a text representation'''
if not isinstance(type_, str):
return type_
des = deserializer(type_, **kwargs)
if value is None:
return des
return des(value)
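

# Round-trip sketch (illustrative, not part of the original module): a builtin
# value serializes to its text form via serialize() and comes back through
# deserialize(); a non-builtin type serializes to its dotted import path.
if __name__ == '__main__':
    text, tname = serialize([1, 2, 3])
    assert tname == 'list' and deserialize(tname, text) == [1, 2, 3]
    assert name(logging.Logger) == 'logging.Logger'
    assert deserialize('logging.Logger') is logging.Logger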
|
py | 7dfec42717b0208da00788e0cb50b384a587a800 | from unittest import mock
import pytest
from oauthlib.common import Request as OAuthRequest
from oauthlib.oauth2 import InvalidRequestError
from h.models import Token
from h.oauth.errors import InvalidRefreshTokenError
from h.services.oauth_provider import (
OAuthProviderService,
oauth_provider_service_factory,
)
@pytest.mark.usefixtures("validator_service", "user_service")
class TestOAuthProviderService:
def test_load_client_id_sets_client_id_from_refresh_token(
self, svc, oauth_request, factories, validator_service
):
token_1, token_2 = factories.OAuth2Token(), factories.OAuth2Token()
oauth_request.refresh_token = token_2.refresh_token
def fake_find_refresh_token(refresh_token):
if refresh_token == token_1.refresh_token:
return token_1
elif refresh_token == token_2.refresh_token:
return token_2
validator_service.find_refresh_token.side_effect = fake_find_refresh_token
assert oauth_request.client_id is None
svc.load_client_id_from_refresh_token(oauth_request)
assert oauth_request.client_id == token_2.authclient.id
def test_load_client_id_skips_setting_client_id_when_not_refresh_token(
self, svc, oauth_request, factories, validator_service
):
token = factories.OAuth2Token()
def fake_find_refresh_token(refresh_token):
if refresh_token == token.refresh_token:
return token
validator_service.find_refresh_token.side_effect = fake_find_refresh_token
svc.load_client_id_from_refresh_token(oauth_request)
assert oauth_request.client_id is None
def test_load_client_id_raises_for_missing_refresh_token(
self, svc, oauth_request, validator_service
):
validator_service.find_refresh_token.return_value = None
oauth_request.refresh_token = "missing"
with pytest.raises(InvalidRefreshTokenError):
svc.load_client_id_from_refresh_token(oauth_request)
def test_generate_access_token(self, svc, token_urlsafe):
token_urlsafe.return_value = "very-secret"
assert svc.generate_access_token(None) == "5768-very-secret"
def test_generate_refresh_token(self, svc, token_urlsafe):
token_urlsafe.return_value = "top-secret"
assert svc.generate_refresh_token(None) == "4657-top-secret"
def test_validate_revocation_request_adds_revoke_marker(self, svc, oauth_request):
try:
svc.validate_revocation_request(oauth_request)
except InvalidRequestError:
# Not here to test this
pass
finally:
assert oauth_request.h_revoke_request is True
def test_validate_revocation_request_looks_up_token(
self, svc, oauth_request, token
):
oauth_request.token = mock.sentinel.token
svc.oauth_validator.find_token.return_value = token
oauth_request.http_method = "POST"
svc.validate_revocation_request(oauth_request)
svc.oauth_validator.find_token.assert_called_once_with(mock.sentinel.token)
assert oauth_request.client_id == token.authclient.id
@pytest.fixture
def token(self):
return mock.create_autospec(Token, instance=True)
@pytest.fixture
def svc(self, pyramid_request):
return oauth_provider_service_factory(None, pyramid_request)
@pytest.fixture
def token_urlsafe(self, patch):
return patch("h.services.oauth_provider.token_urlsafe")
@pytest.fixture
def oauth_request(self):
return OAuthRequest("/")
@pytest.mark.usefixtures("validator_service", "user_service")
class TestOAuthProviderServiceFactory:
def test_it_returns_oauth_provider_service(self, pyramid_request):
svc = oauth_provider_service_factory(None, pyramid_request)
assert isinstance(svc, OAuthProviderService)
@pytest.fixture
def validator_service(pyramid_config):
svc = mock.Mock()
pyramid_config.register_service(svc, name="oauth_validator")
return svc
|
py | 7dfec45a1c5f2da9b4777111601504cb8435c206 | #!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""Proof of concept for solving the poisson equation in 2D using linear finite elements and our grid interface
Usage:
cg.py [--rect] PROBLEM-NUMBER DIRICHLET-NUMBER NEUMANN-NUMBER NEUMANN-COUNT
Arguments:
PROBLEM-NUMBER {0,1}, selects the problem to solve
DIRICHLET-NUMBER {0,1,2}, selects the Dirichlet data function
NEUMANN-NUMBER {0,1}, selects the Neumann data function
NEUMANN-COUNT 0: no neumann boundary
1: right edge is neumann boundary
2: right+top edges are neumann boundary
3: right+top+bottom edges are neumann boundary
--rect Use RectGrid instead of TriaGrid
Options:
-h, --help this message
"""
from __future__ import absolute_import, division, print_function
import math as m
from docopt import docopt
import numpy as np
from pymor.analyticalproblems.elliptic import EllipticProblem
from pymor.discretizers.elliptic import discretize_elliptic_cg
from pymor.domaindescriptions.boundarytypes import BoundaryType
from pymor.domaindescriptions.basic import RectDomain
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.functions.basic import GenericFunction, ConstantFunction
from pymor.grids.rect import RectGrid
from pymor.grids.tria import TriaGrid
def cg_demo(nrhs, ndirichlet, nneumann, neumann_count, rect_grid=False):
rhs0 = GenericFunction(lambda X: np.ones(X.shape[:-1]) * 10, 2) # NOQA
rhs1 = GenericFunction(lambda X: (X[..., 0] - 0.5) ** 2 * 1000, 2) # NOQA
dirichlet0 = GenericFunction(lambda X: np.zeros(X.shape[:-1]), 2) # NOQA
dirichlet1 = GenericFunction(lambda X: np.ones(X.shape[:-1]), 2) # NOQA
dirichlet2 = GenericFunction(lambda X: X[..., 0], 2) # NOQA
neumann0 = None
neumann1 = ConstantFunction(3., dim_domain=2)
neumann2 = GenericFunction(lambda X: 50*(0.1 <= X[..., 1]) * (X[..., 1] <= 0.2)
+50*(0.8 <= X[..., 1]) * (X[..., 1] <= 0.9), 2)
domain0 = RectDomain() # NOQA
domain1 = RectDomain(right=BoundaryType('neumann')) # NOQA
domain2 = RectDomain(right=BoundaryType('neumann'), top=BoundaryType('neumann')) # NOQA
domain3 = RectDomain(right=BoundaryType('neumann'), top=BoundaryType('neumann'), bottom=BoundaryType('neumann')) # NOQA
assert 0 <= nrhs <= 1, ValueError('Invalid rhs number.')
rhs = eval('rhs{}'.format(nrhs))
assert 0 <= ndirichlet <= 2, ValueError('Invalid Dirichlet boundary number.')
dirichlet = eval('dirichlet{}'.format(ndirichlet))
assert 0 <= nneumann <= 2, ValueError('Invalid Neumann boundary number.')
neumann = eval('neumann{}'.format(nneumann))
assert 0 <= neumann_count <= 3, ValueError('Invalid Neumann boundary count.')
domain = eval('domain{}'.format(neumann_count))
for n in [32, 128]:
grid_name = '{1}(({0},{0}))'.format(n, 'RectGrid' if rect_grid else 'TriaGrid' )
print('Solving on {0}'.format(grid_name))
print('Setup problem ...')
problem = EllipticProblem(domain=domain, rhs=rhs, dirichlet_data=dirichlet, neumann_data=neumann)
print('Discretize ...')
if rect_grid:
grid, bi = discretize_domain_default(problem.domain, diameter=m.sqrt(2) / n, grid_type=RectGrid)
else:
grid, bi = discretize_domain_default(problem.domain, diameter=1. / n, grid_type=TriaGrid)
discretization, _ = discretize_elliptic_cg(analytical_problem=problem, grid=grid, boundary_info=bi)
print('Solve ...')
U = discretization.solve()
print('Plot ...')
discretization.visualize(U, title=grid_name)
print('')
if __name__ == '__main__':
args = docopt(__doc__)
nrhs = int(args['PROBLEM-NUMBER'])
ndirichlet = int(args['DIRICHLET-NUMBER'])
nneumann = int(args['NEUMANN-NUMBER'])
neumann_count = int(args['NEUMANN-COUNT'])
rect_grid = bool(args['--rect'])
cg_demo(nrhs, ndirichlet, nneumann, neumann_count, rect_grid)
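
# Example invocations matching the docopt usage above (illustrative):
#
#   python cg.py 0 0 0 0          # problem 0, zero Dirichlet data, no Neumann boundary, TriaGrid
#   python cg.py --rect 1 2 1 2   # problem 1, Dirichlet data x, constant Neumann flux,
#                                 # Neumann on right+top edges, RectGrid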
|
py | 7dfec4cc8e26c302f3b1ded06c1dc6d5bd8abbf1 | # dataset settings
dataset_type = 'Hie_Dataset'
# img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromNIIFile'),
dict(type='ExtractDataFromObj'),
dict(type='NormalizeMedical', norm_type='full_volume_mean',
instensity_min_val=0.5,
instensity_max_val=99.5),
# dict(type='ResizeMedical', size=(80, 160, 160)),
dict(type='ResizeMedical', size=(160, 160, 80)),
# dict(type='Normalize', **img_norm_cfg),
dict(type='ConcatImage'),
# dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label', 'img']),
dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
dict(type='LoadImageFromNIIFile'),
dict(type='ExtractDataFromObj'),
dict(type='NormalizeMedical', norm_type='full_volume_mean',
instensity_min_val=0.5,
instensity_max_val=99.5),
dict(type='ResizeMedical', size=(160, 160, 80)),
dict(type='ToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_fse_train.txt',
pipeline=train_pipeline,
modes=['t1_zw']),
val=dict(
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_fse_val.txt',
pipeline=test_pipeline,
modes=['t1_zw']),
test=dict(
# replace `data/val` with `data/test` for standard test
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_fse_val.txt',
pipeline=test_pipeline,
modes=['t1_zw']))
evaluation = dict(interval=2, metric=['accuracy', 'precision', 'recall', 'f1_score', 'support'])
norm_cfg = dict(type='BN3d', requires_grad=True)
conv_cfg = dict(type='Conv3d')
num_classes = 2
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet',
depth=18,
in_channels=1,
in_dims=3,
num_stages=4,
out_indices=(3, ),
style='pytorch',
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
init_cfg=[
dict(type='Kaiming', layer=['Conv3d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm', 'BN3d'])
]
),
neck=dict(type='GlobalAveragePooling', dim=3),
head=dict(
type='LinearClsHead',
num_classes=num_classes,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1,),
))
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[40, 80, 120])
runner = dict(type='EpochBasedRunner', max_epochs=160)
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
checkpoint_config = dict(by_epoch=True, interval=2)
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
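
# Typical launch command, assuming the standard mmclassification tools/ layout
# (the config path and work dir below are illustrative, not part of this repo):
#   python tools/train.py configs/hie/resnet18_3d_t1zw.py --work-dir work_dirs/hie_t1_zw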
|
py | 7dfec513ed8e613a2646452c095b52035c8ce369 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
# Default: '%Y-%m-%d %H:%M:%S'
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# Default: '[%(name)s]%(highlevel)s %(message)s'
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
# Default: 30
# c.Application.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# Default: False
# c.Application.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# Default: False
# c.Application.show_config_json = False
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
# Default: False
# c.JupyterApp.answer_yes = False
## Full path of a config file.
# Default: ''
# c.JupyterApp.config_file = ''
## Specify a config file to load.
# Default: ''
# c.JupyterApp.config_file_name = ''
## Generate default config file.
# Default: False
# c.JupyterApp.generate_config = False
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.JupyterApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.JupyterApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.JupyterApp.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.JupyterApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.JupyterApp.show_config_json = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
# Default: False
# c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# Default: ''
# c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# Default: ''
# c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the opportunity to
# the user to enter a new password at the same time that will replace
# the token login mechanism.
#
# This can be set to false to prevent changing password from
# the UI/API.
# Default: True
# c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header
# shows that the browser thinks it's on a non-local domain.
# Setting this option to True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a
# local IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
# along with hostnames configured in local_hostnames.
# Default: False
# c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
# Default: False
# c.NotebookApp.allow_root = False
## Answer yes to any prompts.
# See also: JupyterApp.answer_yes
# c.NotebookApp.answer_yes = False
## "
# Require authentication to access prometheus metrics.
# Default: True
# c.NotebookApp.authenticate_prometheus = True
## Reload the webapp when changes are made to any Python src files.
# Default: False
# c.NotebookApp.autoreload = False
## DEPRECATED use base_url
# Default: '/'
# c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted,
# and will automatically be added.
# Default: '/'
# c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web
# browser when opening the notebook. If not specified, the
# default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the
# BROWSER environment variable to override it.
# Default: ''
# c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
# Default: ''
# c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
# Default: ''
# c.NotebookApp.client_ca = ''
## Full path of a config file.
# See also: JupyterApp.config_file
# c.NotebookApp.config_file = ''
## Specify a config file to load.
# See also: JupyterApp.config_file_name
# c.NotebookApp.config_file_name = ''
## The config manager class to use
# Default: 'notebook.services.config.manager.ConfigManager'
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
# Default: 'notebook.services.contents.largefilemanager.LargeFileManager'
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
# Default: {}
# c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies.
# By default this is a new random number every time you start the Notebook.
# Set it to a value in a config file to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# Default: b''
# c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
# Default: ''
# c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL,
# with the given value when displaying URL to the users. Do not change
# the actual connection URL. If authentication token is enabled, the
# token is added to the custom URL automatically.
#
# This option is intended to be used when the URL to display to the user
# cannot be determined reliably by the Jupyter notebook server (proxified
# or containerized setups for example).
# Default: ''
# c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
# Default: '/tree'
# c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
# requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and token), or
# - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication.
# These services can disable all authentication and security checks,
# with the full knowledge of what that implies.
# Default: False
# c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX
# source.
# Default: True
# c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
# Default: []
# c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
# Default: []
# c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server machine,
# or overriding individual files in the IPython
# Default: []
# c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# Default: []
# c.NotebookApp.extra_template_paths = []
# Default: ''
# c.NotebookApp.file_to_run = ''
## Generate default config file.
# See also: JupyterApp.generate_config
# c.NotebookApp.generate_config = False
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
# Default: {}
# c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
# Default: False
# c.NotebookApp.ignore_minified_js = False
## (bytes/sec)
# Maximum rate at which stream output can be sent on iopub before they are
# limited.
# Default: 1000000
# c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec)
# Maximum rate at which messages can be sent on iopub before they are
# limited.
# Default: 1000
# c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
# Default: 'localhost'
# c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
# Default: {}
# c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
# Default: {}
# c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
# Default: 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# Default: 'jupyter_client.kernelspec.KernelSpecManager'
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
# Default: ''
# c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
# as local as well.
# Default: ['localhost']
# c.NotebookApp.local_hostnames = ['localhost']
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set to True to enable JSON formatted logs. Run "pip install notebook[json-
# logging]" to install the required dependent packages. Can also be set using
# the environment variable JUPYTER_ENABLE_JSON_LOGGING=true.
# Default: False
# c.NotebookApp.log_json = False
## Set the log level by value or name.
# See also: Application.log_level
# c.NotebookApp.log_level = 30
## The login handler class to use.
# Default: 'notebook.auth.login.LoginHandler'
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
# Default: 'notebook.auth.logout.LogoutHandler'
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
# Default: 'TeX-AMS-MML_HTMLorMML-full,Safe'
# c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js.
# Should be in the form of a case-sensitive url to MathJax,
# for example: /static/components/MathJax/MathJax.js
# Default: ''
# c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
# Default: 536870912
# c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for use
# by the buffer manager.
# Default: 536870912
# c.NotebookApp.max_buffer_size = 536870912
## Gets or sets a lower bound on the open file handles process resource limit.
# This may need to be increased if you run into an OSError: [Errno 24] Too many
# open files. This is not applicable when running on Windows.
# Default: 0
# c.NotebookApp.min_open_files_limit = 0
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
# Default: {}
# c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
# Default: ''
# c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting.
# The specific browser used is platform dependent and
# determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser
# (NotebookApp.browser) configuration option.
# Default: True
# c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-
# password.
# Default: ''
# c.NotebookApp.password = ''
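## Example (placeholder value; paste the real 'type:salt:hashed-password' string
## produced by passwd() instead):
# c.NotebookApp.password = '<type>:<salt>:<hashed-password>'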
## Forces users to use a password for the Notebook server.
# This is useful in a multi user environment, for instance when
# everybody in the LAN can access each other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure
# since any user can connect to the notebook server via ssh.
# Default: False
# c.NotebookApp.password_required = False
## The port the notebook server will listen on (env: JUPYTER_PORT).
# Default: 8888
# c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available
# (env: JUPYTER_PORT_RETRIES).
# Default: 50
# c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# Default: 'disabled'
# c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit
# (shutdown the notebook server).
# Default: True
# c.NotebookApp.quit_button = True
## (sec) Time window used to
# check the message and data rate limits.
# Default: 3
# c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
# Default: False
# c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
# Default: []
# c.NotebookApp.server_extensions = []
## The session manager class to use.
# Default: 'notebook.services.sessions.sessionmanager.SessionManager'
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.NotebookApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.NotebookApp.show_config_json = False
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
# Default: 0
# c.NotebookApp.shutdown_no_activity_timeout = 0
## The UNIX socket the notebook server will listen on.
# Default: ''
# c.NotebookApp.sock = ''
## The permissions mode for UNIX socket creation (default: 0600).
# Default: '0600'
# c.NotebookApp.sock_mode = '0600'
## Supply SSL options for the tornado HTTPServer.
# See the tornado docs for details.
# Default: {}
# c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command". On
# Unix, if "shell_command" is not provided, a non-login shell is launched by
# default when the notebook server is connected to a terminal, a login shell
# otherwise.
# Default: {}
# c.NotebookApp.terminado_settings = {}
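## Example (illustrative): force terminals to use bash as the shell.
# c.NotebookApp.terminado_settings = {'shell_command': ['bash']}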
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself.
# Anything the user can in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package
# is not available.
# Default: True
# c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
# with the JUPYTER_TOKEN environment variable.
#
# When no password is enabled,
# the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which
# is NOT RECOMMENDED.
# Default: '<generated>'
# c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# Default: {}
# c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# Default: False
# c.NotebookApp.trust_xheaders = False
## Disable launching browser by redirect file
#
# For versions of notebook > 5.7.2, a security feature measure was added that
# prevented the authentication token used to launch the browser from being visible.
# This feature makes it difficult for other users on a multi-user system from
# running code in your Jupyter session as you.
#
# However, in some environments (like Windows Subsystem for Linux (WSL) and Chromebooks),
# launching a browser using a redirect file can lead to the browser failing to load.
# This is because of the difference in file structures/paths between the runtime and
# the browser.
#
# Setting this to False disables this behavior, allowing the browser
# to launch by using a URL and visible token (as before).
# Default: True
# c.NotebookApp.use_redirect_file = True
## DEPRECATED, use tornado_settings
# Default: {}
# c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
# Default: 2
# c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
# Default: None
# c.NotebookApp.websocket_compression_options = None
## The base URL for websockets,
# if it differs from the HTTP server (hint: it almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# Default: ''
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security dir
# of the current profile, but can be specified by absolute path.
# Default: ''
# c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
# Default: 0
# c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
# Default: 0
# c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
# Default: 0
# c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost].
# If the IP address is something other than localhost, then
# Consoles on other machines will be able to connect
# to the Kernel, so be careful!
# Default: ''
# c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
# Default: 0
# c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
# Default: 0
# c.ConnectionFileMixin.stdin_port = 0
# Choices: any of ['tcp', 'ipc'] (case-insensitive)
# Default: 'tcp'
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
# Default: True
# c.KernelManager.autorestart = True
## JSON file in which to store connection info [default: kernel-<pid>.json]
# See also: ConnectionFileMixin.connection_file
# c.KernelManager.connection_file = ''
## set the control (ROUTER) port [default: random]
# See also: ConnectionFileMixin.control_port
# c.KernelManager.control_port = 0
## set the heartbeat port [default: random]
# See also: ConnectionFileMixin.hb_port
# c.KernelManager.hb_port = 0
## set the iopub (PUB) port [default: random]
# See also: ConnectionFileMixin.iopub_port
# c.KernelManager.iopub_port = 0
## Set the kernel's IP address [default localhost].
# See also: ConnectionFileMixin.ip
# c.KernelManager.ip = ''
## set the shell (ROUTER) port [default: random]
# See also: ConnectionFileMixin.shell_port
# c.KernelManager.shell_port = 0
## Time to wait for a kernel to terminate before killing it, in seconds. When a
# shutdown request is initiated, the kernel will be immediately sent an
# interrupt (SIGINT), followed by a shutdown_request message; after 1/2 of
# `shutdown_wait_time` it will be sent a terminate (SIGTERM) request, and finally
# at the end of `shutdown_wait_time` it will be killed (SIGKILL). terminate and
# kill may be equivalent on windows. Note that this value can be overridden by
# the in-use kernel provisioner, since shutdown times may vary by provisioned
# environment.
# Default: 5.0
# c.KernelManager.shutdown_wait_time = 5.0
## set the stdin (ROUTER) port [default: random]
# See also: ConnectionFileMixin.stdin_port
# c.KernelManager.stdin_port = 0
# See also: ConnectionFileMixin.transport
# c.KernelManager.transport = 'tcp'
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them
# with ZMQ sockets or ZMQStream objects. Objects can communicate with each
# other over the network via Session objects, and only need to work with the
# dict-based IPython message spec. The Session will handle
# serialization/deserialization, security, and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits,
# and signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output
# *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# Default: 1024
# c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
# Default: True
# c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
# Default: 65536
# c.Session.copy_threshold = 65536
## Debug output in the Session
# Default: False
# c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# Default: 65536
# c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom serialization.
# Containers larger than this are pickled outright.
# Default: 64
# c.Session.item_threshold = 64
## execution key, for signing messages.
# Default: b''
# c.Session.key = b''
## path to file containing execution key.
# Default: ''
# c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# Default: {}
# c.Session.metadata = {}
## The name of the packer for serializing messages.
# Should be one of 'json', 'pickle', or an import name
# for a custom callable serializer.
# Default: 'json'
# c.Session.packer = 'json'
## The UUID identifying this session.
# Default: ''
# c.Session.session = ''
## The digest scheme used to construct the message signatures.
# Must have the form 'hmac-HASH'.
# Default: 'hmac-sha256'
# c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages.
# Only used with custom functions for `packer`.
# Default: 'json'
# c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
# Default: 'acg34'
# c.Session.username = 'acg34'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
# Default: 'python3'
# c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow
# subclassing of the KernelManager for customized behavior.
# Default: 'jupyter_client.ioloop.IOLoopKernelManager'
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
## Share a single zmq.Context to talk to all my kernels
# Default: True
# c.MultiKernelManager.shared_context = True
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## White list of allowed kernel message types.
# When the list is empty, all message types are allowed.
# Default: []
# c.MappingKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be buffered in-memory.
# When True (default), messages are buffered and replayed on reconnect,
# avoiding lost messages due to interrupted connectivity.
# Disable if long-running kernels will produce too much output while
# no frontends are connected.
# Default: True
# c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy.
# Only effective if cull_idle_timeout > 0.
# Default: False
# c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections.
# Only effective if cull_idle_timeout > 0.
# Default: False
# c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be culled.
# Values of 0 or lower disable culling. Very short timeouts may result in kernels being culled
# for users with poor network connections.
# Default: 0
# c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
# Default: 300
# c.MappingKernelManager.cull_interval = 300
## The name of the default kernel to start
# See also: MultiKernelManager.default_kernel_name
# c.MappingKernelManager.default_kernel_name = 'python3'
## Timeout for giving up on a kernel (in seconds).
# On starting and restarting kernels, we check whether the
# kernel is running and responsive by sending kernel_info_requests.
# This sets the timeout in seconds for how long the kernel can take
# before being presumed dead.
# This affects the MappingKernelManager (which handles kernel restarts)
# and the ZMQChannelsHandler (which handles the startup).
# Default: 60
# c.MappingKernelManager.kernel_info_timeout = 60
## The kernel manager class. This is configurable to allow
# See also: MultiKernelManager.kernel_manager_class
# c.MappingKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
# Default: ''
# c.MappingKernelManager.root_dir = ''
## Share a single zmq.Context to talk to all my kernels
# See also: MultiKernelManager.shared_context
# c.MappingKernelManager.shared_context = True
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## List of allowed kernel names.
#
# By default, all installed kernels are allowed.
# Default: set()
# c.KernelSpecManager.allowed_kernelspecs = set()
## If there is no Python kernelspec registered and the IPython
# kernel is available, ensure it is added to the spec list.
# Default: True
# c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow
# subclassing of the KernelSpecManager for customized behavior.
# Default: 'jupyter_client.kernelspec.KernelSpec'
# c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Deprecated, use `KernelSpecManager.allowed_kernelspecs`
# Default: set()
# c.KernelSpecManager.whitelist = set()
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file,
# as well as directories,
# with special handling for JSON notebook documents.
#
# Most APIs take a path argument,
# which is always an API-style unicode path,
# and always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
# Default: False
# c.ContentsManager.allow_hidden = False
# Default: None
# c.ContentsManager.checkpoints = None
# Default: 'notebook.services.contents.checkpoints.Checkpoints'
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
# Default: {}
# c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API,
# which may be inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass,
# which will be much more efficient.
#
# Access to these files should be Authenticated.
# Default: 'notebook.files.handlers.FilesHandler'
# c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument
# specifying the root directory from which to serve files.
# Default: {}
# c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
# Default: ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure,
# such as removing notebook outputs or other side effects that
# should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# Default: None
# c.ContentsManager.pre_save_hook = None
# Default: '/'
# c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
# Default: 'Untitled Folder'
# c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
# Default: 'untitled'
# c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
# Default: 'Untitled'
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if successfully written, it replaces the old one.
# This procedure, namely 'atomic_writing', causes some bugs on file systems without operation order enforcement (like some networked fs).
# If set to False, the new notebook is written directly over the old one, which could fail (e.g. full filesystem or quota).
# Default: True
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin, ContentsManager) configuration
#------------------------------------------------------------------------------
## Allow access to hidden files
# See also: ContentsManager.allow_hidden
# c.FileContentsManager.allow_hidden = False
# See also: ContentsManager.checkpoints
# c.FileContentsManager.checkpoints = None
# See also: ContentsManager.checkpoints_class
# c.FileContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
# See also: ContentsManager.checkpoints_kwargs
# c.FileContentsManager.checkpoints_kwargs = {}
## If True (default), deleting files will send them to the
# platform's trash/recycle bin, where they can be recovered. If False,
# deleting files really deletes them.
# Default: True
# c.FileContentsManager.delete_to_trash = True
## handler class to use when serving raw file requests.
# See also: ContentsManager.files_handler_class
# c.FileContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
# See also: ContentsManager.files_handler_params
# c.FileContentsManager.files_handler_params = {}
##
# See also: ContentsManager.hide_globs
# c.FileContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk,
# such as converting the notebook to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
# Default: None
# c.FileContentsManager.post_save_hook = None
## Python callable or importstring thereof
# See also: ContentsManager.pre_save_hook
# c.FileContentsManager.pre_save_hook = None
# Default: ''
# c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
# Default: False
# c.FileContentsManager.save_script = False
## The base name used when creating untitled directories.
# See also: ContentsManager.untitled_directory
# c.FileContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
# See also: ContentsManager.untitled_file
# c.FileContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
# See also: ContentsManager.untitled_notebook
# c.FileContentsManager.untitled_notebook = 'Untitled'
## By default notebooks are saved on disk on a temporary file and then if
# successfully written, it replaces the old ones.
# See also: FileManagerMixin.use_atomic_writing
# c.FileContentsManager.use_atomic_writing = True
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
# Choices: any of ['sha384', 'md5', 'sha224', 'blake2b', 'sha3_224', 'sha1', 'sha3_512', 'blake2s', 'sha512', 'sha3_384', 'sha3_256', 'sha256']
# Default: 'sha256'
# c.NotebookNotary.algorithm = 'sha256'
## The storage directory for notary secret and database.
# Default: ''
# c.NotebookNotary.data_dir = ''
## The sqlite file in which to store notebook signatures.
# By default, this will be in your Jupyter data directory.
# You can set it to ':memory:' to disable sqlite writing to the filesystem.
# Default: ''
# c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
# Default: b''
# c.NotebookNotary.secret = b''
## The file where the secret key is stored.
# Default: ''
# c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures.
# The default uses an SQLite database.
# Default: traitlets.Undefined
# c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# AsyncMultiKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## The name of the default kernel to start
# See also: MultiKernelManager.default_kernel_name
# c.AsyncMultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow
# subclassing of the AsyncKernelManager for customized behavior.
# Default: 'jupyter_client.ioloop.AsyncIOLoopKernelManager'
# c.AsyncMultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.AsyncIOLoopKernelManager'
## Share a single zmq.Context to talk to all my kernels
# See also: MultiKernelManager.shared_context
# c.AsyncMultiKernelManager.shared_context = True
## Whether to make kernels available before the process has started. The
# kernel has a `.ready` future which can be awaited before connecting
# Default: False
# c.AsyncMultiKernelManager.use_pending_kernels = False
#------------------------------------------------------------------------------
# AsyncMappingKernelManager(MappingKernelManager, AsyncMultiKernelManager) configuration
#------------------------------------------------------------------------------
## White list of allowed kernel message types.
# See also: MappingKernelManager.allowed_message_types
# c.AsyncMappingKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
# See also: MappingKernelManager.buffer_offline_messages
# c.AsyncMappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy.
# See also: MappingKernelManager.cull_busy
# c.AsyncMappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections.
# See also: MappingKernelManager.cull_connected
# c.AsyncMappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled.
# See also: MappingKernelManager.cull_idle_timeout
# c.AsyncMappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
# See also: MappingKernelManager.cull_interval
# c.AsyncMappingKernelManager.cull_interval = 300
## The name of the default kernel to start
# See also: MultiKernelManager.default_kernel_name
# c.AsyncMappingKernelManager.default_kernel_name = 'python3'
## Timeout for giving up on a kernel (in seconds).
# See also: MappingKernelManager.kernel_info_timeout
# c.AsyncMappingKernelManager.kernel_info_timeout = 60
## The kernel manager class. This is configurable to allow
# See also: AsyncMultiKernelManager.kernel_manager_class
# c.AsyncMappingKernelManager.kernel_manager_class = 'jupyter_client.ioloop.AsyncIOLoopKernelManager'
# See also: MappingKernelManager.root_dir
# c.AsyncMappingKernelManager.root_dir = ''
## Share a single zmq.Context to talk to all my kernels
# See also: MultiKernelManager.shared_context
# c.AsyncMappingKernelManager.shared_context = True
## Whether to make kernels available before the process has started. The
# See also: AsyncMultiKernelManager.use_pending_kernels
# c.AsyncMappingKernelManager.use_pending_kernels = False
#------------------------------------------------------------------------------
# GatewayKernelManager(AsyncMappingKernelManager) configuration
#------------------------------------------------------------------------------
## Kernel manager that supports remote kernels hosted by Jupyter Kernel or
# Enterprise Gateway.
## White list of allowed kernel message types.
# See also: MappingKernelManager.allowed_message_types
# c.GatewayKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
# See also: MappingKernelManager.buffer_offline_messages
# c.GatewayKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy.
# See also: MappingKernelManager.cull_busy
# c.GatewayKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections.
# See also: MappingKernelManager.cull_connected
# c.GatewayKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled.
# See also: MappingKernelManager.cull_idle_timeout
# c.GatewayKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
# See also: MappingKernelManager.cull_interval
# c.GatewayKernelManager.cull_interval = 300
## The name of the default kernel to start
# See also: MultiKernelManager.default_kernel_name
# c.GatewayKernelManager.default_kernel_name = 'python3'
## Timeout for giving up on a kernel (in seconds).
# See also: MappingKernelManager.kernel_info_timeout
# c.GatewayKernelManager.kernel_info_timeout = 60
## The kernel manager class. This is configurable to allow
# See also: AsyncMultiKernelManager.kernel_manager_class
# c.GatewayKernelManager.kernel_manager_class = 'jupyter_client.ioloop.AsyncIOLoopKernelManager'
# See also: MappingKernelManager.root_dir
# c.GatewayKernelManager.root_dir = ''
## Share a single zmq.Context to talk to all my kernels
# See also: MultiKernelManager.shared_context
# c.GatewayKernelManager.shared_context = True
## Whether to make kernels available before the process has started. The
# See also: AsyncMultiKernelManager.use_pending_kernels
# c.GatewayKernelManager.use_pending_kernels = False
#------------------------------------------------------------------------------
# GatewayKernelSpecManager(KernelSpecManager) configuration
#------------------------------------------------------------------------------
## List of allowed kernel names.
# See also: KernelSpecManager.allowed_kernelspecs
# c.GatewayKernelSpecManager.allowed_kernelspecs = set()
## If there is no Python kernelspec registered and the IPython
# See also: KernelSpecManager.ensure_native_kernel
# c.GatewayKernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow
# See also: KernelSpecManager.kernel_spec_class
# c.GatewayKernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Deprecated, use `KernelSpecManager.allowed_kernelspecs`
# See also: KernelSpecManager.whitelist
# c.GatewayKernelSpecManager.whitelist = set()
#------------------------------------------------------------------------------
# GatewayClient(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This class manages the configuration. It's its own singleton class so that we
# can share these values across all objects. It also contains some helper methods
# to build request arguments out of the various config options.
## The authorization token used in the HTTP headers. (JUPYTER_GATEWAY_AUTH_TOKEN
# env var)
# Default: None
# c.GatewayClient.auth_token = None
## The filename of CA certificates or None to use defaults.
# (JUPYTER_GATEWAY_CA_CERTS env var)
# Default: None
# c.GatewayClient.ca_certs = None
## The filename for client SSL certificate, if any. (JUPYTER_GATEWAY_CLIENT_CERT
# env var)
# Default: None
# c.GatewayClient.client_cert = None
## The filename for client SSL key, if any. (JUPYTER_GATEWAY_CLIENT_KEY env var)
# Default: None
# c.GatewayClient.client_key = None
## The time allowed for HTTP connection establishment with the Gateway server.
# (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)
# Default: 40.0
# c.GatewayClient.connect_timeout = 40.0
## A comma-separated list of environment variable names that will be included, along with
# their values, in the kernel startup request. The corresponding `env_whitelist` configuration
# value must also be set on the Gateway server - since that configuration value indicates which
# environmental values to make available to the kernel. (JUPYTER_GATEWAY_ENV_WHITELIST env var)
# Default: ''
# c.GatewayClient.env_whitelist = ''
## The time allowed for HTTP reconnection with the Gateway server for the first time.
# Next will be JUPYTER_GATEWAY_RETRY_INTERVAL multiplied by two in factor of numbers of retries
# but less than JUPYTER_GATEWAY_RETRY_INTERVAL_MAX.
# (JUPYTER_GATEWAY_RETRY_INTERVAL env var)
# Default: 1.0
# c.GatewayClient.gateway_retry_interval = 1.0
## The maximum time allowed for HTTP reconnection retry with the Gateway server.
# (JUPYTER_GATEWAY_RETRY_INTERVAL_MAX env var)
# Default: 30.0
# c.GatewayClient.gateway_retry_interval_max = 30.0
## The maximum retries allowed for HTTP reconnection with the Gateway server.
# (JUPYTER_GATEWAY_RETRY_MAX env var)
# Default: 5
# c.GatewayClient.gateway_retry_max = 5
## Additional HTTP headers to pass on the request. This value will be converted to a dict.
# (JUPYTER_GATEWAY_HEADERS env var)
# Default: '{}'
# c.GatewayClient.headers = '{}'
## The password for HTTP authentication. (JUPYTER_GATEWAY_HTTP_PWD env var)
# Default: None
# c.GatewayClient.http_pwd = None
## The username for HTTP authentication. (JUPYTER_GATEWAY_HTTP_USER env var)
# Default: None
# c.GatewayClient.http_user = None
## The gateway API endpoint for accessing kernel resources
# (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)
# Default: '/api/kernels'
# c.GatewayClient.kernels_endpoint = '/api/kernels'
## The gateway API endpoint for accessing kernelspecs
# (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)
# Default: '/api/kernelspecs'
# c.GatewayClient.kernelspecs_endpoint = '/api/kernelspecs'
## The gateway endpoint for accessing kernelspecs resources
# (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)
# Default: '/kernelspecs'
# c.GatewayClient.kernelspecs_resource_endpoint = '/kernelspecs'
## The time allowed for HTTP request completion. (JUPYTER_GATEWAY_REQUEST_TIMEOUT
# env var)
# Default: 40.0
# c.GatewayClient.request_timeout = 40.0
## The url of the Kernel or Enterprise Gateway server where
# kernel specifications are defined and kernel management takes place.
# If defined, this Notebook server acts as a proxy for all kernel
# management and kernel specification retrieval. (JUPYTER_GATEWAY_URL env var)
# Default: None
# c.GatewayClient.url = None
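#
# Example (illustrative only — host and port are placeholders for your own deployment):
# to proxy all kernel management through a local Jupyter Kernel Gateway, uncomment:
# c.GatewayClient.url = "http://127.0.0.1:8888"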
## For HTTPS requests, determines if server's certificate should be validated or not.
# (JUPYTER_GATEWAY_VALIDATE_CERT env var)
# Default: True
# c.GatewayClient.validate_cert = True
## The websocket url of the Kernel or Enterprise Gateway server. If not provided, this value
# will correspond to the value of the Gateway url with 'ws' in place of 'http'. (JUPYTER_GATEWAY_WS_URL env var)
# Default: None
# c.GatewayClient.ws_url = None
#------------------------------------------------------------------------------
# TerminalManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
##
## Timeout (in seconds) in which a terminal has been inactive and ready to be culled.
# Values of 0 or lower disable culling.
# Default: 0
# c.TerminalManager.cull_inactive_timeout = 0
## The interval (in seconds) on which to check for terminals exceeding the
# inactive timeout value.
# Default: 300
# c.TerminalManager.cull_interval = 300
|
py | 7dfec5a961d8c74c80f4b8eb6c749905a249e9a5 | #
# This file is auto-generated, please do not modify directly!
#
import unittest
import gm
class TestMat3f(unittest.TestCase):
def testDefaultInitialization(self):
matrix = gm.Mat3f()
self.assertEqual(matrix, gm.Mat3f(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
def testElementInitialization(self):
gm.Mat3f(0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0)
def testElementReadAccess(self):
matrix = gm.Mat3f(0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0)
self.assertAlmostEqual(matrix[0], 0.0)
self.assertAlmostEqual(matrix[1], 2.0)
self.assertAlmostEqual(matrix[2], 4.0)
self.assertAlmostEqual(matrix[3], 6.0)
self.assertAlmostEqual(matrix[4], 8.0)
self.assertAlmostEqual(matrix[5], 10.0)
self.assertAlmostEqual(matrix[6], 12.0)
self.assertAlmostEqual(matrix[7], 14.0)
self.assertAlmostEqual(matrix[8], 16.0)
def testElementWriteAccess(self):
matrix = gm.Mat3f()
matrix[0] = 0.0
matrix[1] = 2.0
matrix[2] = 4.0
matrix[3] = 6.0
matrix[4] = 8.0
matrix[5] = 10.0
matrix[6] = 12.0
matrix[7] = 14.0
matrix[8] = 16.0
self.assertAlmostEqual(matrix[0], 0.0)
self.assertAlmostEqual(matrix[1], 2.0)
self.assertAlmostEqual(matrix[2], 4.0)
self.assertAlmostEqual(matrix[3], 6.0)
self.assertAlmostEqual(matrix[4], 8.0)
self.assertAlmostEqual(matrix[5], 10.0)
self.assertAlmostEqual(matrix[6], 12.0)
self.assertAlmostEqual(matrix[7], 14.0)
self.assertAlmostEqual(matrix[8], 16.0)
def testMatrixElementReadAccess(self):
matrix = gm.Mat3f(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)
self.assertEqual(matrix[0, 0], 0.0)
self.assertEqual(matrix[0, 1], 1.0)
self.assertEqual(matrix[0, 2], 2.0)
self.assertEqual(matrix[1, 0], 3.0)
self.assertEqual(matrix[1, 1], 4.0)
self.assertEqual(matrix[1, 2], 5.0)
self.assertEqual(matrix[2, 0], 6.0)
self.assertEqual(matrix[2, 1], 7.0)
self.assertEqual(matrix[2, 2], 8.0)
def testMatrixElementWriteAccess(self):
matrix = gm.Mat3f()
matrix[0, 0] = 0.0
matrix[0, 1] = 1.0
matrix[0, 2] = 2.0
matrix[1, 0] = 3.0
matrix[1, 1] = 4.0
matrix[1, 2] = 5.0
matrix[2, 0] = 6.0
matrix[2, 1] = 7.0
matrix[2, 2] = 8.0
self.assertEqual(matrix, gm.Mat3f(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0))
|
py | 7dfec60156de5ff5f7a0bb752a9a86cf58bdd258 | import random
import os
from . import line_count, create_idx_key, get_idx_key, get_write_fn, get_read_fn, _io_type
from . import _env, parser, json, glob, Timer
if _env['tqdm']:
from tqdm.auto import tqdm, trange
if _env['tf']:
from .tflow import setup_tf_serialization_features, serialize_tf_example, SerializeTFWorker, TFRWriter
from .tflow import TFDatasetFromTensors, TFRDataset
if _env['torch']:
from .torch import serialize_torch_example, SerializeTorchWorker, setup_torch_serialization_features
from .torch import TorchWriter, DynamicCollate, PylinesDataset, PylinesIterableFunctionDataset, PylinesDatasetFromIterator
#if _env['ray']:
# import ray.util.multiprocessing as mp
#else:
import math
from .logger import get_logger
import multiprocessing as mp
import hashlib
import gc
logger = get_logger()
_tokenize_fn = None
# https://stackoverflow.com/questions/620367/how-to-jump-to-a-particular-line-in-a-huge-text-file
class LineSeekableFile:
def __init__(self, seekable):
self.fin = seekable
self.line_map = list() # Map from line index -> file position.
self.line_map.append(0)
while seekable.readline():
self.line_map.append(seekable.tell())
def index(self):
return self.line_map
def __len__(self):
return len(self.line_map)
def __getitem__(self, index):
# NOTE: This assumes that you're not reading the file sequentially.
# For that, just use 'for line in file'.
self.fin.seek(self.line_map[index])
return self.fin.readline()
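# Illustrative usage (not part of the original module): the class records the byte offset
# of every line once, so later reads can seek directly instead of rescanning the file.
#
#   with open('data.jsonl', 'r') as f:        # any seekable text handle works
#       seekable = LineSeekableFile(f)
#       tenth_line = seekable[10]             # seeks to the stored offset, then readline()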
class LazyLoadFile:
def __init__(self, filename, skip_broken=True):
self.filename = filename
self.reader = get_read_fn(filename)
self._skip = skip_broken
self.fseek = LineSeekableFile(self.reader)
if self._skip:
self.lineidx = 0
self.badidx = 0
def random_iter(self, num_lines=None):
num_lines = num_lines if num_lines else len(self.fseek)
total_idx = [i for i in range(num_lines)]
random.shuffle(total_idx)
for idx in total_idx:
if self._skip:
try:
yield self.loads(self.fseek[idx])
self.lineidx += 1
except:
self.badidx += 1
else:
yield self.loads(self.fseek[idx])
def quick_iter(self, num_lines=None):
num_lines = num_lines if num_lines else len(self.fseek)
for x, line in enumerate(self.reader):
if self._skip:
try:
yield self.loads(line)
self.lineidx += 1
except:
self.badidx += 1
else:
yield self.loads(line)
if x >= num_lines:
break
def iter(self):
for line in self.reader:
if self._skip:
try:
yield self.loads(line)
self.lineidx += 1
except:
self.badidx += 1
else:
yield self.loads(line)
def loads(self, v):
return parser.parse(v).as_dict()
def __getitem__(self, idx):
return self.loads(self.fseek[idx])
def __len__(self):
return len(self.fseek)
def stats(self):
return {'loaded': self.lineidx, 'missed': self.badidx}
def resetstats(self):
self.lineidx = 0
self.badidx = 0
return {'loaded': self.lineidx, 'missed': self.badidx}
def setup_tokenize_fn(tokenizer_fn):
assert _env['transformers'], 'Transformers must be installed to use tokenize function'
global _tokenize_fn
_tokenize_fn = tokenizer_fn
def TokenizerWorker(ex):
try:
result = _tokenize_fn(ex)
return result
except:
return None
def setup_iter_fn(iter_fn):
global _iter_func
_iter_func = iter_fn
def IterWorker(ex):
try:
result = _iter_func(ex)
return result
except:
return None
def setup_filter_fns(filter_fns):
global _filter_func
_filter_func = filter_fns
def FilterWorker(ex):
result = {}
for key in ex:
if key not in _filter_func['bypass'] and key in _filter_func:
res = _filter_func[key](ex[key])
if res:
result[key] = res
elif key in _filter_func['bypass']:
result[key] = ex[key]
    if not result:
        return None
    return result
def FileIterator(filename):
with get_read_fn(filename) as f:
for line in f:
yield parser.parse(line).as_dict()
def make_hashes(inputs):
return hashlib.sha256(str.encode(inputs)).hexdigest()
def check_hashes(inputs, hashed_text):
if make_hashes(inputs) == hashed_text:
return hashed_text
return False
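# Illustrative: make_hashes("some text") returns the 64-character hex SHA-256 digest of the
# string; check_hashes("some text", digest) returns that digest on a match and False otherwise.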
class Pylines:
def __init__(self, input_fns=None, output_fn=None, skip_broken=True, overwrite_output=False, use_lazy=False, use_mp=True, use_idx=False, total_lines=0):
self._skip, self._lazy, self._mp, self._idx, self._overwrite = skip_broken, use_lazy, use_mp, use_idx, overwrite_output
self.total_lines = total_lines
self.writer, self.reader = None, None
self.input_fns, self.output_fn = None, None
self.stats = {}
self.timer = Timer()
self.stored_items = list()
self._io(input_fns, output_fn)
def as_tokenizer(self, tokenizer_fn=None, input_fns=None, use_mp=True):
if tokenizer_fn:
setup_tokenize_fn(tokenizer_fn)
assert _tokenize_fn, 'tokenizer_fn must first be set before being able to run'
self._io(input_fns, output_fn=None)
for result in self._as_iter(_tokenize_fn, TokenizerWorker, use_mp, desc='Tokenization'):
yield result
logger.info(f'{self.timer.stop()} for Tokenizing {self.total_lines} Items')
def run_tokenizer(self, tokenizer_fn=None, input_fns=None, output_fn=None, use_mp=True):
self._io(input_fns, output_fn)
for result in self.as_tokenizer(tokenizer_fn=tokenizer_fn, use_mp=use_mp):
self.write(result)
self.flush()
def as_processor(self, iter_func=None, input_fns=None, use_mp=True):
if iter_func:
setup_iter_fn(iter_func)
assert _iter_func, 'iter_func must first be set before running'
self._io(input_fns, output_fn=None)
for result in self._as_iter(_iter_func, IterWorker, use_mp, desc='Iterator Function'):
yield result
logger.info(f'{self.timer.stop()} for {self.total_lines} Items')
def run_processor(self, iter_func=None, input_fns=None, output_fn=None, use_mp=True):
self._io(input_fns, output_fn)
for result in self.as_processor(iter_func=iter_func, use_mp=use_mp):
self.write(result)
self.flush()
    # filter_funcs = {'text': filter_func, 'target': filter_func, 'idx': filter_func, 'bypass': ['key_1', 'key_2']}
def as_filter(self, filter_funcs=None, input_fns=None, use_mp=True):
if filter_funcs:
setup_filter_fns(filter_funcs)
assert _filter_func, 'filter_funcs must first be set before running'
self._io(input_fns, output_fn=None)
for result in self._as_iter(FilterWorker, FilterWorker, use_mp, desc='Filtering Items'):
yield result
logger.info(f'{self.timer.stop()} for Filtering {self.total_lines} Items')
def run_filter(self, filter_funcs=None, input_fns=None, output_fn=None, use_mp=True):
self._io(input_fns, output_fn)
for result in self.as_filter(filter_funcs=filter_funcs, use_mp=use_mp):
self.write(result)
self.flush()
def _tftensordict(self, all_examples, dataset_features=None):
_features = list()
_tensor_examples = dict()
for axis in dataset_features:
_features += dataset_features[axis]['names']
for feats in _features:
_tensor_examples[feats] = list()
for ex in all_examples:
for key, v in ex.items():
if key in _features:
_tensor_examples[key].extend(v)
return _tensor_examples
def _tfencoder(self, all_examples, dataset_features=None, slices=True, use_mp=True):
if dataset_features:
for axis in dataset_features:
assert 'names' in dataset_features[axis], 'names is a required key for dataset features.'
setup_tf_serialization_features(dataset_features)
if slices:
_tensor_ds = self._tftensordict(all_examples, dataset_features)
return _tensor_ds
else:
for serialized_ex in self._as_iter_items(all_examples, serialize_tf_example, SerializeTFWorker, use_mp=use_mp, desc=f'Serializing to TFRecords'):
yield serialized_ex
def _tfwriter(self, all_examples, output_dir, dataset_features=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.tfrecords', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True):
_total_match = self.count_matching(split_key, split) if split_key else self.total_lines
with TFRWriter(output_dir, _total_match, start_idx, split, write_string, shard_size, overwrite, use_tempdir) as writer:
for serialized_ex in self._tfencoder(all_examples, dataset_features, slices=False, use_mp=use_mp):
writer.write(serialized_ex)
tfrecord_files, total_items = writer.close()
return tfrecord_files, total_items
def _torchencoder(self, all_examples, dataset_features=None, use_mp=True):
if dataset_features:
setup_torch_serialization_features(dataset_features)
for serialized_ex in self._as_iter_items(all_examples, serialize_torch_example, SerializeTorchWorker, use_mp=use_mp, desc=f'Serializing to Torch'):
yield serialized_ex
def _torchwriter(self, all_examples, output_dir, dataset_features=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.pkl', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True, compression=True):
_total_match = self.count_matching(split_key, split) if split_key else self.total_lines
with TorchWriter(output_dir, _total_match, start_idx, split, write_string, shard_size, overwrite, use_tempdir, compression) as writer:
for serialized_ex in self._torchencoder(all_examples, dataset_features, use_mp):
writer.write(serialized_ex)
torch_files, total_items = writer.close()
return torch_files, total_items
def _tokenize_examples(self, tokenizer_fn, use_mp=True):
all_results = list()
if tokenizer_fn:
for result in self.as_tokenizer(tokenizer_fn, use_mp=use_mp):
all_results.append(result)
else:
logger.warning(f'No Tokenizer Function Provided. Assuming Input Files are Pretokenized.')
for result in self.as_iterator():
all_results.append(result)
logger.info(f'Loaded {len(all_results)} Examples. Keys: {list(i for i in all_results[0])}')
return all_results
def as_encoder(self, dataset_features=None, tokenizer_fn=None, serialization='tf', input_fns=None, use_mp=True):
_methods = ['tf', 'torch']
assert serialization in _methods, f'Currently only {_methods} are supported'
assert _env[serialization], f'{serialization} library is required to run Serialization'
self._io(input_fns, output_fn=None)
all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
if serialization == 'tf':
for serialized_ex in self._tfencoder(all_examples, dataset_features, use_mp):
yield serialized_ex
elif serialization == 'torch':
for serialized_ex in self._torchencoder(all_examples, dataset_features, use_mp):
yield serialized_ex
logger.info(f'{self.timer.stop()} for Serializing [{serialization}] {len(all_examples)} Examples')
def run_encoder(self, output_dir, dataset_features=None, tokenizer_fn=None, serialization='tf', input_fns=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.tfrecords', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True, compression=True):
self._io(input_fns, output_fn=None)
all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
if serialization == 'tf':
tfrecord_files, total_items = self._tfwriter(all_examples, output_dir, dataset_features, start_idx, split_key, split, write_string, shard_size, overwrite, use_tempdir, use_mp)
return tfrecord_files, total_items
elif serialization == 'torch':
torch_files, total_items = self._torchwriter(all_examples, output_dir, dataset_features, start_idx, split_key, split, write_string, shard_size, overwrite, use_tempdir, use_mp, compression)
return torch_files, total_items
def as_dataset(self, batch_sizes, dataset_features=None, tokenizer_fn=None, framework='tf', input_fns=None, split_key='split', splits=['train', 'validation', 'test'], use_mp=True):
self._io(input_fns, output_fn=None)
all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
_dataset = dict()
_encoder_fn = self._tfencoder if framework == 'tf' else None
if splits:
_splitdataset = self._dataset_splits(all_examples, split_key, splits)
for split in splits:
if _encoder_fn:
_encoded_examples = list()
for example in _encoder_fn(_splitdataset[split], dataset_features, use_mp):
_encoded_examples.append(example)
else:
_encoded_examples = _splitdataset[split]
_dataset[split] = {'examples': _encoded_examples, 'batch_size': batch_sizes[split]}
_splitdataset = None
gc.collect()
else:
if _encoder_fn:
_encoded_examples = list()
for example in _encoder_fn(all_examples, dataset_features, use_mp):
_encoded_examples.append(example)
else:
_encoded_examples = all_examples
_dataset['train'] = {'examples': _encoded_examples, 'batch_size': batch_sizes}
splits = ['train']
if framework == 'tf':
_tfdataset = TFDatasetFromTensors(_dataset, dataset_features)
return _tfdataset
elif framework == 'torch':
_torchdataset = dict()
for split in splits:
_torchdataset[split] = PylinesDataset(num_examples=len(_dataset[split]['examples']), examples=_dataset[split]['examples'])
            logger.info('Torch Dataset should be used with the DynamicCollate function and the DataLoader for optimal performance')
return _torchdataset
def _dataset_splits(self, all_examples, split_key, splits):
split_results = dict()
for split in splits:
split_results[split] = list()
for example in all_examples:
ex_split = example[split_key]
split_results[ex_split].append(example)
return split_results
def _as_iter(self, IterFunc, Worker, use_mp, desc):
pbar = trange(self.total_lines, desc=desc) if _env['tqdm'] else None
self.timer.start(desc)
if use_mp:
if isinstance(use_mp, int):
pool = mp.Pool(use_mp)
else:
pool = mp.Pool()
for fn in self.input_fns:
for result in pool.imap_unordered(Worker, FileIterator(fn)):
if result:
yield result
if pbar:
pbar.update()
else:
for fn in self.input_fns:
for result in self._file_iter(fn):
ex = IterFunc(result)
if ex:
yield ex
if pbar:
pbar.update()
if pbar:
pbar.close()
def _as_iter_items(self, items, IterFunc, Worker, use_mp, desc):
pbar = trange(len(items), desc=desc) if _env['tqdm'] else None
self.timer.start(desc)
if use_mp:
if isinstance(use_mp, int):
pool = mp.Pool(use_mp)
else:
pool = mp.Pool()
for result in pool.imap_unordered(Worker, items):
if result:
yield result
if pbar:
pbar.update()
else:
for item in items:
ex = IterFunc(item)
if ex:
yield ex
if pbar:
pbar.update()
if pbar:
pbar.close()
def deduplicate(self, keys, input_fns=None, output_fn=None, write=True):
self._io(input_fns, output_fn)
_sets = {}
results = list()
assert _io_type(keys) == 'list', 'Keys must be in the form of a list'
for key in keys:
_sets[key] = set()
for result in self.as_iterator():
_pass = True
for k in keys:
hashed_key = make_hashes(result[k])
if hashed_key in _sets[k]:
_pass = False
else:
_sets[k].add(hashed_key)
if _pass:
if write:
self.write(result)
else:
results.append(result)
if not write:
return results
def find(self, key, value, results='first', filename=None, verbose=False):
assert results in ['first', 'all'], 'Results should either be all or first to return'
_matched_results = list()
_matched = False
if filename:
for x, result in enumerate(self._fast_file_iter(filename)):
if result[key] == value:
_matched_results.append((result if _io_type(result) == 'dict' else result.as_dict()))
_matched = True
if _matched:
if verbose:
logger.info(f'Found Match on IDX: {x}')
if results == 'first':
break
elif results == 'all':
_matched = False
else:
for fn in self.input_fns:
for x, result in enumerate(self._fast_file_iter(fn)):
if result[key] == value:
_matched_results.append((result if _io_type(result) == 'dict' else result.as_dict()))
_matched = True
if results == 'first':
break
if _matched:
if verbose:
logger.info(f'Found Match on IDX: {x} in {fn}')
if results == 'first':
break
elif results == 'all':
_matched = False
if _matched and results == 'first':
break
return _matched_results
def merge(self, input_fns=None, output_fn=None):
self._io(input_fns, output_fn)
pbar = trange(self.total_lines, desc=f'Merging {len(self.input_fns)} Files') if _env['tqdm'] else None
self.timer.start(f'Merging {len(self.input_fns)} Files')
for result in self.as_iterator():
self.write(result)
if pbar:
pbar.update()
self.flush()
if pbar:
pbar.close()
logger.info(f'{self.timer.stop()} with {self.total_lines} Items to {self.output_fn}')
def count_matching(self, key, value, input_fns=None):
_matches = 0
self._io(input_fns)
for result in self.as_iterator():
if result[key] == value:
_matches += 1
return _matches
def verify_linecount(self):
_idx = 0
for result in self.as_iterator():
if result and result != '':
_idx += 1
logger.info(f'Original Count: {self.total_lines} Verified Count: {_idx}')
if _idx >= self.total_lines:
self.total_lines = _idx
return _idx
def write(self, item):
if not self.writer:
assert self.output_fn, 'Output File must be set prior to write. call .writefile(filename) to set the output file'
self.writer_fn = get_write_fn(self.output_fn, overwrite=self._overwrite)
self.writer = self.writer_fn.write
self._writeidx = 0
self.flushidx = math.ceil(self.total_lines / 10)
self.writer(json.dumps(item, ensure_ascii=False))
self.writer('\n')
self._writeidx += 1
if self._writeidx % self.flushidx == 0:
self.writer_fn.flush()
self._writeidx = 0
def index(self, idx, fn=None):
if fn:
return self._file_idx(idx, fn)
else:
results = {}
for fn in self.input_fns:
results[fn] = self._file_idx(idx, fn)
return results
def to_dict(self, input_fns=None):
results = {}
self._io(input_fns)
for result in self.as_iterator():
for key in result:
if key not in results:
results[key] = {'items': [result[key]], 'count': 1}
else:
results[key]['items'].append(result[key])
results[key]['count'] += 1
return results
def to_list(self, input_fns=None):
results = list()
self._io(input_fns)
for result in self.as_iterator():
results.append(result)
return results
def from_list(self, all_items, write=True, clear_items=False, output_fn=None):
self._io(input_fns=None, output_fn=output_fn)
assert _io_type(all_items) == 'list', 'This function must be used with a list'
if write:
logger.info(f'Writing {len(all_items)} to {self.output_fn}')
for item in all_items:
self.write(item)
else:
if clear_items:
logger.info(f'Flushing Existing Items from Memory')
self.stored_items = None
gc.collect()
self.stored_items = list()
self.stored_items += all_items
logger.info(f'Stored {len(all_items)} to Memory. Total Stored Items: {len(self.stored_items)}. Call pylines.stored_items to access items.')
def to_batches(self, all_items=None, batch_size=2):
all_batches = list()
all_items = all_items if all_items else self.stored_items
for batch in self._build_batches(all_items, batch_size):
all_batches += [batch]
return all_batches
def as_iterator(self):
for fn in self.input_fns:
for item in self._file_iter(fn):
yield item
def _build_batches(self, lst, batch_size):
for i in range(0, len(lst), batch_size):
yield lst[i:i + batch_size]
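    # Illustrative: _build_batches([1, 2, 3, 4, 5], batch_size=2) yields [1, 2], [3, 4], [5];
    # to_batches() above simply collects those chunks into a single list.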
def _file_idx(self, idx, fn):
reader = get_read_fn(fn)
for x, line in enumerate(reader):
if x < idx:
pass
elif x == idx:
return self.loads(line)
reader.close()
def _file_iter(self, fn):
reader = get_read_fn(fn)
if self._skip:
self._reset_stats(fn)
for line in reader:
if self._skip:
try:
yield self.loads(line)
self.stats[fn]['read'] += 1
except:
self.stats[fn]['missed'] += 1
else:
yield self.loads(line)
reader.close()
def _fast_file_iter(self, fn):
reader = get_read_fn(fn)
if self._skip:
self._reset_stats(fn)
for line in reader:
if self._skip:
try:
yield self.loads(line)
self.stats[fn]['read'] += 1
except:
self.stats[fn]['missed'] += 1
else:
yield self.loads(line)
reader.close()
def _get_file_lines(self, input_fns=None):
if input_fns:
for fn in input_fns:
self.total_lines += line_count(fn)
else:
self.total_lines = 0
for fn in self.input_fns:
self.total_lines += line_count(fn)
def __iter__(self):
for fn in self.input_fns:
for result in self._file_iter(fn):
yield result
def _io(self, input_fns=None, output_fn=None):
if input_fns:
self._setup_input_fns(input_fns)
self._get_file_lines()
if output_fn:
self._setup_output_fn(output_fn)
def _setup_input_fns(self, input_fns):
in_files = []
if _io_type(input_fns) == 'str':
if input_fns.endswith('*'):
in_files = glob(input_fns)
else:
in_files = [input_fns]
elif _io_type(input_fns) == 'list':
for fn in input_fns:
if fn.endswith('*'):
in_files += glob(fn)
else:
in_files.append(fn)
else:
raise ValueError('Input Filenames should be a string or list')
if self.input_fns:
in_files = [f for f in in_files if f not in self.input_fns]
if len(in_files) != 0:
self.input_fns += in_files
else:
self.input_fns = in_files
def _setup_output_fn(self, output_fn):
if self.writer:
self.writer_fn.flush()
self.writer_fn.close()
self.writer, self.writer_fn = None, None
if _io_type(output_fn) == 'str':
self.output_fn = output_fn
else:
raise ValueError('Output Filenames should be a string')
    def set_output_fn(self, output_fn):
        self._setup_output_fn(output_fn)
def parse(self, v):
return parser.parse(v)
def loads(self, v):
if _io_type(v) == 'bytes':
return parser.parse(v).as_dict()
else:
return json.loads(v)
def load(self, v):
return json.load(v)
def dumps(self, v):
return json.dumps(v, ensure_ascii=False)
def dump(self, fn, v):
if _io_type(fn) == 'str':
json.dump(v, get_write_fn(fn, overwrite=self._overwrite))
else:
json.dump(v, fn)
def _reset_stats(self, fn):
self.stats[fn] = {'read': 0, 'missed': 0}
def clear_input_files(self):
self.stats = {}
self.input_fns = None
def set_input_files(self, input_fns):
self.clear_input_files()
self._setup_input_fns(input_fns)
def add_files(self, input_fns):
self._setup_input_fns(input_fns)
def set_writefile(self, output_fn, overwrite=False):
self._overwrite = overwrite
self._setup_output_fn(output_fn)
def close(self):
if self.writer:
self.writer_fn.flush()
self.writer_fn.close()
if self.reader:
self.reader.close()
def flush(self):
if self.writer:
self.writer_fn.flush()
def linecount(self, filename=None):
results = {}
if filename:
results[filename] = line_count(filename)
return results
for fn in self.input_fns:
results[fn] = line_count(fn)
return results
def __len__(self):
return self.total_lines
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
|
py | 7dfec6962caf536d310e5cff349c735a7355c087 | # Author: Alexandre Barachant <[email protected]>
# Sylvain Chevallier <[email protected]>
# License: BSD Style.
import json
import os
import os.path as osp
import requests
from mne import get_config, set_config
from mne.datasets.utils import _get_path
from mne.utils import _fetch_file, _url_to_local_path, verbose
from pooch import file_hash, retrieve
from requests.exceptions import HTTPError
def get_dataset_path(sign, path):
"""Returns the dataset path allowing for changes in MNE_DATA
config
Parameters
----------
sign : str
Signifier of dataset
path : None | str
Location of where to look for the data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_(signifier)_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
Returns
-------
path : None | str
Location of where to look for the data storing location
"""
sign = sign.upper()
key = "MNE_DATASETS_{:s}_PATH".format(sign)
if get_config(key) is None:
if get_config("MNE_DATA") is None:
path_def = osp.join(osp.expanduser("~"), "mne_data")
print(
"MNE_DATA is not already configured. It will be set to "
"default location in the home directory - "
+ path_def
+ "\nAll datasets will be downloaded to this location, if anything is "
"already downloaded, please move manually to this location"
)
if not osp.isdir(path_def):
os.makedirs(path_def)
set_config("MNE_DATA", osp.join(osp.expanduser("~"), "mne_data"))
set_config(key, get_config("MNE_DATA"))
return _get_path(path, key, sign)
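# Illustrative usage (the "BNCI" signifier is just an example): resolve the storage root,
# falling back to ~/mne_data when neither MNE_DATASETS_BNCI_PATH nor MNE_DATA is configured.
#
#   root = get_dataset_path("BNCI", None)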
@verbose
def data_path(url, sign, path=None, force_update=False, update_path=True, verbose=None):
"""Get path to local copy of given dataset URL. **Deprecated**
This is a low-level function useful for getting a local copy of a
remote dataset. It is deprecated in favor of data_dl.
Parameters
----------
url : str
Path to remote location of data
sign : str
Signifier of dataset
path : None | str
Location of where to look for the data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_(signifier)_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None, **Deprecated**
Unused, kept for compatibility purpose.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
path : list of str
Local path to the given data file. This path is contained inside a list
of length one, for compatibility.
""" # noqa: E501
path = get_dataset_path(sign, path)
key_dest = "MNE-{:s}-data".format(sign.lower())
destination = _url_to_local_path(url, osp.join(path, key_dest))
# Fetch the file
if not osp.isfile(destination) or force_update:
if osp.isfile(destination):
os.remove(destination)
if not osp.isdir(osp.dirname(destination)):
os.makedirs(osp.dirname(destination))
_fetch_file(url, destination, print_destination=False)
return destination
@verbose
def data_dl(url, sign, path=None, force_update=False, verbose=None):
"""Download file from url to specified path
This function should replace data_path as the MNE will not support the download
of dataset anymore. This version is using Pooch.
Parameters
----------
url : str
Path to remote location of data
sign : str
Signifier of dataset
path : None | str
Location of where to look for the data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_(signifier)_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
path : list of str
Local path to the given data file. This path is contained inside a list
of length one, for compatibility.
"""
path = get_dataset_path(sign, path)
key_dest = "MNE-{:s}-data".format(sign.lower())
destination = _url_to_local_path(url, osp.join(path, key_dest))
# Fetch the file
if not osp.isfile(destination) or force_update:
if osp.isfile(destination):
os.remove(destination)
if not osp.isdir(osp.dirname(destination)):
os.makedirs(osp.dirname(destination))
known_hash = None
else:
known_hash = file_hash(destination)
dlpath = retrieve(
url, known_hash, fname=osp.basename(url), path=osp.dirname(destination)
)
return dlpath
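# Illustrative usage (the URL is a placeholder, not a real dataset endpoint):
#
#   local_path = data_dl("https://example.org/subject01.mat", "EXAMPLE")
#
# The file lands under <dataset path>/MNE-example-data/... and, once present, its hash is
# passed to pooch.retrieve so an unchanged local copy is not downloaded again.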
# This function is from https://github.com/cognoma/figshare (BSD-3-Clause)
def fs_issue_request(method, url, headers, data=None, binary=False):
"""Wrapper for HTTP request
Parameters
----------
method : str
HTTP method. One of GET, PUT, POST or DELETE
url : str
URL for the request
headers: dict
HTTP header information
data: dict
Figshare article data
binary: bool
Whether data is binary or not
Returns
-------
response_data: dict
JSON response for the request returned as python dict
"""
if data is not None and not binary:
data = json.dumps(data)
response = requests.request(method, url, headers=headers, data=data)
try:
response.raise_for_status()
try:
response_data = json.loads(response.text)
except ValueError:
response_data = response.content
except HTTPError as error:
print("Caught an HTTPError: {}".format(error))
print("Body:\n", response.text)
raise
return response_data
def fs_get_file_list(article_id, version=None):
"""List all the files associated with a given article.
Parameters
----------
article_id : str or int
Figshare article ID
version : str or id, default is None
Figshare article version. If None, selects the most recent version.
Returns
-------
response : dict
HTTP request response as a python dict
"""
    fsurl = "https://api.figshare.com/v2"
    headers = {"Content-Type": "application/json"}
    if version is None:
        url = fsurl + "/articles/{}/files".format(article_id)
        response = fs_issue_request("GET", url, headers=headers)
        return response
    else:
        url = fsurl + "/articles/{}/versions/{}".format(article_id, version)
        request = fs_issue_request("GET", url, headers=headers)
        return request["files"]
def fs_get_file_hash(filelist):
"""Returns a dict associating figshare file id to MD5 hash
Parameters
----------
filelist : list of dict
HTTP request response from fs_get_file_list
Returns
-------
response : dict
keys are file_id and values are md5 hash
"""
return {str(f["id"]): "md5:" + f["supplied_md5"] for f in filelist}
def fs_get_file_id(filelist):
"""Returns a dict associating filename to figshare file id
Parameters
----------
filelist : list of dict
HTTP request response from fs_get_file_list
Returns
-------
response : dict
        keys are filename and values are file_id
"""
return {f["name"]: str(f["id"]) for f in filelist}
def fs_get_file_name(filelist):
"""Returns a dict associating figshare file id to filename
Parameters
----------
filelist : list of dict
HTTP request response from fs_get_file_list
Returns
-------
response : dict
keys are file_id and values are file name
"""
return {str(f["id"]): f["name"] for f in filelist}
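# Illustrative example (article and file ids below are made up): the helpers are typically
# chained to map a filename to its figshare id and MD5 hash:
#
#   filelist = fs_get_file_list(123456)
#   md5_by_id = fs_get_file_hash(filelist)     # {"9876": "md5:..."}
#   id_by_name = fs_get_file_id(filelist)      # {"subject01.mat": "9876"}
#   known_hash = md5_by_id[id_by_name["subject01.mat"]]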
|
py | 7dfec70777e8c914e82162a3f6b699f248b3783d | import numpy as np
import json
"""
Do not modify this file.
"""
# Softmax loss and Softmax gradient
### Loss functions ###
class softmax_cross_entropy:
def __init__(self):
self.expand_Y = None
self.calib_logit = None
self.sum_exp_calib_logit = None
self.prob = None
def forward(self, X, Y):
self.expand_Y = np.zeros(X.shape).reshape(-1)
self.expand_Y[Y.astype(int).reshape(-1) + np.arange(X.shape[0]) * X.shape[1]] = 1.0
self.expand_Y = self.expand_Y.reshape(X.shape)
self.calib_logit = X - np.amax(X, axis = 1, keepdims = True)
self.sum_exp_calib_logit = np.sum(np.exp(self.calib_logit), axis = 1, keepdims = True)
self.prob = np.exp(self.calib_logit) / self.sum_exp_calib_logit
forward_output = - np.sum(np.multiply(self.expand_Y, self.calib_logit - np.log(self.sum_exp_calib_logit))) / X.shape[0]
return forward_output
def backward(self, X, Y):
backward_output = - (self.expand_Y - self.prob) / X.shape[0]
return backward_output
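# Worked example (illustrative): for a single sample with logits X = [[0., 0.]] and
# label Y = [[0]], forward() gives prob = [[0.5, 0.5]] and a loss of ln(2) ~ 0.6931,
# and backward() returns -([[1, 0]] - [[0.5, 0.5]]) / 1 = [[-0.5, 0.5]].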
### Momentum ###
def add_momentum(model):
momentum = dict()
for module_name, module in model.items():
if hasattr(module, 'params'):
for key, _ in module.params.items():
momentum[module_name + '_' + key] = np.zeros(module.gradient[key].shape)
return momentum
def data_loader_mnist(dataset):
# This function reads the MNIST data and separate it into train, val, and test set
with open(dataset, 'r') as f:
data_set = json.load(f)
train_set, valid_set, test_set = data_set['train'], data_set['valid'], data_set['test']
Xtrain = np.array(train_set[0])
Ytrain = np.array(train_set[1])
Xvalid = np.array(valid_set[0])
Yvalid = np.array(valid_set[1])
Xtest = np.array(test_set[0])
Ytest = np.array(test_set[1])
return Xtrain, Ytrain, Xvalid, Yvalid, Xtest, Ytest
def predict_label(f):
# This is a function to determine the predicted label given scores
if f.shape[1] == 1:
return (f > 0).astype(float)
else:
return np.argmax(f, axis=1).astype(float).reshape((f.shape[0], -1))
class DataSplit:
def __init__(self, X, Y):
self.X = X
self.Y = Y
self.N, self.d = self.X.shape
def get_example(self, idx):
batchX = np.zeros((len(idx), self.d))
batchY = np.zeros((len(idx), 1))
for i in range(len(idx)):
batchX[i] = self.X[idx[i]]
batchY[i, :] = self.Y[idx[i]]
return batchX, batchY |
py | 7dfec764c914a8bfa53efc065569bae0e556442c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# FUTURE
# =============================================================================
from __future__ import unicode_literals
# =============================================================================
# DOC
# =============================================================================
__doc__ = """"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .core import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class SmallKurtosis(Extractor):
r"""
**SmallKurtosis**
Small sample kurtosis of the magnitudes.
.. math::
SmallKurtosis = \frac{N (N+1)}{(N-1)(N-2)(N-3)}
\sum_{i=1}^N (\frac{m_i-\hat{m}}{\sigma})^4 -
\frac{3( N-1 )^2}{(N-2) (N-3)}
For a normal distribution, the small kurtosis should be zero:
.. code-block:: pycon
>>> fs = feets.FeatureSpace(only=['SmallKurtosis'])
>>> features, values = fs.extract(**lc_normal)
>>> dict(zip(features, values))
{'SmallKurtosis': 0.044451779515607193}
See http://www.xycoon.com/peakedness_small_sample_test_1.htm
References
----------
.. [richards2011machine] Richards, J. W., Starr, D. L., Butler, N. R.,
Bloom, J. S., Brewer, J. M., Crellin-Quick, A., ... &
Rischard, M. (2011). On machine-learned classification of variable stars
with sparse and noisy time-series data.
The Astrophysical Journal, 733(1), 10. Doi:10.1088/0004-637X/733/1/10.
"""
data = ['magnitude']
features = ["SmallKurtosis"]
def fit(self, magnitude):
n = len(magnitude)
mean = np.mean(magnitude)
std = np.std(magnitude)
S = sum(((magnitude - mean) / std) ** 4)
c1 = float(n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))
c2 = float(3 * (n - 1) ** 2) / ((n - 2) * (n - 3))
return {"SmallKurtosis": c1 * S - c2}
|
py | 7dfec8dc6f9582f21be812730b444e2430b8fa82 | from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0011_merge_sharable_with_cascadeelement'),
]
operations = [
migrations.AlterField(
model_name='cascadeelement',
name='shared_glossary',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cmsplugin_cascade.SharedGlossary'),
),
]
|
py | 7dfec921bae27c946d38dc4284993184e91a446f | from tpk4128.client import SocketClient
import time
import cv2
import numpy as np
def main():
client = SocketClient('10.0.0.7', 50007)
while True:
client.sendall(b'Hello World!')
# Tip: len(img.tostring())
size, data = client.recv(2764800) # my macbook 2764800
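        # 2764800 bytes = 720 * 1280 * 3, i.e. one full uint8 BGR frame at the shape used in the reshape below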
if not data:
break
# Tip: img.dtype, img.shape
        img = np.frombuffer(data, dtype=np.uint8).reshape(720, 1280, 3) # Dimensions 720, 1280, 3
cv2.imshow('img', img)
if cv2.waitKey(20) == 27: # Esc: 27
break
if __name__ == '__main__':
main()
|
py | 7dfec93e29c966f08fd53b3d62d205371b8bce36 |
GSM_03_38_POR_BASIC = {
0x00: "@",
0x01: "\u00a3",
0x02: "$",
0x03: "\u00a5",
0x04: "\u00ea",
0x05: "\u00e9",
0x06: "\u00fa",
0x07: "\u00ed",
0x08: "\u00f3",
0x09: "\u00e7",
0x0a: "\n",
0x0b: "\u00d4",
0x0c: "\u00f4",
0x0d: "\r",
0x0e: "\u00c1",
0x0f: "\u00e1",
0x10: "\u0394",
0x11: "_",
0x12: "\u00aa",
0x13: "\u00c7",
0x14: "\u00c0",
0x15: "\u221e",
0x16: "^",
0x17: "\\",
0x18: "\u20ac",
0x19: "\u00d3",
0x1a: "|",
0x1b: "",
0x1c: "\u00c2",
0x1d: "\u00e2",
0x1e: "\u00ca",
0x1f: "\u00c9",
0x20: " ",
0x21: "!",
0x22: '"',
0x23: "#",
0x24: "\u00ba",
0x25: "%",
0x26: "&",
0x27: "'",
0x28: "(",
0x29: ")",
0x2a: "*",
0x2b: "+",
0x2c: ",",
0x2d: "-",
0x2e: ".",
0x2f: "/",
0x30: "0",
0x31: "1",
0x32: "2",
0x33: "3",
0x34: "4",
0x35: "5",
0x36: "6",
0x37: "7",
0x38: "8",
0x39: "9",
0x3a: ":",
0x3b: ";",
0x3c: "<",
0x3d: "=",
0x3e: ">",
0x3f: "?",
0x40: "\u00cd",
0x41: "A",
0x42: "B",
0x43: "C",
0x44: "D",
0x45: "E",
0x46: "F",
0x47: "G",
0x48: "H",
0x49: "I",
0x4a: "J",
0x4b: "K",
0x4c: "L",
0x4d: "M",
0x4e: "N",
0x4f: "O",
0x50: "P",
0x51: "Q",
0x52: "R",
0x53: "S",
0x54: "T",
0x55: "U",
0x56: "V",
0x57: "W",
0x58: "X",
0x59: "Y",
0x5a: "Z",
0x5b: "\u00c3",
0x5c: "\u00d5",
0x5d: "\u00da",
0x5e: "\u00dc",
0x5f: "\u00a7",
0x60: "~",
0x61: "a",
0x62: "b",
0x63: "c",
0x64: "d",
0x65: "e",
0x66: "f",
0x67: "g",
0x68: "h",
0x69: "i",
0x6a: "j",
0x6b: "k",
0x6c: "l",
0x6d: "m",
0x6e: "n",
0x6f: "o",
0x70: "p",
0x71: "q",
0x72: "r",
0x73: "s",
0x74: "t",
0x75: "u",
0x76: "v",
0x77: "w",
0x78: "x",
0x79: "y",
0x7a: "z",
0x7b: "\u00e3",
0x7c: "\u00f5",
0x7d: "`",
0x7e: "\u00fc",
0x7f: "\u00e0",
}
GSM_03_38_POR_EXTENSION = {
0x00: "",
0x01: "",
0x02: "",
0x03: "",
0x04: "",
0x05: "\u00ea",
0x06: "",
0x07: "",
0x08: "",
0x09: "\u00e7",
0x0a: "\n",
0x0b: "\u00d4",
0x0c: "\u00f4",
0x0d: "",
0x0e: "\u00c1",
0x0f: "\u00e1",
0x10: "",
0x11: "",
0x12: "\u03a6",
0x13: "\u0393",
0x14: "^",
0x15: "\u03a9",
0x16: "\u03a0",
0x17: "\u03a8",
0x18: "\u03a3",
0x19: "\u0398",
0x1a: "",
0x1b: "",
0x1c: "",
0x1d: "",
0x1e: "",
0x1f: "\u00ca",
0x20: "",
0x21: "",
0x22: "",
0x23: "",
0x24: "",
0x25: "",
0x26: "",
0x27: "",
0x28: "{",
0x29: "}",
0x2a: "",
0x2b: "",
0x2c: "",
0x2d: "",
0x2e: "",
0x2f: "\\",
0x30: "",
0x31: "",
0x32: "",
0x33: "",
0x34: "",
0x35: "",
0x36: "",
0x37: "",
0x38: "",
0x39: "",
0x3a: "",
0x3b: "",
0x3c: "[",
0x3d: "~",
0x3e: "]",
0x3f: "",
0x40: "|",
0x41: "\u00c0",
0x42: "",
0x43: "",
0x44: "",
0x45: "",
0x46: "",
0x47: "",
0x48: "",
0x49: "\u00cd",
0x4a: "",
0x4b: "",
0x4c: "",
0x4d: "",
0x4e: "",
0x4f: "\u00d3",
0x50: "",
0x51: "",
0x52: "",
0x53: "",
0x54: "",
0x55: "\u00da",
0x56: "",
0x57: "",
0x58: "",
0x59: "",
0x5a: "",
0x5b: "\u00c3",
0x5c: "\u00d5",
0x5d: "",
0x5e: "",
0x5f: "",
0x60: "",
0x61: "\u00c2",
0x62: "",
0x63: "",
0x64: "",
0x65: "\u20ac",
0x66: "",
0x67: "",
0x68: "",
0x69: "\u00ed",
0x6a: "",
0x6b: "",
0x6c: "",
0x6d: "",
0x6e: "",
0x6f: "\u00f3",
0x70: "",
0x71: "",
0x72: "",
0x73: "",
0x74: "",
0x75: "\u00fa",
0x76: "",
0x77: "",
0x78: "",
0x79: "",
0x7a: "",
0x7b: "\u00e3",
0x7c: "\u00f5",
0x7d: "",
0x7e: "",
0x7f: "\u00e2",
}
# These are characters that shouldn't be in a properly decoded string
UNRECOGNIZED_CHAR_CODES = {
0x00: True,
0x01: True,
0x02: True,
0x03: True,
0x04: True,
0x05: True,
0x06: True,
0x07: True,
0x08: True,
0x09: True,
0x0b: True,
0x0c: True,
0x0e: True,
0x0f: True,
0x10: True,
0x11: True,
0x12: True,
0x13: True,
0x14: True,
0x15: True,
0x16: True,
0x17: True,
0x18: True,
0x19: True,
0x1a: True,
0x1b: True,
0x1c: True,
0x1d: True,
0x1e: True,
0x1f: True,
0x7f: True,
}
ESCAPE_CHAR = 0x1b
def looks_like_gsm(text):
for char in text:
if UNRECOGNIZED_CHAR_CODES.get(ord(char), False):
return True
return False
def gsm_to_unicode(text):
"""
Converts each character in text to it's equivalent GSM-decoded
character. If the character is anything other than ESCAPE_CHAR,
then it's decoded with GSM_03_38_POR_BASIC.
If a character is the ESCAPE_CHAR, then it is skipped and the
next character is decoded with GSM_03_38_POR_EXTENSION.
"""
result = ""
is_escape = False
for char in text:
gsm_char_code = ord(char)
if gsm_char_code == ESCAPE_CHAR:
is_escape = True
elif is_escape:
ext_char = GSM_03_38_POR_EXTENSION.get(gsm_char_code, "")
if ext_char:
result += ext_char
else:
result += " %s" % GSM_03_38_POR_BASIC.get(gsm_char_code, "")
is_escape = False
else:
result += GSM_03_38_POR_BASIC.get(gsm_char_code, "")
return result
def convert_raw_string(text):
"""
Unfortunately, it's not possible to tell whether we'll get a message
that's encoded with GSM or not. So all we can do is decode
messages that look like they are encoded with GSM.
"""
if looks_like_gsm(text):
return gsm_to_unicode(text)
else:
return text
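# Illustrative examples (not part of the original module), using the tables above:
#   gsm_to_unicode("\x05") -> "é"         (0x05 in the basic table)
#   gsm_to_unicode("\x1b\x65") -> "€"     (ESC followed by 0x65 in the extension table)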
|
py | 7dfec94eb28595611df5af7c7a3207d1bdc0029e | from django.urls import re_path, include, path
from data.views import Classify, ClassifyTags, classify_stats
urlpatterns = [
re_path('^classify/$', Classify.as_view(), name='classify'),
re_path('^tags/$', ClassifyTags.as_view(), name='tags'),
re_path('^status/$', classify_stats, name='status'),
]
|
py | 7dfec9946cd7567c9fe0da83b5abd87ca2f3669b | """
https://github.com/hysts/pytorch_cutmix/blob/master/cutmix.py
MIT License
Copyright (c) 2019 hysts
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def cutmix_data(x, y, alpha):
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
if lam < 0.5:
lam = 1 - lam
else:
lam = 1.
if type(x) != tuple:
batch_size = x.size()[0]
index = torch.randperm(batch_size).cuda()
image_h, image_w = x.size()[2:]
cx = np.random.uniform(0, image_w)
cy = np.random.uniform(0, image_h)
w = image_w * np.sqrt(1 - lam)
h = image_h * np.sqrt(1 - lam)
x0 = int(np.round(max(cx - w / 2, 0)))
x1 = int(np.round(min(cx + w / 2, image_w)))
y0 = int(np.round(max(cy - h / 2, 0)))
y1 = int(np.round(min(cy + h / 2, image_h)))
mixed_x = x
mixed_x[:, :, y0:y1, x0:x1] = x[index, :, y0:y1, x0:x1]
else:
# x[0]: (B, C, W, H)
# x[1] ~ x[N]: (B, ?)
batch_size = x[0].size()[0]
index = torch.randperm(batch_size).cuda()
image_h, image_w = x[0].size()[2:]
cx = np.random.uniform(0, image_w)
cy = np.random.uniform(0, image_h)
w = image_w * np.sqrt(1 - lam)
h = image_h * np.sqrt(1 - lam)
x0 = int(np.round(max(cx - w / 2, 0)))
x1 = int(np.round(min(cx + w / 2, image_w)))
y0 = int(np.round(max(cy - h / 2, 0)))
y1 = int(np.round(min(cy + h / 2, image_h)))
mix_x = x[0]
mix_x[:, :, y0:y1, x0:x1] = x[0][index, :, y0:y1, x0:x1]
mixed_x = [mix_x]
for i in range(len(x)):
if i > 0:
mixed_x.append(lam * x[i] + (1 - lam) * x[i][index,:])
mixed_x = tuple(mixed_x)
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam |
py | 7dfec99e6bd771bc4de988e49f2d023c40ebca70 | from fastapi import FastAPI
from starlette.middleware.sessions import SessionMiddleware
from starlette.middleware.cors import CORSMiddleware
from .resources import models, tests, vocab, results, auth, simulations, livepapers
from . import settings
description = """
The EBRAINS Model Validation Service is a web service to support
the structured [validation](https://en.wikipedia.org/wiki/Verification_and_validation_of_computer_simulation_models)
of neuroscience models.
Using the [EBRAINS Knowledge Graph](https://kg.ebrains.eu) as its data store, the Model Validation Service provides:
- a catalog of models, each linked to validations of that model;
- a library of validation tests, implemented in Python, each linked to reference data;
- a database of validation results.
The Model Validation Service is used by the EBRAINS Model Catalog web app (new version coming soon).
A [Python client](https://hbp-validation-client.readthedocs.io/) for the service is also available.
These pages provide interactive documentation. To try out queries, you will need an [EBRAINS user account](https://ebrains.eu/register/).
Please [login here](https://validation-v2.brainsimulation.eu/login), then copy the "access token" into the dialog that appears when you click "Authorize" below.
(this workflow will be simplified in the near future).
"""
app = FastAPI(title="EBRAINS Model Validation Service", description=description, version="2.0")
app.add_middleware(
SessionMiddleware,
secret_key=settings.SESSIONS_SECRET_KEY
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(auth.router, tags=["Authentication and authorization"])
app.include_router(models.router, tags=["Models"])
app.include_router(tests.router, tags=["Validation Tests"])
app.include_router(results.router, tags=["Validation Results"])
app.include_router(simulations.router, tags=["Simulations"])
app.include_router(livepapers.router, tags=["Live Papers"])
app.include_router(vocab.router, tags=["Controlled vocabularies"])
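# Hedged local-run sketch (the module path is an assumption, not stated in this file):
#   uvicorn <package>.main:app --reload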
|
py | 7dfec9b4feb10ca4ffc8708b277581d16b025207 | import torch
from torch.optim.optimizer import Optimizer, required
import torch.optim._functional as F
from agc_optims.utils import agc
class SGD_AGC(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum) with adaptive gradient clipping (AGC).
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
\text{ (objective)}, \: \lambda \text{ (weight decay)}, \\
&\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)},\:nesterov\\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{5mm}\textbf{if} \: \mu \neq 0 \\
&\hspace{10mm}\textbf{if} \: t > 1 \\
&\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t \\
&\hspace{10mm}\textbf{else} \\
&\hspace{15mm} \textbf{b}_t \leftarrow g_t \\
&\hspace{10mm}\textbf{if} \: nesterov \\
&\hspace{15mm} g_t \leftarrow g_{t-1} + \mu \textbf{b}_t \\
&\hspace{10mm}\textbf{else} \\[-1.ex]
&\hspace{15mm} g_t \leftarrow \textbf{b}_t \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
clipping (float, optional): clipping value for the AGC (default: 1e-2)
        agc_eps (float, optional): term used in AGC to prevent gradients from being clipped to zero (default: 1e-3)
Example:
        >>> optimizer = SGD_AGC(model.parameters(), lr=0.1, momentum=0.9, clipping=1e-2)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
    Sutskever et al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
    This is in contrast to Sutskever et al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
This implementation of SGD was taken from the official PyTorch Sources and the code for the AGC was adapted from
https://github.com/vballoli/nfnets-pytorch.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False, clipping=1e-2, agc_eps=1e-3):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= clipping < 1.0:
raise ValueError("Invalid clipping parameter: {}".format(clipping))
if not 0.0 <= agc_eps:
raise ValueError("Invalid agc_eps value: {}".format(agc_eps))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov, clipping=clipping, agc_eps=agc_eps)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(SGD_AGC, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGD_AGC, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
lr = group['lr']
clipping = group['clipping']
agc_eps = group['agc_eps']
for p in group['params']:
if p.grad is not None:
## AGC
agc(param=p, clipping=clipping, eps=agc_eps)
params_with_grad.append(p)
d_p_list.append(p.grad)
state = self.state[p]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
F.sgd(params_with_grad,
d_p_list,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
dampening=dampening,
nesterov=nesterov)
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state['momentum_buffer'] = momentum_buffer
return loss |
py | 7dfec9d8efa482df166c292f23d3121052c0bdfd | class AssetTypeService(object):
"""
    :class:`fortnox.AssetTypeService` is used by :class:`fortnox.Client` to perform
    actions related to the AssetType resource.
Normally you won't instantiate this class directly.
"""
"""
Allowed attributes for AssetType to send to Fortnox backend servers.
"""
OPTS_KEYS_TO_PERSIST = ['Number', 'Description', 'AccountAssetId',
'AccountDepreciationId', 'AccountValueLossId',
'Type']
SERVICE = "AssetType"
def __init__(self, http_client):
"""
:param :class:`fortnox.HttpClient` http_client: Pre configured high-level http client.
"""
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
"""
Retrieve all AssetTypes
Returns all AssetTypes available to the Company, according to the parameters provided
:calls: ``get /assets/types``
:param dict params: (optional) Search options.
        :return: List of dictionaries that support attribute-style access, which represent a collection of AssetTypes.
:rtype: list
"""
_, _, asset_types = self.http_client.get("/assets/types", params=params)
return asset_types[1:]
def retrieve(self, id):
"""
Retrieve a single AssetType
Returns a single AssetType according to the unique AssetType ID provided
If the specified AssetType does not exist, this query returns an error
:calls: ``get /assets/types/{id}``
:param int id: Unique identifier of an AssetType.
        :return: Dictionary that supports attribute-style access and represents the AssetType resource.
:rtype: dict
"""
_, _, asset_type = self.http_client.get("/assets/types/{id}".format(id=id))
return asset_type
def create(self, *args, **kwargs):
"""
Create an AssetType
Creates a new AssetType
**Notice** the AssetType's name **must** be unique within the scope of the resource_type
:calls: ``post /assets/types``
:param tuple *args: (optional) Single object representing AssetType resource.
:param dict **kwargs: (optional) AssetType attributes.
        :return: Dictionary that supports attribute-style access and represents the newly created AssetType resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for AssetType are missing')
initial_attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in initial_attributes.items())
attributes.update({'service': self.SERVICE})
_, _, asset_type = self.http_client.post("/assets/types", body=attributes)
return asset_type
def update(self, id, *args, **kwargs):
"""
Update an AssetType
Updates an AssetType's information
If the specified AssetType does not exist, this query will return an error
**Notice** if you want to update an AssetType, you **must** make sure the AssetType's name is unique within the scope of the specified resource
:calls: ``put /assets/types/{id}``
:param int id: Unique identifier of an AssetType.
:param tuple *args: (optional) Single object representing AssetType resource which attributes should be updated.
:param dict **kwargs: (optional) AssetType attributes to update.
        :return: Dictionary that supports attribute-style access and represents the updated AssetType resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for AssetType are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.items())
attributes.update({'service': self.SERVICE})
_, _, asset_type = self.http_client.put("/assets/types/{id}".format(id=id), body=attributes)
return asset_type
def destroy(self, id):
"""
Delete an AssetType
Deletes an existing AssetType
If the specified AssetType is assigned to any resource, we will remove this AssetType from all such resources
If the specified AssetType does not exist, this query will return an error
This operation cannot be undone
:calls: ``delete /assets/types/{id}``
:param int id: Unique identifier of an AssetType.
:return: True if the operation succeeded.
:rtype: bool
"""
status_code, _, _ = self.http_client.delete("/assets/types/{id}".format(id=id))
return status_code == 200
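# Hedged usage sketch (the http_client construction is an assumption; only names
# defined in this module are used below):
#   service = AssetTypeService(http_client)
#   asset_types = service.list()
#   service.create(Number="101", Description="Laptops", Type="...")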
|
py | 7dfec9e6badca436f3c74cbfbf9c1af0ab75d016 | from django.shortcuts import render,get_object_or_404,redirect
from django.utils import timezone
from web_hiring.models import Post
from django.db.models import Q
from django.forms.models import model_to_dict
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.http import JsonResponse
from django.urls import reverse_lazy
from web_hiring.forms import PostForm
from django.views.generic import (TemplateView, ListView,
DetailView,CreateView,
UpdateView,DeleteView,)
from django.conf import settings
from web_hiring.notificators import vacancy_notification
# Create your views here.
class AboutView(TemplateView):
template_name = 'about.html'
class PostListView(ListView):
model = Post
paginate_by = 30
def get_queryset(self):
return Post.objects.filter(publish_date__lte=timezone.now()).order_by('-publish_date')
class SearchResultsView(ListView):
model = Post
paginate_by = 30
template_name = 'post_search.html'
    def get_queryset(self):  # new
query = self.request.GET.get('q')
object_list = Post.objects.filter(
Q(title__icontains=query) | Q(text__icontains=query),
publish_date__lte=timezone.now()).order_by('-publish_date')
return object_list
class PostDetailView(DetailView):
model = Post
class CreatePostView(LoginRequiredMixin,CreateView):
login_url = '/login/'
redirect_field_name = 'web_hiring/post_detail.html'
form_class = PostForm
model = Post
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.salary = form.instance.salary + ' $'
new_form = form.save(commit=False)
if new_form.title == "Другое":
form.instance.title = self.request.POST.get('titlespecify')
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin,UpdateView):
login_url = '/login/'
redirect_field_name = 'web_hiring/post_detail.html'
form_class = PostForm
model = Post
class PostDeleteView(LoginRequiredMixin,DeleteView):
model = Post
success_url = reverse_lazy('web_hiring:post_list')
class DraftListView(LoginRequiredMixin,ListView):
login_url = '/login/'
redirect_field_name = 'web_hiring/post_list.html'
model = Post
def get_queryset(self):
return Post.objects.filter(publish_date__isnull=True).order_by('create_date')
class PublihedListView(LoginRequiredMixin,ListView):
login_url = '/login/'
redirect_field_name = 'web_hiring/post_list.html'
model = Post
def get_queryset(self):
return Post.objects.filter(author=self.request.user,publish_date__isnull=False).order_by('create_date')
##################################################################
##################################################################
@login_required
def post_publish(request,pk):
post = get_object_or_404(Post,pk=pk)
form = model_to_dict(Post.objects.get(pk=pk))
vacancy_notification(form)
post.publish()
return redirect('web_hiring:post_detail',pk=pk)
|
py | 7dfec9ffdf0095e4110914bc45657a3a01b6d73f | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s1_gen4.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_type1")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7dfeca408940019822823154bd164719f8c72877 | #!/usr/bin/env python
import codecs
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
def read_file(filename):
"""Open a related file and return its content."""
with codecs.open(os.path.join(here, filename), encoding="utf-8") as f:
content = f.read()
return content
README = read_file("README.rst")
CHANGELOG = read_file("CHANGELOG.rst")
setup(
name="django-tinymce",
version="3.0.3.dev0",
packages=find_packages(),
include_package_data=True,
author="Aljosa Mohorovic",
author_email="[email protected]",
description=(
"A Django application that contains a widget to render a "
"form field as a TinyMCE editor."
),
long_description=README + "\n\n" + CHANGELOG,
license="MIT License",
keywords="django widget tinymce",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],
platforms=["any"],
url="https://github.com/aljosa/django-tinymce",
test_suite="runtests.runtests",
)
|
py | 7dfecab277c4e6d76eeb1bf7c836bb23116f8765 | #! /usr/bin/env python
# coding=utf-8
"""Tests the views."""
# Django
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
# Current django project
from sports_manager.models import Gymnasium
class TestGymnasiumCreateViewAsAnonymous(TestCase):
"""Tests."""
def test_get(self):
"""Tests."""
r = self.client.get(reverse('sports-manager:gymnasium-create'))
self.assertEqual(r.status_code, 403)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
r = self.client.post(reverse('sports-manager:gymnasium-create'), d)
self.assertEqual(r.status_code, 403)
class TestGymnasiumCreateViewAsLogged(TestCase):
"""Tests."""
    def setUp(self):
        """Setup for all the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse"
}
get_user_model().objects.create_user(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('sports-manager:gymnasium-create'))
self.assertEqual(r.status_code, 403)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('sports-manager:gymnasium-create'), d)
self.assertEqual(r.status_code, 403)
class TestGymnasiumCreateViewAsStaff(TestCase):
"""Tests."""
    def setUp(self):
        """Setup for all the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse",
'is_staff': True
}
get_user_model().objects.create_user(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('sports-manager:gymnasium-create'))
self.assertEqual(r.status_code, 200)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('sports-manager:gymnasium-create'), d)
self.assertRedirects(r, '/gymnasium/watteau/', fetch_redirect_response=False)
class TestGymnasiumCreateViewAsSuperuser(TestCase):
"""Tests."""
    def setUp(self):
        """Setup for all the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse",
'email': '[email protected]'
}
get_user_model().objects.create_superuser(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('sports-manager:gymnasium-create'))
self.assertEqual(r.status_code, 200)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('sports-manager:gymnasium-create'), d)
self.assertRedirects(r, '/gymnasium/watteau/', fetch_redirect_response=False)
|
py | 7dfecb3103725007520d56459a887898bedc8166 | '''
Copyright (c) 2013-2014, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
def eat_code_caves(flItms, caveone, cavetwo):
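    # Returns the distance (in bytes) between the two user-selected code caves.
    # Caves in the same section are a plain difference of their offsets; caves in
    # different sections additionally account for the 4096-byte alignment padding
    # of every section lying between them. Any failure falls back to an offset of 0.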
try:
if flItms['CavesPicked'][cavetwo][0] == flItms['CavesPicked'][caveone][0]:
return int(flItms['CavesPicked'][cavetwo][1], 16) - int(flItms['CavesPicked'][caveone][1], 16)
else:
caveone_found = False
cavetwo_found = False
forward = True
windows_memoffset_holder = 0
for section in flItms['Sections']:
if flItms['CavesPicked'][caveone][0] == section[0] and caveone_found is False:
caveone_found = True
if cavetwo_found is False:
windows_memoffset_holder += section[1] + 4096 - section[1] % 4096 - section[3]
forward = True
continue
if section[1] % 4096 == 0:
continue
break
if flItms['CavesPicked'][cavetwo][0] == section[0] and cavetwo_found is False:
cavetwo_found = True
if caveone_found is False:
windows_memoffset_holder += -(section[1] + 4096 - section[1] % 4096 - section[3])
forward = False
continue
if section[1] % 4096 == 0:
continue
break
if caveone_found is True or cavetwo_found is True:
if section[1] % 4096 == 0:
continue
if forward is True:
windows_memoffset_holder += section[1] + 4096 - section[1] % 4096 - section[3]
if forward is False:
windows_memoffset_holder += -(section[1] + 4096 - section[1] % 4096 - section[3])
continue
#Need a way to catch all the sections in between other sections
return int(flItms['CavesPicked'][cavetwo][1], 16) - int(flItms['CavesPicked'][caveone][1], 16) + windows_memoffset_holder
except Exception:
#print "EAT CODE CAVE", str(e)
return 0 |
py | 7dfecc914c053c0adb5c42c77c863f10ebceb68d | # polling_location/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import url
from . import views_admin
urlpatterns = [
url(r'^$', views_admin.polling_location_list_view, name='polling_location_list',),
url(r'^import/$',
views_admin.polling_locations_import_from_master_server_view,
name='polling_locations_import_from_master_server'),
url(r'^import_status/$',
views_admin.polling_locations_import_from_master_server_status_view,
name='polling_locations_import_from_master_server_status'),
# Processing incoming file with polling locations
url(r'^import_polling_locations_process/$', views_admin.import_polling_locations_process_view,
name='import_polling_locations_process'),
url(r'^(?P<polling_location_local_id>[0-9]+)/edit/$', views_admin.polling_location_edit_view,
name='polling_location_edit'),
url(r'^(?P<polling_location_local_id>[0-9]+)/visualize/$', views_admin.polling_location_visualize_view,
name='polling_location_visualize'),
url(r'^(?P<polling_location_we_vote_id>wv[\w]{2}ploc[\w]+)/edit_we_vote_id/$',
views_admin.polling_location_edit_view, name='polling_location_we_vote_id_edit'),
url(r'^polling_location_edit_process/$', views_admin.polling_location_edit_process_view,
name='polling_location_edit_process'),
url(r'^polling_locations_add_address_from_latitude_and_longitude/$',
views_admin.polling_locations_add_address_from_latitude_and_longitude_view,
name='polling_locations_add_address_from_latitude_and_longitude'),
url(r'^polling_locations_add_latitude_and_longitude/$',
views_admin.polling_locations_add_latitude_and_longitude_view,
name='polling_locations_add_latitude_and_longitude'),
url(r'^statistics/$', views_admin.polling_location_statistics_view, name='polling_location_statistics',),
url(r'^(?P<polling_location_local_id>[0-9]+)/summary/$', views_admin.polling_location_summary_view,
name='polling_location_summary'),
url(r'^(?P<polling_location_we_vote_id>wv[\w]{2}ploc[\w]+)/summary/$',
views_admin.polling_location_summary_by_we_vote_id_view, name='polling_location_summary_by_we_vote_id'),
url(r'^soft_delete_duplicates/$',
views_admin.soft_delete_duplicates_view,
name='soft_delete_duplicates'),
]
|
py | 7dfecdcd3062edd46ca9d5184dbfb5c33c4dae07 | # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for cache's show_spinner option."""
import streamlit as st
from tests import testutil
@st.cache(show_spinner=False)
def function_without_spinner():
return 3
@st.cache(show_spinner=True)
def function_with_spinner():
return 3
class CacheSpinnerTest(testutil.DeltaGeneratorTestCase):
"""
We test the ability to turn on and off the spinner with the show_spinner
option by inspecting the report queue.
"""
def test_with_spinner(self):
"""If the show_spinner flag is set, there should be one element in the
report queue.
"""
function_with_spinner()
self.assertFalse(self.report_queue.is_empty())
def test_without_spinner(self):
"""If the show_spinner flag is not set, the report queue should be
empty.
"""
function_without_spinner()
self.assertTrue(self.report_queue.is_empty())
|
py | 7dfece1e6d0fbc1e14e6e60f0965e987fb3fcfbe | """
FactSet SCIM API
FactSet's SCIM API implementation. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.ProcuretoPaySCIM.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.ProcuretoPaySCIM.exceptions import ApiAttributeError
class ProductResourceMeta(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'resource_type': (str,), # noqa: E501
'location': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'resource_type': 'resourceType', # noqa: E501
'location': 'location', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ProductResourceMeta - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
resource_type (str): [optional] # noqa: E501
location (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ProductResourceMeta - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
resource_type (str): [optional] # noqa: E501
location (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 7dfecfc0322c25ec378083eeeaa25444b8a1878a | from django.contrib import admin
from instagram.models import Post, Comment
admin.site.register(Post)
admin.site.register(Comment)
|
py | 7dfed0bb2038fbf056a9eeaf1a5c69922ae78332 | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .....admin.request_context import AdminRequestContext
from .....storage.error import StorageError
from .. import routes as test_module
from ..manager import V10DiscoveryMgr
from ..messages.query import Query
from ..models.discovery_record import V10DiscoveryExchangeRecord
class TestDiscoveryRoutes(AsyncTestCase):
async def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.profile = self.context.profile
self.request_dict = {
"context": self.context,
"outbound_message_router": async_mock.CoroutineMock(),
}
self.request = async_mock.MagicMock(
app={},
match_info={},
query={},
__getitem__=lambda _, k: self.request_dict[k],
)
async def test_query_features(self):
self.request.json = async_mock.CoroutineMock()
self.request.query = {"query": "*"}
test_rec = V10DiscoveryExchangeRecord(
discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
query_msg=Query(query="*"),
)
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
V10DiscoveryMgr, "create_and_send_query", autospec=True
) as mock_create_query:
mock_create_query.return_value = test_rec
res = await test_module.query_features(self.request)
mock_response.assert_called_once_with(test_rec.serialize())
async def test_query_features_with_connection(self):
self.request.json = async_mock.CoroutineMock()
self.request.query = {"query": "*", "connection_id": "test", "comment": "test"}
test_rec = V10DiscoveryExchangeRecord(
discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
query_msg=Query(query="*"),
)
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
V10DiscoveryMgr, "create_and_send_query", autospec=True
) as mock_create_query:
mock_create_query.return_value = test_rec
res = await test_module.query_features(self.request)
mock_response.assert_called_once_with(test_rec.serialize())
async def test_query_records(self):
self.request.json = async_mock.CoroutineMock()
self.request.query = {"connection_id": "test"}
test_rec = V10DiscoveryExchangeRecord(
discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
query_msg=Query(query="*"),
)
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module, "V10DiscoveryExchangeRecord", autospec=True
) as mock_ex_rec:
mock_ex_rec.retrieve_by_connection_id.return_value = test_rec
res = await test_module.query_records(self.request)
mock_response.assert_called_once_with({"results": [test_rec.serialize()]})
async def test_query_records_x(self):
self.request.json = async_mock.CoroutineMock()
self.request.query = {"connection_id": "test"}
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module, "V10DiscoveryExchangeRecord", autospec=True
) as mock_ex_rec:
mock_ex_rec.retrieve_by_connection_id.side_effect = StorageError
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.query_records(self.request)
async def test_query_records_all(self):
self.request.json = async_mock.CoroutineMock()
test_recs = [
V10DiscoveryExchangeRecord(
discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
query_msg=Query(query="*"),
),
V10DiscoveryExchangeRecord(
discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa7",
query_msg=Query(query="test"),
),
]
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module, "V10DiscoveryExchangeRecord", autospec=True
) as mock_ex_rec:
mock_ex_rec.query.return_value = test_recs
res = await test_module.query_records(self.request)
mock_response.assert_called_once_with(
{"results": [k.serialize() for k in test_recs]}
)
async def test_query_records_connection_x(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module, "V10DiscoveryExchangeRecord", autospec=True
) as mock_ex_rec:
mock_ex_rec.query.side_effect = StorageError
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.query_records(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
|
py | 7dfed4089b28783a18e0758944253c92694d718a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mp_throttle documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 9 20:17:19 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
root = os.path.abspath('/home/elpunkt/Documents/Link to Projekte/Python/mp_throttle')
sys.path.append(root)
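# Read __version__ and __author__ ("Name <email>") out of mp_throttle/__init__.py with
# regular expressions, so the docs can be built without importing the package.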
def find_version_author_mail():
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "mp_throttle/__init__.py"), 'r') as fp:
version_file = fp.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
author, mail = re.split(r'<',author_match.group(1))
return version_match.group(1), author.rstrip(), re.sub('>', '', mail)
_version, _author, _mail = find_version_author_mail()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mp_throttle'
copyright = '2018, Lucas Langholf'
author = _author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = _version
# The full version, including alpha/beta/rc tags.
release = _version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_button': True,
'github_banner': True,
'github_user': 'elpunkt',
'github_repo': 'mp_throttle',
'description': 'Throttle and Monitor for Python multiprocessing.',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['navigation.html', 'localtoc.html', 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mp_throttledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mp_throttle.tex', 'mp\\_throttle Documentation',
_author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mp_throttle', 'mp_throttle Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mp_throttle', 'mp_throttle Documentation',
author, 'mp_throttle', 'Throttle and Monitor for multiprocessing.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py | 7dfed5c5b8f6fa08b2d33f3bb11f6b570b9276a8 | '''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from ARFuncs import *
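# Helpers used by the ARSDK build scripts to generate the Android NDK build files
# (Application.mk, AndroidManifest.xml, Android.mk) for a prebuilt library, its
# prebuilt shared-library dependencies and its JNI wrapper module.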
def Android_AppendDepsPrebuiltAndroidMk(target, lib, mkfile, array, suffix, rootDir):
for soName in lib.soLibs:
libPrefix = 'lib' if not soName.startswith('lib') else ''
libPrefixUpper = libPrefix.upper()
soNameUpper = libPrefixUpper + soName.upper()
soNameLower = libPrefix + soName.lower()
pbName = '%(soNameUpper)s-prebuilt' % locals()
if not pbName in array:
mkfile.write('# %(soNameUpper)s\n' % locals())
mkfile.write('include $(CLEAR_VARS)\n')
mkfile.write('\n')
mkfile.write('LOCAL_MODULE := %(pbName)s\n' % locals())
mkfile.write('LOCAL_SRC_FILES := %(rootDir)s/$(TARGET_ARCH_ABI)/lib/%(soName)s' % locals() + '\n')
mkfile.write('\n')
mkfile.write('include $(PREBUILT_SHARED_LIBRARY)\n')
mkfile.write('\n')
array.append(pbName)
def Android_CreateApplicationMk(projectRoot, abis):
appMkName = '%(projectRoot)s/jni/Application.mk' % locals()
appMkTmpName = '%(projectRoot)s/jni/Application.mk.new' % locals()
appMk = open(appMkTmpName, 'w')
appMk.write('APP_ABI := ')
for abi in abis:
appMk.write (abi + ' ')
appMk.write('\n')
appMk.write('APP_PLATFORM := android-' + os.environ.get('AR_ANDROID_MIN_VERSION') + '\n')
appMk.close()
ARReplaceFileIfDifferent(appMkName, appMkTmpName)
def Android_CreateAndroidManifest(projectRoot, lib):
androidManifestName = '%(projectRoot)s/AndroidManifest.xml' % locals()
androidManifestTmpName = '%(projectRoot)s/AndroidManifest.xml.new' % locals()
libLower = lib.name.lower()
androidMinVersion = os.environ.get('AR_ANDROID_MIN_VERSION')
androidTargetVersion = os.environ.get('AR_ANDROID_API_VERSION')
androidManifest = open(androidManifestTmpName, 'w')
androidManifest.write('<?xml version="1.0" encoding="utf-8"?>\n')
androidManifest.write('<manifest xmlns:android="http://schemas.android.com/apk/res/android"\n')
androidManifest.write(' package="com.parrot.arsdk.%(libLower)s"' % locals())
androidManifest.write(' android:installLocation="auto"\n')
androidManifest.write(' android:versionCode="1"\n')
androidManifest.write(' android:versionName="1" >\n')
androidManifest.write('\n')
androidManifest.write(' <uses-sdk\n')
androidManifest.write(' android:minSdkVersion="%(androidMinVersion)s"\n' % locals())
androidManifest.write(' android:targetSdkVersion="%(androidTargetVersion)s" />\n' % locals())
androidManifest.write('</manifest>\n')
androidManifest.close()
ARReplaceFileIfDifferent(androidManifestName, androidManifestTmpName)
def Android_CreateAndroidMk(target, projectRoot, installRoot, lib, debug, hasNative=True, inhouse=False):
JniRootDir = '%(projectRoot)s/jni/' % locals()
andMkName = '%(JniRootDir)s/Android.mk' % locals()
andMkTmpName = '%(JniRootDir)s/Android.mk.new' % locals()
suffix = '_dbg' if debug else ''
prebuilts = []
andMk = open(andMkTmpName, 'w')
andMk.write('LOCAL_PATH := $(call my-dir)\n')
andMk.write('\n')
# Write prebuilt deps (use shared)
lib.runOnAllDeps(target, Android_AppendDepsPrebuiltAndroidMk, mkfile=andMk, array=prebuilts, suffix=suffix, rootDir=installRoot)
# Write prebuilt self (use shared)
libUpper = lib.name.upper()
libLower = lib.name.lower()
if hasNative:
andMk.write('# lib%(lib)s\n' % locals())
andMk.write('include $(CLEAR_VARS)\n')
andMk.write('\n')
andMk.write('LOCAL_MODULE := LIB%(libUpper)s-prebuilt\n' % locals())
andMk.write('LOCAL_SRC_FILES := %(installRoot)s/$(TARGET_ARCH_ABI)/lib/lib%(libLower)s%(suffix)s.' % locals() + target.soext + '\n')
andMk.write('\n')
andMk.write('include $(PREBUILT_SHARED_LIBRARY)\n')
andMk.write('\n')
# Write JNI Compiler wrapper
andMk.write('# JNI Wrapper\n')
andMk.write('include $(CLEAR_VARS)\n')
andMk.write('\n')
# TEMP ALWAYS USE -g !!!
#andMk.write('LOCAL_CFLAGS := \n')
andMk.write('LOCAL_CFLAGS := -g\n')
# END OF TEMP ALWAYS USE -g !!!
if inhouse:
andMk.write('LOCAL_CFLAGS += -D_IN_HOUSE\n')
andMk.write('ifeq ($(TARGET_ARCH_ABI), armeabi-v7a)\n')
andMk.write(' LOCAL_CFLAGS += -mfloat-abi=softfp -mfpu=neon\n')
andMk.write('endif\n')
andMk.write('LOCAL_C_INCLUDES:= %(installRoot)s/$(TARGET_ARCH_ABI)/include\n' % locals())
andMk.write('LOCAL_MODULE := lib%(libLower)s_android%(suffix)s\n' % locals())
JniCFiles = []
for Dir, directories, files in os.walk(JniRootDir):
for _file in files:
if _file.endswith('.c'):
JniCFiles.append (os.path.join(Dir, _file).replace(JniRootDir, ''))
if JniCFiles:
andMk.write('LOCAL_SRC_FILES :=')
for _file in JniCFiles:
andMk.write(' %(_file)s' % locals())
andMk.write('\n')
andMk.write('LOCAL_LDLIBS := -llog -lz\n')
if hasNative or prebuilts:
andMk.write('LOCAL_SHARED_LIBRARIES :=')
if hasNative:
andMk.write(' LIB%(libUpper)s-prebuilt' % locals())
for dep in prebuilts:
andMk.write(' %(dep)s' % locals())
andMk.write('\n')
andMk.write('include $(BUILD_SHARED_LIBRARY)\n')
andMk.close()
ARReplaceFileIfDifferent(andMkName, andMkTmpName)
|
py | 7dfed63925116f18144bff17e2885029e29250c3 | """ CLI argument definitions for the screenshotter. This module defines the argument parser. """
from argparse import ArgumentParser, RawDescriptionHelpFormatter
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
'--temp-dir',
default='/tmp/public-cache',
help='Local temp dir for snapshots')
parser.add_argument('--states',
default='',
help='Comma-separated list of state 2-letter names. If present, will only screenshot those.')
parser.add_argument('--phantomjscloud-key', default='',
help='API key for PhantomJScloud, used for browser image capture')
# Args relating to S3 setup
parser.add_argument(
'--s3-bucket',
default='covid-data-archive',
help='S3 bucket name')
parser.add_argument(
'--s3-subfolder',
default='state_screenshots',
help='Name of subfolder on S3 bucket to upload files to')
parser.add_argument('--push-to-s3', dest='push_to_s3', action='store_true', default=False,
help='Push screenshots to S3')
# Determines which screenshots we're aiming to take; these may have different schedules. Only one
# can be true at a time
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--screenshot-core-urls', dest='core_urls', action='store_true', default=False,
help='Screenshot core data URLs from States Info')
group.add_argument('--screenshot-crdt-urls', dest='crdt_urls', action='store_true', default=False,
help='Screenshot CRDT data URLs')
group.add_argument('--screenshot-ltc-urls', dest='ltc_urls', action='store_true', default=False,
help='Screenshot LTC data URLs')
# Allows the user to specify primary, secondary, etc. screenshots to take
# If none of these arguments are present, all screenshots will be taken
screenshot_type_group = parser.add_argument_group('which_screenshots')
screenshot_type_group.add_argument('--primary', dest='primary', action='store_true',
default=False, help='Run the primary screenshot')
screenshot_type_group.add_argument('--secondary', dest='secondary', action='store_true',
default=False, help='Run the secondary screenshot')
screenshot_type_group.add_argument('--tertiary', dest='tertiary', action='store_true',
default=False, help='Run the tertiary screenshot')
screenshot_type_group.add_argument('--quaternary', dest='quaternary', action='store_true',
default=False, help='Run the quaternary screenshot')
screenshot_type_group.add_argument('--quinary', dest='quinary', action='store_true',
default=False, help='Run the quinary screenshot')
# Args relating to Slack notifications
parser.add_argument(
'--slack-channel',
default='',
help='Slack channel ID to notify on screenshot errors')
parser.add_argument(
'--slack-api-token',
default='',
help='Slack API token to use for notifications')
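# Illustrative usage from a hypothetical driver script (attribute names follow the
# dest values argparse derives from the flags above):
#   args = parser.parse_args()
#   if args.core_urls:
#       states = [s for s in args.states.split(',') if s]
#       ...  # screenshot the core data URLs, optionally restricted to `states`
#   if args.push_to_s3:
#       ...  # upload results to args.s3_bucket / args.s3_subfolder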
|
py | 7dfed7a52363a8f52c5dc7af85f3bac8d6fe6ae2 | """
Module to manage the init services programs (sysv init and systemd).
"""
from jadi import component
from aj.api.http import url, HttpPlugin
from aj.auth import authorize
from aj.api.endpoint import endpoint, EndpointError
from aj.plugins.services.api import ServiceManager, ServiceOperationError
@component(HttpPlugin)
class Handler(HttpPlugin):
def __init__(self, context):
self.context = context
        self.managers = {x.id: x for x in ServiceManager.all(self.context)}
def __service_to_json(self, svc):
"""
        Utility to convert a Service object into a dict for the Angular frontend.
:param svc: Service object
:type svc: Service
        :return: Service information
:rtype: dict
"""
return {
'id': svc.id,
'name': svc.name,
'state': svc.state,
'running': svc.running,
'managerId': svc.manager.id,
'enabled': svc.enabled,
'static': svc.static,
}
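    # Illustrative return value for an ssh unit under systemd (values are placeholders):
    #   {'id': 'ssh', 'name': 'ssh', 'state': 'running', 'running': True,
    #    'managerId': 'systemd', 'enabled': True, 'static': False}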
@url(r'/api/services/managers')
@endpoint(api=True)
def handle_api_managers(self, http_context):
"""
List all available managers.
:param http_context: HttpContext
:type http_context: HttpContext
:return: List of managers, one manager per dict
:rtype: list of dict
"""
return [
{
'id': mgr.id,
'name': mgr.name,
} for mgr in self.managers.values()
]
@url(r'/api/services/list/(?P<manager_id>\w+)')
@endpoint(api=True)
def handle_api_list(self, http_context, manager_id=None):
"""
Retrieve all services from one specified init system.
:param http_context: HttpContext
:type http_context: HttpContext
:param manager_id: Manager id, e.g. systemd
:type manager_id: string
        :return: List of service information
:rtype: list of dict
"""
return [self.__service_to_json(svc) for svc in self.managers[manager_id].list()]
@url(r'/api/services/get/(?P<manager_id>\w+)/(?P<service_id>.+)')
@endpoint(api=True)
def handle_api_get(self, http_context, manager_id=None, service_id=None):
"""
        Retrieve the information for one specified service in one specified manager.
:param http_context: HttpContext
:type http_context: HttpContext
:param manager_id: Manager id, e.g. systemd
:type manager_id: string
:param service_id: Service id, e.g. ssh
:type service_id: string
        :return: Service information
:rtype: dict
"""
return self.__service_to_json(self.managers[manager_id].get_service(service_id))
@url(r'/api/services/get_status/(?P<manager_id>\w+)/(?P<service_id>.+)')
@endpoint(api=True)
def handle_api_get_status(self, http_context, manager_id=None, service_id=None):
"""
        Retrieve the status for one specified service in one specified manager.
:param http_context: HttpContext
:type http_context: HttpContext
:param manager_id: Manager id, e.g. systemd
:type manager_id: string
:param service_id: Service id, e.g. ssh
:type service_id: string
        :return: Service status information
:rtype: dict
"""
return self.managers[manager_id].get_status(service_id)
@url(r'/api/services/do/(?P<operation>\w+)/(?P<manager_id>\w+)/(?P<service_id>.+)')
@authorize('services:manage')
@endpoint(api=True)
def handle_api_operate(self, http_context, manager_id=None, operation=None, service_id=None):
"""
Launch one command for a specified service.
:param http_context: HttpContext
:type http_context: HttpContext
:param manager_id: Manager id, e.g. systemd
:type manager_id: string
:param operation: Operation type, e.g. start
:type operation: string
:param service_id: Service id, e.g. ssh
:type service_id: string
:return: Service informations
:rtype: dict
"""
if operation not in ['start', 'stop', 'restart', 'kill', 'disable', 'enable']:
return
try:
getattr(self.managers[manager_id], operation)(service_id)
except ServiceOperationError as e:
raise EndpointError(e)
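    # Illustrative client calls against the routes above (assuming a manager with id
    # 'systemd' is available; the last one requires the 'services:manage' permission):
    #   GET /api/services/list/systemd
    #   GET /api/services/get/systemd/ssh
    #   GET /api/services/do/restart/systemd/ssh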
|
py | 7dfed8067f770f101b470a5e633c65056ecce429 | import json
import copy
import datetime
import random
import requests
import sys
import tabulate
import time
from inputimeout import TimeoutOccurred, inputimeout
from collections import Counter
from hashlib import sha256
from captcha import captcha_builder_manual, captcha_builder_auto, captcha_builder_api
BOOKING_URL = "https://cdn-api.co-vin.in/api/v2/appointment/schedule"
BENEFICIARIES_URL = "https://cdn-api.co-vin.in/api/v2/appointment/beneficiaries"
CALENDAR_URL_DISTRICT = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id={0}&date={1}"
CALENDAR_URL_PINCODE = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByPin?pincode={0}&date={1}"
CAPTCHA_URL = "https://cdn-api.co-vin.in/api/v2/auth/getRecaptcha"
OTP_PUBLIC_URL = "https://cdn-api.co-vin.in/api/v2/auth/public/generateOTP"
OTP_PRO_URL = "https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP"
RESCHEDULE_URL = "https://cdn-api.co-vin.in/api/v2/appointment/reschedule"
CANCEL_URL = "https://cdn-api.co-vin.in/api/v2/appointment/cancel"
OTP_VALIDATE_URL = "https://cdn-api.co-vin.in/api/v2/auth/validateMobileOtp"
DOWNLOAD_APPOINTMENT = "https://cdn-api.co-vin.in/api/v2/appointment/appointmentslip/download?appointment_id={}"
WARNING_BEEP_DURATION = (1000, 5000)
try:
import winsound
except ImportError:
import os
if sys.platform == "darwin":
def beep(freq, duration):
# brew install SoX --> install SOund eXchange universal sound sample translator on mac
os.system(
f"play -n synth {duration / 1000} sin {freq} >/dev/null 2>&1")
else:
def beep(freq, duration):
# apt-get install beep --> install beep package on linux distros before running
os.system('beep -f %s -l %s' % (freq, duration))
else:
def beep(freq, duration):
winsound.Beep(freq, duration)
def book_appointment(request_header, details, mobile, generate_captcha_pref, api_key=None, captcha_api_choice=None):
"""
This function
1. Takes details in json format
2. Attempts to book an appointment using the details
3. Returns True or False depending on Token Validity
"""
try:
valid_captcha = True
while valid_captcha:
captcha = generate_captcha(request_header, generate_captcha_pref, api_key, captcha_api_choice)
details["captcha"] = captcha
print(
"================================= ATTEMPTING BOOKING ==================================================")
resp = requests.post(BOOKING_URL, headers=request_header, json=details)
# print(f"Booking Response Code: {resp.status_code}")
# print(f"Booking Response : {resp.text}")
if resp.status_code == 401:
print("TOKEN INVALID")
return resp.status_code
elif resp.status_code == 200:
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
print(
"############## BOOKED! ############################ BOOKED! ################")
print(
" Hey, Hey, Hey! It's your lucky day! ")
booked_appointment_id = resp.text
booked_appointment_id = (booked_appointment_id[32:68])
print(booked_appointment_id)
response = requests.get(DOWNLOAD_APPOINTMENT.format(booked_appointment_id), headers=request_header)
if response.status_code == 200:
filename = "appointment_slip" + booked_appointment_id
with open(filename, 'wb') as f:
f.write(response.content)
return 1000
elif resp.status_code == 409:
# This vaccination center is completely booked for the selected date.
print(f"Response: {resp.status_code} : {resp.text}")
return resp.status_code
elif resp.status_code == 400:
# bad request or invalid captcha
print(f"Response: {resp.status_code} : {resp.text}")
return resp.status_code
elif resp.status_code >= 500:
# Internal server error
print(f"Response: {resp.status_code} : {resp.text}")
pass
else:
print(f"Response: {resp.status_code} : {resp.text}")
return True
except Exception as e:
print(str(e))
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
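# Illustrative shape of the `details` payload (all values are placeholders; check_and_book()
# assembles the real one, and the solved "captcha" is added above before POSTing to BOOKING_URL):
#   {
#       "beneficiaries": ["12345678901234"],
#       "dose": 1,
#       "center_id": 123456,
#       "session_id": "00000000-0000-0000-0000-000000000000",
#       "slot": "09:00AM-11:00AM",
#   }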
def check_and_book(request_header, beneficiary_dtls, location_dtls, search_option, **kwargs):
"""
This function
1. Checks the vaccination calendar for available slots,
2. Lists all viable options,
3. Takes user's choice of vaccination center and slot,
4. Calls function to book appointment, and
5. Returns True or False depending on Token Validity
"""
slots_available = False
try:
min_age_booking = get_min_age(beneficiary_dtls)
minimum_slots = kwargs["min_slots"]
refresh_freq = kwargs["ref_freq"]
auto_book = kwargs["auto_book"]
start_date = kwargs["start_date"]
vaccine_type = kwargs["vaccine_type"]
fee_type = kwargs["fee_type"]
mobile = kwargs["mobile"]
captcha_automation = kwargs['captcha_automation']
captcha_api_choice = kwargs['captcha_api_choice']
captcha_automation_api_key = kwargs['captcha_automation_api_key']
dose_num = kwargs['dose_num']
        excluded_pincodes = kwargs['excluded_pincodes']
reschedule_inp = kwargs['reschedule_inp']
if isinstance(start_date, int) and start_date == 2:
start_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
elif isinstance(start_date, int) and start_date == 1:
start_date = datetime.datetime.today().strftime("%d-%m-%Y")
else:
pass
if search_option == 2:
options = check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots,
min_age_booking, fee_type, dose_num, excluded_pincodes)
else:
options = check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots,
min_age_booking, fee_type, dose_num)
if isinstance(options, bool):
return False
options = sorted(options,
key=lambda k: (k["district"].lower(),
k["pincode"],
k["name"].lower(),
datetime.datetime.strptime(k["date"], "%d-%m-%Y"),
),
)
if len(options) > 0:
slots_available = True
options = sorted(options, key=lambda k: (k["available"], k["date"]), reverse=True)
print(
"\n======================================= Available slots found =======================================")
tmp_options = copy.deepcopy(options)
if len(tmp_options) > 0:
cleaned_options_for_display = []
for item in tmp_options:
item.pop("session_id", None)
item.pop("center_id", None)
cleaned_options_for_display.append(item)
display_table(cleaned_options_for_display)
if auto_book == 'n':
try:
choice = inputimeout(
prompt='----------> Enter a choice e.g: 1.4 for (1st center 4th slot):\n----------> OR wait 20 second for auto update of centers: ',
timeout=20)
choice = choice.split('.')
choice = [int(item) for item in choice]
except ValueError:
print("invalid input")
return True
if reschedule_inp == "r" or reschedule_inp == "R":
new_req = {
'appointment_id': [beneficiary['appointment_id'] for beneficiary in beneficiary_dtls],
'center_id': options[choice[0] - 1]['center_id'],
'session_id': options[choice[0] - 1]['session_id'],
'slot': options[choice[0] - 1]['slots'][choice[1] - 1],
}
print(f"Booking with info: {new_req}")
return reschedule_appointment(request_header, new_req, mobile, captcha_automation,
captcha_automation_api_key, captcha_api_choice)
else:
new_req = {
'beneficiaries': [beneficiary['bref_id'] for beneficiary in beneficiary_dtls],
'dose': dose_num,
'center_id': options[choice[0] - 1]['center_id'],
'session_id': options[choice[0] - 1]['session_id'],
'slot': options[choice[0] - 1]['slots'][choice[1] - 1]
}
print(f"Booking with info: {new_req}")
booking_status = book_appointment(request_header, new_req, mobile, captcha_automation,
captcha_automation_api_key, captcha_api_choice)
if booking_status == 1000:
return "break"
else:
return True
else:
for i in range(refresh_freq, 0, -1):
msg = f"No viable options. Next update in {i} seconds.."
print(msg, end="\r", flush=True)
sys.stdout.flush()
time.sleep(1)
choice = "."
except TimeoutOccurred:
time.sleep(1)
return True
else:
if not slots_available:
return True
else:
# sort by date and maximum available slots
start_epoch = int(time.time())
            # spend at most 30 seconds on stale availability data when captcha automation is enabled, otherwise 60 seconds for manual captcha entry
MAX_ALLOWED_DURATION_OF_STALE_INFORMATION_IN_SECS = 1 * 30 if captcha_automation != 'n' else 1 * 60
# Try all available centers one by one
for i in range(0, len(options)):
option = options[i]
# all_slots_of_a_center = option.get("slots", [])
# randomly choosing 2/3 slots of a center instead of trying all slots one by one to minimise API hit if center is full
all_slots_of_a_center = (random.sample(option["slots"], 2)) if ((len(option['slots'])) <= 4) else (random.sample(option["slots"], 3))
if not all_slots_of_a_center:
continue
# Try all slots of a center one by one
for selected_slot in all_slots_of_a_center:
current_epoch = int(time.time())
if current_epoch - start_epoch >= MAX_ALLOWED_DURATION_OF_STALE_INFORMATION_IN_SECS:
print(
"\n\n######################## Tried too many times but still not able to book, getting new availability status from CoWIN #####################\n\n")
return True
try:
center_id = option["center_id"]
print(
f"\n============> Trying Choice # {i + 1} Center Name # {option['name']} , Center # {center_id}, Slot #{selected_slot}")
if reschedule_inp == "r" or reschedule_inp == "R":
new_req = {
"appointment_id": beneficiary_dtls[0]['appointment_id'],
"center_id": option["center_id"],
"session_id": option["session_id"],
"slot": selected_slot,
}
print(f"Booking with info: {new_req}")
booking_status = reschedule_appointment(request_header, new_req, mobile, captcha_automation,
captcha_automation_api_key, captcha_api_choice)
else:
new_req = {
"beneficiaries": [beneficiary["bref_id"] for beneficiary in beneficiary_dtls],
"dose": dose_num,
"center_id": option["center_id"],
"session_id": option["session_id"],
"slot": selected_slot,
}
print(f"Booking with info: {new_req}")
booking_status = book_appointment(request_header, new_req, mobile, captcha_automation,
captcha_automation_api_key, captcha_api_choice)
if booking_status == 1000:
return "break"
# token invalid. returning 401 response code
elif booking_status == 401:
return True
# bad request or captcha error
elif booking_status == 400:
pass
# selected slot of the center is fully booked
else:
pass
except IndexError:
print("============> Invalid Option!")
os.system("pause")
pass
# tried all slots of all centers but still not able to book then look for current status of centers
return True
# --------------get all pincodes to filter centers by excluded pincode ---------------#
def get_all_pincodes(request_header, district_id, start_date, min_age):
if start_date == 1:
INP_DATE = datetime.datetime.today().strftime("%d-%m-%Y")
else:
INP_DATE = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
DIST_ID = district_id
URL = \
CALENDAR_URL_DISTRICT.format(DIST_ID, INP_DATE)
response = requests.get(URL, headers=request_header)
if response.status_code == 200:
pincode_list = response.json()
if "centers" in pincode_list:
pincode_list = filter_centers_by_age(pincode_list, int(min_age))
refined_pincodes = []
if "centers" in pincode_list:
for center in list(pincode_list["centers"]):
tmp = {"pincode": center["pincode"],
"name": center["name"],
"block name": center["block_name"]
}
refined_pincodes.append(tmp)
if len(refined_pincodes) > 0:
print(
"\n List of all available centers : \n you can enter other pincodes too to avoid those center in future\n")
display_table(refined_pincodes)
else:
print(
"\n No available centers found at present.. you can how ever add the pincodes to exclude the centers if they become available \n")
excluded_pincodes = []
pincodes = input(
"Enter comma separated pincodes to exclude: \n(you can enter pincodes to avoid those center in future)\n")
for idx, pincode in enumerate(pincodes.split(",")):
if not pincode or len(pincode) < 6:
print(f"Ignoring invalid pincode: {pincode}")
continue
pincode = {'pincode': pincode}
excluded_pincodes.append(pincode)
return excluded_pincodes
else:
print("\n No centers available on: " + str(INP_DATE))
else:
print(response.status_code)
pass
def get_districts(request_header):
"""
This function
1. Lists all states, prompts to select one,
2. Lists all districts in that state, prompts to select required ones, and
3. Returns the list of districts as list(dict)
"""
states = requests.get("https://cdn-api.co-vin.in/api/v2/admin/location/states", headers=request_header)
if states.status_code == 200:
states = states.json()["states"]
refined_states = []
for state in states:
tmp = {"state": state["state_name"]}
refined_states.append(tmp)
display_table(refined_states)
state = int(input("\nEnter State index: "))
state_id = states[state - 1]["state_id"]
districts = requests.get(f"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}",
headers=request_header)
if districts.status_code == 200:
districts = districts.json()["districts"]
refined_districts = []
for district in districts:
tmp = {"district": district["district_name"]}
refined_districts.append(tmp)
display_table(refined_districts)
reqd_districts = input("\nEnter comma separated index numbers of districts to monitor : ")
districts_idx = [int(idx) - 1 for idx in reqd_districts.split(",")]
reqd_districts = [
{
"district_id": item["district_id"],
"district_name": item["district_name"],
"alert_freq": 440 + ((2 * idx) * 110),
}
for idx, item in enumerate(districts)
if idx in districts_idx
]
print(f"Selected districts: ")
display_table(reqd_districts)
return reqd_districts
else:
print("Unable to fetch districts")
print(districts.status_code)
print(districts.text)
os.system("pause")
sys.exit(1)
else:
print("Unable to fetch states")
print(states.status_code)
print(states.text)
os.system("pause")
sys.exit(1)
def fetch_beneficiaries(request_header):
return requests.get(BENEFICIARIES_URL, headers=request_header)
def get_required_beneficiaries(request_header, beneficiaries):
"""
This function
1. Fetches all beneficiaries registered under the mobile number,
2. Prompts user to select the applicable beneficiaries, and
3. Returns the list of beneficiaries as list(dict)
"""
refined_beneficiaries = []
for beneficiary in beneficiaries:
beneficiary["age"] = datetime.datetime.today().year - int(beneficiary["birth_year"])
if beneficiary["vaccination_status"] == "Partially Vaccinated" and len(beneficiary["dose2_date"]) == 0:
dose2_date_calculated = vaccine_dose2_duedate(beneficiary["vaccine"], beneficiary["dose1_date"])
beneficiary["dose2_date"] = dose2_date_calculated
tmp = {
"bref_id": beneficiary["beneficiary_reference_id"],
"name": beneficiary["name"],
"vaccine": beneficiary["vaccine"],
"age": beneficiary["age"],
"status": beneficiary["vaccination_status"],
"birth_year": beneficiary["birth_year"],
"mobile_number": beneficiary["mobile_number"],
"photo_id_type": beneficiary["photo_id_type"],
"photo_id_number": beneficiary["photo_id_number"],
"dose1_date": beneficiary["dose1_date"],
"dose2_date": beneficiary["dose2_date"],
}
refined_beneficiaries.append(tmp)
display_table(refined_beneficiaries)
print(
"""
################# IMPORTANT NOTES #################
# 1. While selecting beneficiaries, make sure that selected beneficiaries are all taking the same dose: either first OR second.
        # Please do not try to club together booking for first dose for one beneficiary and second dose for another beneficiary.
#
# 2. While selecting beneficiaries, also make sure that beneficiaries selected for second dose are all taking the same vaccine: COVISHIELD OR COVAXIN.
        # Please do not try to club together booking for beneficiary taking COVISHIELD with beneficiary taking COVAXIN.
#
# 3. If you're selecting multiple beneficiaries, make sure all are of the same age group (45+ or 18+) as defined by the govt.
# Please do not try to club together booking for younger and older beneficiaries.
###################################################
"""
)
reqd_beneficiaries = input("\nEnter comma separated index numbers of beneficiaries to book for : ")
beneficiary_idx = [int(idx) - 1 for idx in reqd_beneficiaries.split(",")]
reqd_beneficiaries = [
{
"bref_id": item["bref_id"],
"name": item["name"],
"vaccine": item["vaccine"],
"age": item["age"],
"status": item["status"],
"dose1_date": item["dose1_date"],
"dose2_date": item["dose2_date"],
}
for idx, item in enumerate(refined_beneficiaries)
if idx in beneficiary_idx
]
print(f"Selected beneficiaries: ")
display_table(reqd_beneficiaries)
return reqd_beneficiaries
def clear_bucket_and_send_OTP(storage_url, mobile, request_header):
print("clearing OTP bucket: " + storage_url)
response = requests.put(storage_url, data={})
data = {
"mobile": mobile,
"secret": "U2FsdGVkX1+z/4Nr9nta+2DrVJSv7KS6VoQUSQ1ZXYDx/CJUkWxFYG6P3iM/VW+6jLQ9RDQVzp/RcZ8kbT41xw==",
}
print(f"Requesting OTP with mobile number {mobile}..")
txnId = requests.post(url=OTP_PRO_URL, json=data, headers=request_header)
if txnId.status_code == 200:
txnId = txnId.json()["txnId"]
else:
print("Unable to Create OTP")
print(txnId.text)
time.sleep(5) # Safety net against rate limit
txnId = None
return txnId
def check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date,
minimum_slots, min_age_booking, fee_type, dose_num, excluded_pincodes):
"""
This function
1. Takes details required to check vaccination calendar
2. Filters result by minimum number of slots available
3. Returns False if token is invalid
4. Returns list of vaccination centers & slots if available
"""
try:
print(
"=========================================================================================================================")
today = datetime.datetime.today()
base_url = CALENDAR_URL_DISTRICT
if vaccine_type:
base_url += f"&vaccine={vaccine_type}"
options = []
for location in location_dtls:
resp = requests.get(base_url.format(location["district_id"], start_date), headers=request_header, )
if resp.status_code == 401:
print("TOKEN INVALID")
return False
elif resp.status_code == 200:
resp = resp.json()
resp = filter_centers_by_age(resp, min_age_booking)
                if excluded_pincodes:
resp = filer_by_excluded_pincodes(resp, excluded_pincodes)
if "centers" in resp:
print(
f"Total Centers available in {location['district_name']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}")
options += viable_options(resp, minimum_slots, min_age_booking, fee_type, dose_num)
else:
pass
for location in location_dtls:
if location["district_name"] in [option["district"] for option in options]:
for _ in range(2):
beep(location["alert_freq"], 150)
return options
except Exception as e:
print(str(e))
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def generate_token_OTP(mobile, request_header, otp_validation_header):
"""
    This function generates an OTP and returns a new token, or None when a token could not be obtained
"""
storage_url = "https://kvdb.io/SK2XsE52VMgzwaZMKAK2pc/" + mobile
txnId = clear_bucket_and_send_OTP(storage_url, mobile, request_header)
if txnId is None:
return txnId
time.sleep(10)
    OTP = None
    t_end = time.time() + 60 * 3  # try to read OTP for at most 3 minutes
while time.time() < t_end:
response = requests.get(storage_url)
if response.status_code == 200:
print("OTP SMS is:" + response.text)
print("OTP SMS len is:" + str(len(response.text)))
OTP = response.text
OTP = OTP.replace("Your OTP to register/access CoWIN is ", "")
OTP = OTP.replace(". It will be valid for 3 minutes. - CoWIN", "")
if not OTP:
time.sleep(5)
continue
break
else:
# Hope it won't throw 500 error a little later, wait for 5 sec and try again
print("error fetching OTP API:" + response.text)
time.sleep(5)
if not OTP:
return None
print("Parsed OTP:" + OTP)
data = {"otp": sha256(str(OTP.strip()).encode("utf-8")).hexdigest(), "txnId": txnId}
print(f"Validating OTP..")
token = requests.post(url=OTP_VALIDATE_URL, json=data, headers=otp_validation_header)
if token.status_code == 200:
token = token.json()["token"]
else:
print("Unable to Validate OTP")
print(token.text)
return None
print(f"Token Generated: {token}")
return token
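# Illustrative follow-up (assumption: protected Co-WIN endpoints expect the returned token
# as a Bearer authorization header on subsequent requests):
#   request_header["Authorization"] = f"Bearer {token}"
#   resp = requests.get(BENEFICIARIES_URL, headers=request_header)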
def check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date,
minimum_slots, min_age_booking, fee_type, dose_num):
"""
This function
1. Takes details required to check vaccination calendar
2. Filters result by minimum number of slots available
3. Returns False if token is invalid
4. Returns list of vaccination centers & slots if available
"""
try:
print(
"============================================================================================================")
today = datetime.datetime.today()
base_url = CALENDAR_URL_PINCODE
if vaccine_type:
base_url += f"&vaccine={vaccine_type}"
options = []
for location in location_dtls:
resp = requests.get(base_url.format(location["pincode"], start_date), headers=request_header)
if resp.status_code == 401:
print("TOKEN INVALID")
return False
elif resp.status_code == 200:
resp = resp.json()
resp = filter_centers_by_age(resp, min_age_booking)
if "centers" in resp:
print(
f"Centers available in {location['pincode']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}")
options += viable_options(resp, minimum_slots, min_age_booking, fee_type, dose_num)
else:
print(f"\nno centers in response for pincode : {location['pincode']}")
pass
for location in location_dtls:
if int(location["pincode"]) in [option["pincode"] for option in options]:
for _ in range(2):
beep(location["alert_freq"], 150)
return options
except Exception as e:
print(str(e))
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def generate_token_OTP_manual(mobile, request_header, otp_validation_header):
"""
    This function generates an OTP and returns a new token
"""
if not mobile:
print("Mobile number cannot be empty")
os.system('pause')
sys.exit()
valid_token = False
while not valid_token:
try:
data = {"mobile": mobile,
"secret": "U2FsdGVkX1+z/4Nr9nta+2DrVJSv7KS6VoQUSQ1ZXYDx/CJUkWxFYG6P3iM/VW+6jLQ9RDQVzp/RcZ8kbT41xw=="}
txnId = requests.post(url=OTP_PRO_URL, json=data, headers=request_header)
if txnId.status_code == 200:
print(f"Successfully requested OTP for mobile number {mobile} at {datetime.datetime.today()}..")
txnId = txnId.json()['txnId']
OTP = input("Enter OTP (If this takes more than 2 minutes, press Enter to retry): ")
if OTP:
data = {"otp": sha256(str(OTP).encode('utf-8')).hexdigest(), "txnId": txnId}
print(f"Validating OTP..")
token = requests.post(url=OTP_VALIDATE_URL, json=data, headers=otp_validation_header)
if token.status_code == 200:
token = token.json()['token']
print(f'Token Generated: {token}')
valid_token = True
return token
else:
print('Unable to Validate OTP')
print(f"Response: {token.text}")
retry = input(f"Retry with {mobile} ? (y/n Default y): ")
retry = retry if retry else 'y'
if retry == 'y':
pass
else:
sys.exit()
else:
print('Unable to Generate OTP')
print(txnId.status_code, txnId.text)
retry = input(f"Retry with {mobile} ? (y/n Default y): ")
retry = retry if retry else 'y'
if retry.lower() == 'y':
pass
else:
sys.exit()
except Exception as e:
print(str(e))
def collect_user_details(request_header):
# Get Beneficiaries
print("Fetching registered beneficiaries.. ")
beneficiaries = fetch_beneficiaries(request_header)
if beneficiaries.status_code == 200:
beneficiaries = beneficiaries.json()["beneficiaries"]
beneficiary_dtls = get_required_beneficiaries(request_header, beneficiaries)
else:
print("Unable to fetch beneficiaries")
print(beneficiaries.status_code)
print(beneficiaries.text)
os.system("pause")
sys.exit(1)
if len(beneficiary_dtls) == 0:
print("There should be at least one beneficiary. Exiting.")
os.system("pause")
sys.exit(1)
active_appointment = check_active_appointment(beneficiary_dtls, beneficiaries)
if len(active_appointment) > 0:
print(
"\n\n================================== Active appointments found ======================================\n"
"======================= Cancel/Reschedule appointment or remove user(s) having active appointment(s) ========================\n")
cleaned_appointments_for_display = cleaned_display(active_appointment)
display_table(cleaned_appointments_for_display)
        reschedule_inp = input(
            "\nSelect c : cancel all appointments \n"
            "Select b : Proceed with beneficiary having no active appointment \n"
            "Select r : Reschedule active appointment only (c/b/r): default b\n"
            "************* NOTE: Only one active appointment can be rescheduled at a time **********\n")
        if reschedule_inp.lower() == "r":
if len(active_appointment) == 1:
print(
"\n================================================ Rescheduling appointments for ===========================================\n")
beneficiary_dtls = active_appointment[:]
cleaned = cleaned_display(active_appointment)
display_table(cleaned)
else:
beneficiary_dtls = collect_reschedule_appointment_data(active_appointment)
print(
"\n================================================ Rescheduling appointments for ===========================================\n")
display_table(beneficiary_dtls)
elif reschedule_inp.lower() == 'c':
cancel_appointments(request_header, active_appointment)
else:
req_list = []
seen = set()
for active_beneficiary in list(active_appointment):
seen.add(active_beneficiary['bref_id'])
for filter_beneficiary in beneficiary_dtls:
if str(filter_beneficiary['bref_id']) not in seen:
req_list.append(filter_beneficiary)
if len(req_list) > 0:
print(
"\n================================================ Continuing with... ================================================\n")
beneficiary_dtls = req_list[:]
cleaned_active_benf = cleaned_display(req_list)
display_table(cleaned_active_benf)
else:
print(
"\n======================================= No eligible beneficiary selected for booking.. exiting script.. ======================================\n")
os.system("pause")
sys.exit(1)
else:
reschedule_inp = None
# Make sure all beneficiaries have the same type of vaccine
vaccine_types = [beneficiary["vaccine"] for beneficiary in beneficiary_dtls]
vaccines = Counter(vaccine_types)
if len(vaccines.keys()) != 1:
print(f"All beneficiaries in one attempt should have the same vaccine type. Found {len(vaccines.keys())}")
os.system("pause")
sys.exit(1)
vaccine_type = vaccine_types[0]
if not vaccine_type:
print("\n================================= Vaccine Info =================================\n")
vaccine_type = get_vaccine_preference()
print("\n================================= Starting Date =================================\n")
# Get search start date
start_date = input(
"\nSearch for next seven day starting from when?"
"\nUse 1 for today, 2 for tomorrow, or provide a date in the "
"format DD-MM-YYYY. Default 2: ")
if not start_date:
start_date = 2
search_dose2_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
elif start_date in ["1", "2"]:
start_date = int(start_date)
search_dose2_date = datetime.datetime.today().strftime("%d-%m-%Y")
else:
try:
datetime.datetime.strptime(start_date, "%d-%m-%Y")
today = datetime.datetime.today().strftime("%d-%m-%Y")
# assuming that a given date more than 15 days in future is of no use
if (datetime.datetime.strptime(start_date, "%d-%m-%Y") - datetime.datetime.strptime(today,
"%d-%m-%Y")).days > 15:
print(
"\n\n------------------ Info: GIVEN DATE IS OUT OF RANGE.. PROCEEDING WITH TOMORROW -------------------")
start_date = 2
search_dose2_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
except ValueError:
start_date = 2
print('\nInvalid Date! Proceeding with tomorrow.')
search_dose2_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
if all([beneficiary['status'] == 'Partially Vaccinated' for beneficiary in beneficiary_dtls]):
max_start_date = (datetime.datetime.today() + datetime.timedelta(days=5)).strftime("%d-%m-%Y")
# enabling multiple beneficiaries to book for dose 2 if dose2 dates are in past for all ############
for beneficiary in beneficiary_dtls:
if (datetime.datetime.strptime(beneficiary['dose2_date'], "%d-%m-%Y") - (
datetime.datetime.strptime(search_dose2_date, "%d-%m-%Y"))).days > 0:
print(
f"\n\n========================================================= due date for dose2 is too far ============================================\n\n"
f"\n########################### Scheduled dose2 date for beneficiary {beneficiary['name']} is {beneficiary['dose2_date']} #################################\n"
f"############################# Please select a start date in between {beneficiary['dose2_date']} and {max_start_date} ###############################")
print(
"\n ============================================ exiting script due to invalid start date ======================================\n")
os.system("pause")
sys.exit(1)
print("\n================================= Location Info =================================\n")
search_option = input(
"""Search by Pincode? Or by State/District? \nEnter 1 for Pincode or 2 for State/District. (Default 2) : """)
if not search_option or int(search_option) not in [1, 2]:
search_option = 2
else:
search_option = int(search_option)
if search_option == 2:
        # Collect vaccination center preference
location_dtls = get_districts(request_header)
# exclude pincode
exclude_option = input(
"""Do you want to avoid centers of particular pincode? y/n (default n) : """)
if not exclude_option or exclude_option not in ["y", "n"]:
exclude_option = "n"
if exclude_option.lower() == "y":
for district in location_dtls:
get_district_id = district["district_id"]
for district in beneficiary_dtls:
min_age = district["age"]
excluded_pincodes = get_all_pincodes(request_header, get_district_id, start_date, min_age)
else:
excluded_pincodes = None
else:
# Collect vaccination center preference
location_dtls = get_pincodes()
excluded_pincodes = None
print("\n================================= Additional Info =================================\n")
# Set booking condition (either one after another or all at once)
minimum_slots = input(
f"Filter out centers with availability less than ? default (Maximum) {len(beneficiary_dtls)} : ")
if minimum_slots:
minimum_slots = (int(minimum_slots)
if int(minimum_slots) == 1
else len(beneficiary_dtls))
else:
minimum_slots = len(beneficiary_dtls)
# Get refresh frequency
print("\n================================= Refresh Frequency =================================\n")
refresh_freq = input("How often do you want to refresh the calendar (in seconds)? Default 15. Minimum 5. : ")
refresh_freq = int(refresh_freq) if refresh_freq and int(refresh_freq) >= 5 else 15
# Get preference of Free/Paid option
fee_type = get_fee_type_preference()
# choice to auto book or to choose centers manually
print(
"\n============================ CAUTION! ============= CAUTION! CAUTION! ================ CAUTION! ========================\n")
print(
"============== BE CAREFUL WITH THIS OPTION! AUTO-BOOKING WILL BOOK THE FIRST AVAILABLE CENTRE, DATE, AND A RANDOM SLOT! ========================")
auto_book = input("Do you want to enable auto-booking? (y/n) Default # y: ")
auto_book = "y" if not auto_book else auto_book
print(
"\n============================== Captcha Automation =============================")
captcha_automation = input(
"\n###### Do you want to automate captcha autofill? (api/ai/n) Default : ai: #######"
"\n###### ai : Solve the captcha automatically #######"
"\n###### API : paid API key from anti captcha or 2captcha will be required to enter next #######")
captcha_automation = "ai" if not captcha_automation else captcha_automation
captcha_api_choice = None
captcha_automation_api_key = None
if captcha_automation.lower() == "api":
captcha_api_choice = input(
"Select your preferred API service, 0 for https://anti-captcha.com and 1 for https://2captcha.com/ ("
"Default 0) :")
if captcha_api_choice not in ['0', '1']:
captcha_api_choice = '0'
        # an API key is required for either service
        captcha_automation_api_key = input("Enter your Anti-Captcha or 2Captcha API key: ")
elif captcha_automation.lower() == "n":
captcha_api_choice = None
captcha_automation_api_key = None
collected_details = {
"beneficiary_dtls": beneficiary_dtls,
"location_dtls": location_dtls,
"search_option": search_option,
"minimum_slots": minimum_slots,
"refresh_freq": refresh_freq,
"auto_book": auto_book,
"start_date": start_date,
"vaccine_type": vaccine_type,
"fee_type": fee_type,
'captcha_automation': captcha_automation,
'captcha_api_choice': captcha_api_choice,
'captcha_automation_api_key': captcha_automation_api_key,
'excluded_pincodes': excluded_pincodes,
'reschedule_inp': reschedule_inp
}
return collected_details
def get_vaccine_preference():
print("It seems you're trying to find a slot for your first dose. Do you have a vaccine preference?")
preference = input("Enter 0 for No Preference, 1 for COVISHIELD, 2 for COVAXIN, or 3 for SPUTNIK V. Default 0 : ")
preference = int(preference) if preference and int(preference) in [0, 1, 2, 3] else 0
if preference == 1:
return "COVISHIELD"
elif preference == 2:
return "COVAXIN"
elif preference == 3:
return "SPUTNIK V"
else:
return None
def get_fee_type_preference():
print("\nDo you have a fee type preference?")
preference = input("Enter 0 for No Preference, 1 for Free Only, or 2 for Paid Only. Default 0 : ")
preference = int(preference) if preference and int(preference) in [0, 1, 2] else 0
if preference == 1:
return ["Free"]
elif preference == 2:
return ["Paid"]
else:
return ["Free", "Paid"]
def get_pincodes():
locations = []
pincodes = input("Enter comma separated index numbers of pincodes to monitor: ")
for idx, pincode in enumerate(pincodes.split(",")):
pincode = {"pincode": pincode, "alert_freq": 440 + ((2 * idx) * 110)}
locations.append(pincode)
return locations
def get_dose_num(collected_details):
# If any person has vaccine detail populated, we imply that they'll be taking second dose
# Note: Based on the assumption that everyone have the *EXACT SAME* vaccine status
if any(detail['vaccine']
for detail in collected_details["beneficiary_dtls"]):
return 2
return 1
def display_table(dict_list):
"""
This function
1. Takes a list of dictionary
2. Add an Index column, and
3. Displays the data in tabular format
"""
header = ["idx"] + list(dict_list[0].keys())
rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]
print(tabulate.tabulate(rows, header, tablefmt="grid"))
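# Example (illustrative):
#   display_table([{"state": "Delhi"}, {"state": "Goa"}])
# prints a tabulate "grid" table with an added "idx" column before the dict keys.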
def display_info_dict(details):
for key, value in details.items():
if isinstance(value, list):
if all(isinstance(item, dict) for item in value):
print(f"\t{key}:")
display_table(value)
else:
print(f"\t{key}\t: {value}")
else:
print(f"\t{key}\t: {value}")
def filter_centers_by_age(resp, min_age_booking):
if min_age_booking >= 45:
center_age_filter = 45
else:
center_age_filter = 18
if "centers" in resp:
for center in list(resp["centers"]):
for session in list(center["sessions"]):
if session['min_age_limit'] != center_age_filter:
center["sessions"].remove(session)
if len(center["sessions"]) == 0:
resp["centers"].remove(center)
return resp
def get_min_age(beneficiary_dtls):
"""
This function returns a min age argument, based on age of all beneficiaries
:param beneficiary_dtls:
:return: min_age:int
"""
age_list = [item["age"] for item in beneficiary_dtls]
min_age = min(age_list)
return min_age
def generate_captcha(request_header, captcha_automation, api_key, captcha_api_choice):
print("================================= GETTING CAPTCHA ==================================================")
resp = requests.post(CAPTCHA_URL, headers=request_header)
print(f'Captcha Response Code: {resp.status_code}')
if resp.status_code == 200 and captcha_automation == "n":
return captcha_builder_manual(resp.json())
elif resp.status_code == 200 and captcha_automation == "api":
return captcha_builder_api(resp.json(), api_key, captcha_api_choice)
elif resp.status_code == 200 and captcha_automation == "ai":
return captcha_builder_auto(resp.json())
def filer_by_excluded_pincodes(resp, excluded_pincodes):
if "centers" in resp:
available_center = resp['centers']
pin_excluded_centers = []
seen = set()
for pincodes in list(excluded_pincodes):
if pincodes['pincode'] not in seen:
seen.add(pincodes['pincode'])
for center in available_center:
if str(center['pincode']) not in seen:
pin_excluded_centers.append(center)
resp['centers'] = pin_excluded_centers
return resp
def vaccine_dose2_duedate(vaccine_type, dose1_date):
"""
This function
1.Checks the vaccine type
2.Returns the appropriate due date for the vaccine type
"""
if vaccine_type == "COVISHIELD":
dose1 = datetime.datetime.strptime(dose1_date, "%d-%m-%Y")
covishield_due_date = dose1 + datetime.timedelta(84)
return covishield_due_date.strftime("%d-%m-%Y")
elif vaccine_type == "COVAXIN":
dose1 = datetime.datetime.strptime(dose1_date, "%d-%m-%Y")
covaxin_due_date = dose1 + datetime.timedelta(28)
return covaxin_due_date.strftime("%d-%m-%Y")
elif vaccine_type == "SPUTNIK V":
dose1 = datetime.datetime.strptime(dose1_date, "%d-%m-%Y")
sputnikV_due_date = dose1 + datetime.timedelta(21)
return sputnikV_due_date.strftime("%d-%m-%Y")
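# Example (illustrative): a COVISHIELD first dose taken on 01-01-2021 gives
#   vaccine_dose2_duedate("COVISHIELD", "01-01-2021")  ->  "26-03-2021"   (84 days later)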
def get_saved_user_info(filename):
with open(filename, "r") as f:
data = json.load(f)
return data
def viable_options(resp, minimum_slots, min_age_booking, fee_type, dose_num):
options = []
if len(resp["centers"]) >= 0:
for center in resp["centers"]:
for session in center["sessions"]:
available_capacity = min(session[f'available_capacity_dose{dose_num}'], session['available_capacity'])
if ((available_capacity >= minimum_slots)
and (session["min_age_limit"] <= min_age_booking)
and (center["fee_type"] in fee_type)):
out = {
"name": center["name"],
"district": center["district_name"],
"pincode": center["pincode"],
"center_id": center["center_id"],
"vaccine": session["vaccine"],
"fee_type": center["fee_type"],
"fee": session.get("fee", "0"),
"available": available_capacity,
"date": session["date"],
"slots": session["slots"],
"session_id": session["session_id"],
}
options.append(out)
else:
pass
else:
pass
return options
def save_user_info(filename, details):
if not details['excluded_pincodes']:
details['excluded_pincodes'] = None
print("\n================================= Save Info =================================\n")
save_info = input("Would you like to save this as a JSON file for easy use next time?: (y/n Default y): ")
save_info = save_info if save_info else "y"
if save_info.lower() == "y":
with open(filename, "w") as f:
# JSON pretty save to file
json.dump(details, f, sort_keys=True, indent=4)
print(f"Info saved to {filename} in {os.getcwd()}")
else:
print("User Details has not been saved")
def confirm_and_proceed(collected_details):
print("\n================================= Confirm Info =================================\n")
display_info_dict(collected_details)
confirm = input("\nProceed with above info (y/n Default y) : ")
confirm = confirm if confirm else "y"
if confirm.lower() != "y":
print("Details not confirmed. Exiting process.")
os.system("pause")
sys.exit()
def collect_reschedule_appointment_data(active_appointment_detailed):
print(
f"\n================== select the user you want to reschedule the appointment for (CHOOSE ONLY ONE USER) ===================\n")
clean_data = cleaned_display(active_appointment_detailed)
display_table(clean_data)
# loop to force user to enter correct input
while True:
try:
reschedule_input = input()
if 0 < int(reschedule_input) <= len(active_appointment_detailed):
break
except ValueError:
print("Invalid Input ! Let's try again")
pass
if reschedule_input:
reschedule_idx = [int(idx) - 1 for idx in reschedule_input.split(",")]
data = [
{"bref_id": item["bref_id"],
"name": item["beneficiary"],
"age": item["age"],
"center_name": item["center_name"],
"slot": item["slot"],
"appointment_id": item["appointment_id"],
"status": item["status"],
"vaccine": item["vaccine"],
"dose1_date": item["dose1_date"],
"dose2_date": item["dose2_date"],
}
for idx, item in enumerate(list(active_appointment_detailed))
if idx in reschedule_idx
]
return data
else:
print("\n=========================== wrong input.. exiting.... ===============================")
os.system('pause')
sys.exit()
def check_active_appointment(reqired_beneficiaries, beneficiaries):
active_appointments_list = []
beneficiary_ref_ids = [beneficiary["bref_id"]
for beneficiary in reqired_beneficiaries]
beneficiary_dtls = [all_active_beneficiary
for all_active_beneficiary in beneficiaries
if all_active_beneficiary['beneficiary_reference_id'] in beneficiary_ref_ids]
for beneficiary_active in beneficiary_dtls:
expected_appointments = (1 if beneficiary_active['vaccination_status'] == "Partially Vaccinated" else 0)
if beneficiary_active["vaccination_status"] == "Partially Vaccinated" and len(
beneficiary_active["dose2_date"]) == 0:
dose2_date_calculated = vaccine_dose2_duedate(beneficiary_active["vaccine"],
beneficiary_active["dose1_date"])
beneficiary_active["dose2_date"] = dose2_date_calculated
if len(beneficiary_active["appointments"]) > expected_appointments:
beneficiary_active["age"] = datetime.datetime.today().year - int(beneficiary_active["birth_year"])
data = beneficiary_active['appointments'][expected_appointments]
beneficiary_data = {'center_name': data['name'],
'state_name': data['state_name'],
'dose': data['dose'],
'date': data['date'],
'slot': data['slot'],
'appointment_id': data['appointment_id'],
'session_id': data['session_id']
}
active_appointments_list.append(
{"bref_id": beneficiary_active["beneficiary_reference_id"],
"beneficiary": beneficiary_active['name'],
'age': beneficiary_active["age"],
**beneficiary_data,
'status': beneficiary_active['vaccination_status'],
'vaccine': beneficiary_active['vaccine'],
'birth_year': beneficiary_active['birth_year'],
"mobile_number": beneficiary_active["mobile_number"],
"dose1_date": beneficiary_active['dose1_date'],
"dose2_date": beneficiary_active['dose2_date']
}
)
return active_appointments_list
def reschedule_appointment(request_header, details, mobile, generate_captcha_pref, api_key=None,
captcha_api_choice=None):
try:
valid_captcha = True
while valid_captcha:
captcha = generate_captcha(request_header, generate_captcha_pref, api_key, captcha_api_choice)
details["captcha"] = captcha
print(
"================================= ATTEMPTING BOOKING ==================================================")
resp = requests.post(RESCHEDULE_URL, headers=request_header, json=details)
print(f"Booking Response Code: {resp.status_code}")
print(f"Booking Response : {resp.text}")
if resp.status_code == 401:
print("TOKEN INVALID")
return False
elif resp.status_code == 204:
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
print("############## RESCHEDULED! ############################ RESCHEDULED! ##############")
print(" YOUR APPOINTMENT HAS BEEN RESCHEDULED ")
re_appointment_id = resp.text
re_appointment_id = (re_appointment_id[32:68])
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/appointment/appointmentslip/download?appointment_id={}".format(
re_appointment_id), headers=request_header)
if response.status_code == 200:
filename = "appointment_slip" + re_appointment_id
with open(filename, 'wb') as f:
f.write(response.content)
else:
print("unable to download appointment slip")
print(f"Response: {resp.status_code} : {resp.text}")
print("\nPress any key twice to exit program.")
os.system('pause')
os.system('pause')
sys.exit(1)
elif resp.status_code == 409:
# This vaccination center is completely booked for the selected date
print(f"Response: {resp.status_code} : {resp.text}")
return True
elif resp.status_code == 400:
print(f"Response: {resp.status_code} : {resp.text}")
# {"errorCode":"APPOIN0011","error":"You have selected the same vaccination center and date as that of your current appointment. Please select a different vaccination center or the date for rescheduling."}
break
elif resp.status_code >= 500:
# Server error at the time of high booking
print(f"Response: {resp.status_code} : {resp.text}")
pass
else:
print(f"Response: {resp.status_code} : {resp.text}")
return True
except Exception as e:
print(str(e))
beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def cleaned_display(appointment):
clean_display = []
active_appointment = copy.deepcopy(appointment)
for item in active_appointment:
item.pop("session_id", None)
item.pop("status", None)
item.pop("vaccine", None)
item.pop("birth_year", None)
item.pop("mobile_number", None)
item.pop("appointment_id", None)
item.pop("state_name", None)
item.pop("dose1_date", None)
item.pop("dose2_date", None)
clean_display.append(item)
return clean_display
def cancel_appointments(request_header, active_appointments):
    confirm = input("\nAre you sure? Type yes-cancel to cancel all appointments: ")
if confirm == "yes-cancel":
appointment_to_cancel = []
for beneficiary in list(active_appointments):
tmp = {
'appointment_id': beneficiary["appointment_id"],
'beneficiariesToCancel': beneficiary["bref_id"],
'name': beneficiary["beneficiary"]
}
appointment_to_cancel.append(tmp)
for value_present in appointment_to_cancel:
data = {
'appointment_id': value_present['appointment_id'],
'beneficiariesToCancel': [value_present['beneficiariesToCancel']]
}
response = requests.post(CANCEL_URL, headers=request_header, json=data)
if response.status_code == 204:
print("appointment of " + str(value_present['name']) + " has been cancelled")
else:
try:
print("\n UNABLE TO CANCEL THE APPOINTMENT of " + str(value_present['name']))
print(f"Response: {response.status_code} : {response.text}")
os.system('pause')
sys.exit(1)
except Exception as e:
print(str(e))
pass
os.system('pause')
sys.exit(1)
else:
print("\n\n wrong input.. exiting the cancellation")
os.system('pause')
sys.exit(0)
|
py | 7dfed879f907c52b6aafe11a0364bb18eec0541b | """
Stitches submodels together.
"""
import numpy as np
import time, os
import itertools
from functools import partial
from collections import defaultdict, namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
# Custom modules
from src import hyperprior
from src.loss import losses
from src.helpers import maths, datasets, utils
from src.network import encoder, generator, discriminator, hyper
from src.loss.perceptual_similarity import perceptual_loss as ps
from default_config import ModelModes, ModelTypes, hific_args, directories
Intermediates = namedtuple("Intermediates",
["input_image", # [0, 1] (after scaling from [0, 255])
"reconstruction", # [0, 1]
"latents_quantized", # Latents post-quantization.
"n_bpp", # Differential entropy estimate.
"q_bpp"]) # Shannon entropy estimate.
Disc_out= namedtuple("disc_out",
["D_real", "D_gen", "D_real_logits", "D_gen_logits"])
class Model(nn.Module):
def __init__(self, args, logger, storage_train=defaultdict(list), storage_test=defaultdict(list), model_mode=ModelModes.TRAINING,
model_type=ModelTypes.COMPRESSION):
        """
        Builds hific model from submodels in network.
        """
        super(Model, self).__init__()
self.args = args
self.model_mode = model_mode
self.model_type = model_type
self.logger = logger
self.log_interval = args.log_interval
self.storage_train = storage_train
self.storage_test = storage_test
self.step_counter = 0
if self.args.use_latent_mixture_model is True:
self.args.latent_channels = self.args.latent_channels_DLMM
if not hasattr(ModelTypes, self.model_type.upper()):
raise ValueError("Invalid model_type: [{}]".format(self.model_type))
if not hasattr(ModelModes, self.model_mode.upper()):
raise ValueError("Invalid model_mode: [{}]".format(self.model_mode))
self.image_dims = self.args.image_dims # Assign from dataloader
self.batch_size = self.args.batch_size
self.entropy_code = False
if model_mode == ModelModes.EVALUATION:
self.entropy_code = True
self.Encoder = encoder.Encoder(self.image_dims, self.batch_size, C=self.args.latent_channels,
channel_norm=self.args.use_channel_norm)
self.Generator = generator.Generator(self.image_dims, self.batch_size, C=self.args.latent_channels,
n_residual_blocks=self.args.n_residual_blocks, channel_norm=self.args.use_channel_norm, sample_noise=
self.args.sample_noise, noise_dim=self.args.noise_dim)
if self.args.use_latent_mixture_model is True:
self.Hyperprior = hyperprior.HyperpriorDLMM(bottleneck_capacity=self.args.latent_channels,
likelihood_type=self.args.likelihood_type, mixture_components=self.args.mixture_components, entropy_code=self.entropy_code)
else:
self.Hyperprior = hyperprior.Hyperprior(bottleneck_capacity=self.args.latent_channels,
likelihood_type=self.args.likelihood_type, entropy_code=self.entropy_code)
self.amortization_models = [self.Encoder, self.Generator]
self.amortization_models.extend(self.Hyperprior.amortization_models)
# Use discriminator if GAN mode enabled and in training/validation
self.use_discriminator = (
self.model_type == ModelTypes.COMPRESSION_GAN
and (self.model_mode != ModelModes.EVALUATION)
)
if self.use_discriminator is True:
assert self.args.discriminator_steps > 0, 'Must specify nonzero training steps for D!'
self.discriminator_steps = self.args.discriminator_steps
self.logger.info('GAN mode enabled. Training discriminator for {} steps.'.format(
self.discriminator_steps))
self.Discriminator = discriminator.Discriminator(image_dims=self.image_dims,
context_dims=self.args.latent_dims, C=self.args.latent_channels)
self.gan_loss = partial(losses.gan_loss, args.gan_loss_type)
else:
self.discriminator_steps = 0
self.Discriminator = None
self.squared_difference = torch.nn.MSELoss(reduction='none')
# Expects [-1,1] images or [0,1] with normalize=True flag
self.perceptual_loss = ps.PerceptualLoss(model='net-lin', net='alex', use_gpu=torch.cuda.is_available(), gpu_ids=[args.gpu])
def store_loss(self, key, loss):
assert type(loss) == float, 'Call .item() on loss before storage'
if self.training is True:
storage = self.storage_train
else:
storage = self.storage_test
if self.writeout is True:
storage[key].append(loss)
def compression_forward(self, x):
"""
Forward pass through encoder, hyperprior, and decoder.
Inputs
x: Input image. Format (N,C,H,W), range [0,1],
or [-1,1] if args.normalize_image is True
torch.Tensor
Outputs
intermediates: NamedTuple of intermediate values
"""
image_dims = tuple(x.size()[1:]) # (C,H,W)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_encoder_downsamples = self.Encoder.n_downsampling_layers
factor = 2 ** n_encoder_downsamples
x = utils.pad_factor(x, x.size()[2:], factor)
# Encoder forward pass
y = self.Encoder(x)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
factor = 2 ** n_hyperencoder_downsamples
y = utils.pad_factor(y, y.size()[2:], factor)
hyperinfo = self.Hyperprior(y, spatial_shape=x.size()[2:])
latents_quantized = hyperinfo.decoded
total_nbpp = hyperinfo.total_nbpp
total_qbpp = hyperinfo.total_qbpp
# Use quantized latents as input to G
reconstruction = self.Generator(latents_quantized)
if self.args.normalize_input_image is True:
reconstruction = torch.tanh(reconstruction)
# Undo padding
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
reconstruction = reconstruction[:, :, :image_dims[1], :image_dims[2]]
intermediates = Intermediates(x, reconstruction, latents_quantized,
total_nbpp, total_qbpp)
return intermediates, hyperinfo
def discriminator_forward(self, intermediates, train_generator):
""" Train on gen/real batches simultaneously. """
x_gen = intermediates.reconstruction
x_real = intermediates.input_image
# Alternate between training discriminator and compression models
if train_generator is False:
x_gen = x_gen.detach()
D_in = torch.cat([x_real, x_gen], dim=0)
latents = intermediates.latents_quantized.detach()
latents = torch.repeat_interleave(latents, 2, dim=0)
D_out, D_out_logits = self.Discriminator(D_in, latents)
D_out = torch.squeeze(D_out)
D_out_logits = torch.squeeze(D_out_logits)
D_real, D_gen = torch.chunk(D_out, 2, dim=0)
D_real_logits, D_gen_logits = torch.chunk(D_out_logits, 2, dim=0)
return Disc_out(D_real, D_gen, D_real_logits, D_gen_logits)
def distortion_loss(self, x_gen, x_real):
# Distortion is computed in [0,255] space (inputs are scaled by 255 below);
# overall scaling is delegated to the k_M weighting in compression_loss.
sq_err = self.squared_difference(x_gen*255., x_real*255.) # / 255.
return torch.mean(sq_err)
def perceptual_loss_wrapper(self, x_gen, x_real, normalize=True):
""" Assumes inputs are in [0, 1] if normalize=True, else [-1, 1] """
LPIPS_loss = self.perceptual_loss.forward(x_gen, x_real, normalize=normalize)
return torch.mean(LPIPS_loss)
def compression_loss(self, intermediates, hyperinfo):
x_real = intermediates.input_image
x_gen = intermediates.reconstruction
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
x_real = (x_real + 1.) / 2.
x_gen = (x_gen + 1.) / 2.
distortion_loss = self.distortion_loss(x_gen, x_real)
perceptual_loss = self.perceptual_loss_wrapper(x_gen, x_real, normalize=True)
weighted_distortion = self.args.k_M * distortion_loss
weighted_perceptual = self.args.k_P * perceptual_loss
weighted_rate, rate_penalty = losses.weighted_rate_loss(self.args, total_nbpp=intermediates.n_bpp,
total_qbpp=intermediates.q_bpp, step_counter=self.step_counter, ignore_schedule=self.args.ignore_schedule)
weighted_R_D_loss = weighted_rate + weighted_distortion
weighted_compression_loss = weighted_R_D_loss + weighted_perceptual
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('rate_penalty', rate_penalty)
self.store_loss('distortion', distortion_loss.item())
self.store_loss('perceptual', perceptual_loss.item())
self.store_loss('n_rate', intermediates.n_bpp.item())
self.store_loss('q_rate', intermediates.q_bpp.item())
self.store_loss('n_rate_latent', hyperinfo.latent_nbpp.item())
self.store_loss('q_rate_latent', hyperinfo.latent_qbpp.item())
self.store_loss('n_rate_hyperlatent', hyperinfo.hyperlatent_nbpp.item())
self.store_loss('q_rate_hyperlatent', hyperinfo.hyperlatent_qbpp.item())
self.store_loss('weighted_rate', weighted_rate.item())
self.store_loss('weighted_distortion', weighted_distortion.item())
self.store_loss('weighted_perceptual', weighted_perceptual.item())
self.store_loss('weighted_R_D', weighted_R_D_loss.item())
self.store_loss('weighted_compression_loss_sans_G', weighted_compression_loss.item())
return weighted_compression_loss
def GAN_loss(self, intermediates, train_generator=False):
"""
train_generator: Flag to send gradients to generator
"""
disc_out = self.discriminator_forward(intermediates, train_generator)
D_loss = self.gan_loss(disc_out, mode='discriminator_loss')
G_loss = self.gan_loss(disc_out, mode='generator_loss')
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('D_gen', torch.mean(disc_out.D_gen).item())
self.store_loss('D_real', torch.mean(disc_out.D_real).item())
self.store_loss('disc_loss', D_loss.item())
self.store_loss('gen_loss', G_loss.item())
self.store_loss('weighted_gen_loss', (self.args.beta * G_loss).item())
return D_loss, G_loss
def compress(self, x, silent=False):
"""
* Pass image through encoder to obtain latents: x -> Encoder() -> y
* Pass latents through hyperprior encoder to obtain hyperlatents:
y -> hyperencoder() -> z
* Encode hyperlatents via nonparametric entropy model.
* Pass hyperlatents through mean-scale hyperprior decoder to obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Encode latents via entropy model derived from (mean, scale) hyperprior output.
"""
assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
f'Set model mode to {ModelModes.EVALUATION} for compression.')
spatial_shape = tuple(x.size()[2:])
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_encoder_downsamples = self.Encoder.n_downsampling_layers
factor = 2 ** n_encoder_downsamples
x = utils.pad_factor(x, x.size()[2:], factor)
# Encoder forward pass
y = self.Encoder(x)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
factor = 2 ** n_hyperencoder_downsamples
y = utils.pad_factor(y, y.size()[2:], factor)
compression_output = self.Hyperprior.compress_forward(y, spatial_shape)
attained_hbpp = 32 * len(compression_output.hyperlatents_encoded) / np.prod(spatial_shape)
attained_lbpp = 32 * len(compression_output.latents_encoded) / np.prod(spatial_shape)
attained_bpp = 32 * ((len(compression_output.hyperlatents_encoded) +
len(compression_output.latents_encoded)) / np.prod(spatial_shape))
if silent is False:
self.logger.info('[ESTIMATED]')
self.logger.info(f'BPP: {compression_output.total_bpp:.3f}')
self.logger.info(f'HL BPP: {compression_output.hyperlatent_bpp:.3f}')
self.logger.info(f'L BPP: {compression_output.latent_bpp:.3f}')
self.logger.info('[ATTAINED]')
self.logger.info(f'BPP: {attained_bpp:.3f}')
self.logger.info(f'HL BPP: {attained_hbpp:.3f}')
self.logger.info(f'L BPP: {attained_lbpp:.3f}')
return compression_output
def decompress(self, compression_output):
"""
* Recover z* from compressed message.
* Pass recovered hyperlatents through mean-scale hyperprior decoder to obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Use latent entropy model to recover y* from compressed image.
* Pass quantized latent through generator to obtain the reconstructed image.
y* -> Generator() -> x*.
"""
assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
f'Set model mode to {ModelModes.EVALUATION} for decompression.')
latents_decoded = self.Hyperprior.decompress_forward(compression_output, device=utils.get_device())
# Use quantized latents as input to G
reconstruction = self.Generator(latents_decoded)
if self.args.normalize_input_image is True:
reconstruction = torch.tanh(reconstruction)
# Undo padding
image_dims = compression_output.spatial_shape
reconstruction = reconstruction[:, :, :image_dims[0], :image_dims[1]]
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
reconstruction = (reconstruction + 1.) / 2.
reconstruction = torch.clamp(reconstruction, min=0., max=1.)
return reconstruction
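# A minimal round-trip sketch for the two methods above (variable names and the
# eval/no_grad calls are illustrative, not part of this file):
#
#   model.eval()
#   with torch.no_grad():
#       compression_output = model.compress(x)        # entropy-coded latents + metadata
#       x_hat = model.decompress(compression_output)  # reconstruction in [0,1]
#   print(compression_output.total_bpp)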
def forward(self, x, train_generator=False, return_intermediates=False, writeout=True):
self.writeout = writeout
losses = dict()
if train_generator is True:
# Define a 'step' as one cycle of G-D training
self.step_counter += 1
intermediates, hyperinfo = self.compression_forward(x)
if self.model_mode == ModelModes.EVALUATION:
reconstruction = intermediates.reconstruction
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
reconstruction = (reconstruction + 1.) / 2.
reconstruction = torch.clamp(reconstruction, min=0., max=1.)
return reconstruction, intermediates.q_bpp
compression_model_loss = self.compression_loss(intermediates, hyperinfo)
if self.use_discriminator is True:
# Only send gradients to generator when training generator via
# `train_generator` flag
D_loss, G_loss = self.GAN_loss(intermediates, train_generator)
weighted_G_loss = self.args.beta * G_loss
compression_model_loss += weighted_G_loss
losses['disc'] = D_loss
losses['compression'] = compression_model_loss
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('weighted_compression_loss', compression_model_loss.item())
if return_intermediates is True:
return losses, intermediates
else:
return losses
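# A rough sketch of one training step driven by the losses dict returned above
# (optimizer names are assumptions; the G/D alternation follows the
# `train_generator` flag and `discriminator_steps` set up in __init__):
#
#   losses = model(x, train_generator=True)
#   losses['compression'].backward()
#   amortization_opt.step(); amortization_opt.zero_grad()
#   if model.use_discriminator:
#       losses = model(x, train_generator=False)
#       losses['disc'].backward()
#       disc_opt.step(); disc_opt.zero_grad()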
if __name__ == '__main__':
compress_test = False
if compress_test is True:
model_mode = ModelModes.EVALUATION
else:
model_mode = ModelModes.TRAINING
logger = utils.logger_setup(logpath=os.path.join(directories.experiments, 'logs'), filepath=os.path.abspath(__file__))
device = utils.get_device()
logger.info(f'Using device {device}')
storage_train = defaultdict(list)
storage_test = defaultdict(list)
model = Model(hific_args, logger, storage_train, storage_test, model_mode=model_mode, model_type=ModelTypes.COMPRESSION_GAN)
model.to(device)
logger.info(model)
transform_param_names = list()
transform_params = list()
logger.info('ALL PARAMETERS')
for n, p in model.named_parameters():
if ('Encoder' in n) or ('Generator' in n):
transform_param_names.append(n)
transform_params.append(p)
if ('analysis' in n) or ('synthesis' in n):
transform_param_names.append(n)
transform_params.append(p)
logger.info(f'{n} - {p.shape}')
logger.info('AMORTIZATION PARAMETERS')
amortization_named_parameters = itertools.chain.from_iterable(
[am.named_parameters() for am in model.amortization_models])
for n, p in amortization_named_parameters:
logger.info(f'{n} - {p.shape}')
logger.info('TRANSFORM PARAMETERS')
for n, p in zip(transform_param_names, transform_params):
logger.info(f'{n} - {p.shape}')
logger.info('HYPERPRIOR PARAMETERS')
for n, p in model.Hyperprior.hyperlatent_likelihood.named_parameters():
logger.info(f'{n} - {p.shape}')
if compress_test is False:
logger.info('DISCRIMINATOR PARAMETERS')
for n, p in model.Discriminator.named_parameters():
logger.info(f'{n} - {p.shape}')
logger.info("Number of trainable parameters: {}".format(utils.count_parameters(model)))
logger.info("Estimated size: {} MB".format(utils.count_parameters(model) * 4. / 10**6))
B = 10
shape = [B, 3, 256, 256]
x = torch.randn(shape).to(device)
start_time = time.time()
if compress_test is True:
model.eval()
logger.info('Starting compression with input shape {}'.format(shape))
compression_output = model.compress(x)
reconstruction = model.decompress(compression_output)
logger.info(f"n_bits: {compression_output.total_bits}")
logger.info(f"bpp: {compression_output.total_bpp}")
logger.info(f"MSE: {torch.mean(torch.square(reconstruction - x)).item()}")
else:
logger.info('Starting forward pass with input shape {}'.format(shape))
losses = model(x)
compression_loss, disc_loss = losses['compression'], losses['disc']
logger.info('Delta t {:.3f}s'.format(time.time() - start_time))
|
py | 7dfed8ae1de1547b8563eb675b7662244d347356 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_request(
vault_name, # type: str
resource_group_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2018-12-20"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
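# For illustration (hypothetical values): with vault_name='myVault',
# resource_group_name='myRG' and subscription_id='0000-...', the request URL becomes
# /subscriptions/0000-.../resourceGroups/myRG/providers/Microsoft.RecoveryServices/vaults/myVault/backupstorageconfig/vaultstorageconfig
# with api-version=2018-12-20 appended as a query parameter.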
def build_update_request(
vault_name, # type: str
resource_group_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-12-20"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_patch_request(
vault_name, # type: str
resource_group_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-12-20"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class BackupResourceStorageConfigsOperations(object):
"""BackupResourceStorageConfigsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.passivestamp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
vault_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BackupResourceConfigResource"
"""Fetches resource storage config.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupResourceConfigResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfigResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupResourceConfigResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupResourceConfigResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig'} # type: ignore
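# A hedged usage sketch: this operations group is normally reached through the
# generated management client rather than instantiated directly. Assuming the
# client exposes it as `backup_resource_storage_configs`:
#
#   config = client.backup_resource_storage_configs.get(
#       vault_name="myVault", resource_group_name="myRG")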
@distributed_trace
def update(
self,
vault_name, # type: str
resource_group_name, # type: str
parameters, # type: "_models.BackupResourceConfigResource"
**kwargs # type: Any
):
# type: (...) -> "_models.BackupResourceConfigResource"
"""Updates vault storage model type.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param parameters: Vault storage config request.
:type parameters:
~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfigResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupResourceConfigResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfigResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupResourceConfigResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'BackupResourceConfigResource')
request = build_update_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupResourceConfigResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig'} # type: ignore
@distributed_trace
def patch(
self,
vault_name, # type: str
resource_group_name, # type: str
parameters, # type: "_models.BackupResourceConfigResource"
**kwargs # type: Any
):
# type: (...) -> None
"""Updates vault storage model type.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param parameters: Vault storage config request.
:type parameters:
~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfigResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'BackupResourceConfigResource')
request = build_patch_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.patch.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig'} # type: ignore
|
py | 7dfed94482a05e03b09c6f25fbff07c6beb93762 | """
Support pre-0.12 series pickle compatibility.
"""
import copy
import pickle as pkl
from typing import TYPE_CHECKING
import warnings
from pandas import Index
if TYPE_CHECKING:
from pandas import Series, DataFrame
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except TypeError as err:
# If we have a deprecated function,
# try to replace and try again.
msg = "_reconstruct: First argument must be a sub-type of ndarray"
if msg in str(err):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except TypeError:
pass
raise
_sparse_msg = """\
Loading a saved '{cls}' as a {new} with sparse values.
'{cls}' is now removed. You should re-save this dataset in its new format.
"""
class _LoadSparseSeries:
# To load a SparseSeries as a Series[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "Series", but must return
# a subtype of "_LoadSparseSeries")
def __new__(cls) -> "Series": # type: ignore
from pandas import Series
warnings.warn(
_sparse_msg.format(cls="SparseSeries", new="Series"),
FutureWarning,
stacklevel=6,
)
return Series(dtype=object)
class _LoadSparseFrame:
# To load a SparseDataFrame as a DataFrame[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "DataFrame", but must
# return a subtype of "_LoadSparseFrame")
def __new__(cls) -> "DataFrame": # type: ignore
from pandas import DataFrame
warnings.warn(
_sparse_msg.format(cls="SparseDataFrame", new="DataFrame"),
FutureWarning,
stacklevel=6,
)
return DataFrame()
# If classes are moved, provide compat here.
_class_locations_map = {
("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
# 15477
("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
# 10890
("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
("pandas.sparse.series", "SparseTimeSeries"): (
"pandas.core.sparse.series",
"SparseSeries",
),
# 12588, extensions moving
("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
# 18543 moving period
("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
("pandas.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
("pandas._libs.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
# 15998 top-level dirs moving
("pandas.sparse.array", "SparseArray"): (
"pandas.core.arrays.sparse",
"SparseArray",
),
("pandas.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.sparse.frame", "SparseDataFrame"): (
"pandas.core.sparse.frame",
"_LoadSparseFrame",
),
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
("pandas.indexes.numeric", "Int64Index"): (
"pandas.core.indexes.numeric",
"Int64Index",
),
("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
("pandas.tseries.index", "_new_DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"_new_DatetimeIndex",
),
("pandas.tseries.index", "DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"DatetimeIndex",
),
("pandas.tseries.period", "PeriodIndex"): (
"pandas.core.indexes.period",
"PeriodIndex",
),
# 19269, arrays moving
("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
# 19939, add timedeltaindex, float64index compat from 15998 move
("pandas.tseries.tdi", "TimedeltaIndex"): (
"pandas.core.indexes.timedeltas",
"TimedeltaIndex",
),
("pandas.indexes.numeric", "Float64Index"): (
"pandas.core.indexes.numeric",
"Float64Index",
),
("pandas.core.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.core.sparse.frame", "SparseDataFrame"): (
"pandas.compat.pickle_compat",
"_LoadSparseFrame",
),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat
class Unpickler(pkl._Unpickler): # type: ignore
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super().find_class(module, name)
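# For example, a legacy pickle that references ("pandas.indexes.base", "Index")
# is transparently resolved to pandas.core.indexes.base.Index via
# _class_locations_map above.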
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
pass
def load(fh, encoding=None, is_verbose=False):
"""load a pickle, with a provided encoding
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
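# A minimal usage sketch (the file path is hypothetical):
#
#   with open("legacy_frame.pkl", "rb") as fh:
#       obj = load(fh, encoding="latin-1")
#
# pandas' read_pickle falls back to this loader when the standard unpickler
# fails on a legacy pickle.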
|
py | 7dfeda747fe7ce5688cc86f5717ad68d4460c814 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 18:57:16 2020
@author: OLLIVANDER
"""
# Infix to Postfix Expression
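# Worked example: for the input a+b*(c-d) the loop below emits operands as they
# arrive and pops operators by precedence, yielding the postfix string abcd-*+
# once the stack is drained at the end.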
preference = {'+': 1, '-': 1, '*': 2, '/': 2, '^': 3}
operators = set(['+', '-', '*', '/', '^', '(', ')'])
expression = input("Enter infix expression: ")
fin =''
st=[]
flag=0
for i in expression:
#print(i)
if i not in operators:
fin+=i
# print("operand" , fin)
elif i=='(':
st.append('(')
# print('hi',st)
#print(st)
elif i ==')':
while st[-1]!='(':
fin+=st.pop()
st.pop()
print(" ) " , fin)
elif flag==0:
st.append(i)
flag=9
else:
while( len(st)!=0 and st[-1]!='(' and preference[st[-1]]>=preference[i]):
fin+=st.pop()
st.append(i)
print('i = ', i)
print("stack = ", st)
print("fin = " , fin)
while len(st)!=0:
fin+=st.pop()
print(len(st))
print(fin) |
py | 7dfedab0bf89598429864c2e9d90e92035f2380c | n = input('Digite o número: ')
print(n[0])
print(n[1])
print(n[2])
print(n[3]) |
py | 7dfedad1d4b6edf79a690413d6b8a7271034ef22 | # Scrapy settings for scrappers project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrappers'
SPIDER_MODULES = ['scrappers.spiders']
NEWSPIDER_MODULE = 'scrappers.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'yahoo_news (+https://friendlyuser.github.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Do not follow HTTP redirects
REDIRECT_ENABLED = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.75
ROBOTSTXT_OBEY = True
HTTPERROR_ALLOWED_CODES = [302, 404]
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrappers.middlewares.ScrappersSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrappers.middlewares.ScrappersDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrappers.pipelines.ScrappersPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
py | 7dfedae922657ebcce88077e301f5848b90a4f45 | #!/usr/bin/env python
"""
Copyright (C) 2014 Ivan Gregor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Computes precision and recall, including correction.
"""
import os
import argparse
from algbioi.com import fasta
from algbioi.com import taxonomy_ncbi
from algbioi.eval import cami
class _TaxonomyWrapperA():
"""
Wraps the functionality of the database.
"""
def __init__(self, databaseFile):
self._taxonomy = taxonomy_ncbi.TaxonomyNcbi(databaseFile)
self._rankToId = {}
self._ncbidToRankId = {}
self._predAtRankId = {} # rankId -> ncbid -> ncbid at given rank
self._noDefAtRankId = {} # rankId -> set of ncbids for which the ncbid at given rank is not defined
self._ncbidToNcbidParent = {} # ncbid -> parent ncbid
id = 0
for rank in taxonomy_ncbi.TAXONOMIC_RANKS:
self._rankToId[rank] = id
self._predAtRankId[id] = {}
self._noDefAtRankId[id] = set()
id += 1
def _getRankId(self, ncbid):
"""
Gets a rankId given an ncbi taxon id
@rtype: int
"""
rankId = self._ncbidToRankId.get(ncbid, None)
if rankId is not None:
return rankId
else:
rank = self._taxonomy.getRank(ncbid)
if rank is None:
return None
else:
rankId = self._rankToId.get(rank, None)
self._ncbidToRankId[ncbid] = rankId
return rankId
def _getParent(self, ncbid):
"""
Gets direct parent ncbi taxon id.
"""
parent = self._ncbidToNcbidParent.get(ncbid, None)
if parent is None:
parent = self._taxonomy.getParentNcbid(ncbid)
self._ncbidToNcbidParent[ncbid] = parent
return parent
def getPredDictAtRank(self, seqToNcbid, rank):
"""
Gets predictions at the given rank as a dictionary.
@param seqToNcbid: contains the mapping, sequence name -> ncbi taxon id
@type seqToNcbid: dict
@param rank: the resulting dictionary will contain predictions at this rank
@type rank: str
@return: mapping, sequence name -> ncbi taxon id at given rank
@rtype: dict
"""
rankId = self._rankToId[rank]
retDict = {}
predAtRankBuff = self._predAtRankId[rankId]
noDefAtRankBuff = self._noDefAtRankId[rankId]
for seq, ncbid in seqToNcbid.iteritems():
pred = predAtRankBuff.get(ncbid, None)
if pred is not None:
retDict[seq] = pred # we already know the ncbid at given rank
continue
if ncbid in noDefAtRankBuff:
continue # the ncbid is not defined at this rank (we already know)
ncbidRankId = self._getRankId(ncbid)
#if ncbidRankId is None:
# noDefAtRankBuff.add(ncbid)
# continue # we have just found out that the ncbid is not defined at this rank
if ncbidRankId == rankId: # the ncbid is defined already at the right rank
predAtRankBuff[ncbid] = ncbid
retDict[seq] = ncbid
continue
if (ncbidRankId is None) or (ncbidRankId > rankId): # the right ncbid may be defined at a higher rank
current = self._getParent(ncbid)
while current is not None:
currentRankId = self._getRankId(current)
if currentRankId == rankId:
retDict[seq] = current
predAtRankBuff[ncbid] = current
break
current = self._getParent(current)
if current is None:
noDefAtRankBuff.add(ncbid) # we have just found out that the ncbid is not defined at this rank
return retDict
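# Example (real NCBI taxon ids, used only for illustration): with rank='genus',
# a sequence assigned to 562 (Escherichia coli, a species) is walked up the
# parent chain and reported as 561 (Escherichia); sequences whose lineage has
# no node at the requested rank are omitted from the result.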
def close(self):
self._taxonomy.close()
class Accuracy():
"""
Implements computation of the "precision" and "recall" according to different definitions.
"""
def __init__(self, seqIdToBp, seqIdToPred, seqIdToTruePred, taxonomy, correctLabelThreshold=None):
"""
Initializes the accuracy object.
@param seqIdToBp: dictionary or a fasta file
@param seqIdToPred: dictionary or a prediction file
@param seqIdToTruePred: dictionary or a true prediction file
@param taxonomy: database file in the sqlite3 format, or a taxonomy object retrieved from an Accuracy instance that has not been closed
"""
if isinstance(seqIdToBp, dict):
self._seqToBp = seqIdToBp
else:
assert os.path.isfile(seqIdToBp)
self._seqToBp = fasta.getSequenceToBpDict(seqIdToBp)
if isinstance(seqIdToPred, dict):
self._seqToPred = seqIdToPred
else:
assert os.path.isfile(seqIdToPred)
self._seqToPred = cami.readAssignments(seqIdToPred)
if isinstance(seqIdToTruePred, dict):
self._seqToTrue = seqIdToTruePred
else:
assert os.path.isfile(seqIdToTruePred)
self._seqToTrue = cami.readAssignments(seqIdToTruePred)
if isinstance(taxonomy, _TaxonomyWrapperA):
self._taxonomy = taxonomy
else:
assert os.path.isfile(taxonomy)
self._taxonomy = _TaxonomyWrapperA(taxonomy)
# correct the predictions self._seqToPred
if correctLabelThreshold is not None:
self._seqToPred = self._correctPredictions(
self._seqToBp, self._seqToPred, self._seqToTrue, self._taxonomy, correctLabelThreshold)
def _correctPredictions(self, seqIdToBp, seqIdToPred, seqIdToTruePred, taxonomy, correctLabelThreshold):
"""
"""
newPred = {}
ranks = taxonomy_ncbi.TAXONOMIC_RANKS[1:]
ranks.reverse()
for rank in ranks:
# get true clades at given rank
seqIdToLabelRank = taxonomy.getPredDictAtRank(seqIdToTruePred, rank)
# get pred clades at given rank
seqIdToPredRank = taxonomy.getPredDictAtRank(seqIdToPred, rank)
# map: true taxonId -> seqId
labelToSeqIdList = {}
for seqId, taxonId in seqIdToLabelRank.iteritems():
if taxonId in labelToSeqIdList:
labelToSeqIdList[taxonId].append(seqId)
else:
labelToSeqIdList[taxonId] = [seqId]
for taxonId, seqIdList in labelToSeqIdList.iteritems():
idToBp = {}
sumBp = 0
for seqId in seqIdList:
id = seqIdToPredRank.get(seqId, None)
if id is None:
continue
bp = seqIdToBp[seqId]
if id in idToBp:
idToBp[id] += bp
else:
idToBp[id] = bp
sumBp += bp
entryList = []
for id, bp in idToBp.iteritems():
entryList.append((id, bp))
if len(entryList) == 0:
continue
entryList.sort(key=lambda x: x[1], reverse=True)
id, bp = entryList[0]
if id == taxonId:
continue
percent = float(bp) / float(sumBp)
if percent >= correctLabelThreshold:
for seqId in seqIdList:
if seqIdToPred.get(seqId, None) == id:
newPred[seqId] = taxonId
for seqId, taxonId in seqIdToPred.iteritems():
if seqId not in newPred:
newPred[seqId] = taxonId
return newPred
def getAccuracy(self, rank, minFracClade=None, minFracPred=None, asBp=True, weightAccordingBinSize=True):
"""
Precision (specificity) and Recall (sensitivity) according to PhyloPythiaS and PhyloPythia papers.
The number of classes correspond to the number of classes in the true reference and param "minFracClades".
@param rank: on which taxonomic rank the predictions should be considered
@param minFracClade: a clade is considered only if the dataset (true labels) contains at least this
fraction of sequences that belong to the clade
@param minFracPred: a clade is considered only if the corresponding predicted bins contain at least this
fraction of the overall sequences (None ~ this criterion is not considered and only
true "reference" bins are used for the comparison).
@param asBp: count it according to the sequence lengths
@param weightAccordingBinSize: weight individual bins according to their bin size
@return: [precision, recall, classPrecisionNum, classRecallNum]
"""
predAtRankDict = self._taxonomy.getPredDictAtRank(self._seqToPred, rank)
trueAtRankDict = self._taxonomy.getPredDictAtRank(self._seqToTrue, rank)
tp = {} # class label -> count of sequences correctly assigned to clade i
t = {} # class label -> true count of sequences of clade i
p = {} # class label -> count of sequences assigned to clade i
tpOther = 0 # count of sequences correctly unassigned
tOther = 0 # true count of sequences that are unassigned at given rank
# iterate over all sequences
for seq, seqLen in self._seqToBp.iteritems():
# bp
if asBp:
bp = seqLen
else:
bp = 1
# true
i = None
if seq in trueAtRankDict:
i = trueAtRankDict[seq]
if i not in t:
t[i] = bp
else:
t[i] += bp
else:
tOther += bp
if seq not in predAtRankDict:
tpOther += bp
# pred
j = None
if seq in predAtRankDict:
j = predAtRankDict[seq]
if j not in p:
p[j] = bp
else:
p[j] += bp
# match
if i == j and i is not None:
if i not in tp:
tp[i] = bp
else:
tp[i] += bp
classesP = p.keys() # classes for precision
classesR = t.keys() # classes for recall
# filter out least abundant TRUE clades
if minFracClade is not None:
sumT = tOther # true bin containing all sequences undefined at this rank
for i in classesR:
sumT += t[i]
rmList = []
for i in classesR:
if (sumT == 0) or (float(t[i]) / float(sumT) < minFracClade):
rmList.append(i)
for i in rmList:
classesR.remove(i)
if (sumT == 0) or (float(tOther) / float(sumT) < minFracClade):
tOther = 0
# filter out least abundant PREDICTED clades
if minFracPred is not None:
sumT = 0
for i in classesP:
sumT += p[i]
rmList = []
for i in classesP:
if (sumT == 0) or (float(p[i]) / float(sumT) < minFracPred):
rmList.append(i)
for i in rmList:
classesP.remove(i)
# zero missing entries
for i in classesR:
if i not in tp:
tp[i] = 0
if i not in p:
p[i] = 0
for i in classesP:
if i not in tp:
tp[i] = 0
if i not in t:
t[i] = 0
wp = {} # weights for precision
wr = {} # weights for recall
if weightAccordingBinSize:
# compute weights of individual bins that correspond to the number of bp/sequences
# assigned to individual bins
sumP = 0.0
sumR = 0.0
for i in classesP:
sumP += p[i]
for i in classesR:
sumR += t[i]
sumR += tOther
for i in classesP:
wp[i] = float(p[i]) / sumP
for i in classesR:
wr[i] = float(t[i]) / sumR
if tOther > 0:
wrOther = float(tOther) / sumR
else:
# all bins are equally important
for i in classesP:
wp[i] = 1.0 / float(len(classesP))
for i in classesR:
if tOther > 0:
w = 1.0 / float(len(classesR) + 1)
wr[i] = w
wrOther = w
else:
wr[i] = 1.0 / float(len(classesR))
if len(classesR) == 0 and tOther > 0:
wrOther = 1.0
# precision
precision = 0.0
for i in classesP:
if p[i] > 0:
precision += (float(tp[i]) / float(p[i])) * wp[i]
# recall
recall = 0.0
classesRCount = len(classesR)
for i in classesR:
recall += (float(tp[i]) / float(t[i])) * wr[i]
if tOther > 0:
recall += (float(tpOther) / float(tOther)) * wrOther
classesRCount += 1
#
return [precision, recall, len(classesP), classesRCount]
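# Small worked example of the weighted precision above (assumed counts, not bp):
# predicted bins A (8 seqs, 6 correct) and B (2 seqs, 1 correct) get weights
# 0.8 and 0.2, so precision = 0.8*(6/8) + 0.2*(1/2) = 0.70; with
# weightAccordingBinSize=False both bins weigh 0.5 and precision = 0.625.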
def getAccuracyPrint(self, ranks, minFracClade, minFracPred, overview=True, asBp=True, weightAccordingBinSize=True):
"""
Gets the precision and recall values printed as a string
@param ranks: compute the precision and recall at these ranks
@rtype: str
"""
buff = '# precision, recall, #classes precision, #classes recall, seq. count/bp, weighted bins\n'
for rank in ranks:
if overview: # overview
buff += str(rank + ',--,--,--,----------,----------\n')
buff += self.getAccuracyPrintEntry(rank, minFracClade, minFracPred, False, False) # asBp, weighted
buff += self.getAccuracyPrintEntry(rank, minFracClade, minFracPred, True, False)
buff += self.getAccuracyPrintEntry(rank, minFracClade, minFracPred, False, True)
buff += self.getAccuracyPrintEntry(rank, minFracClade, minFracPred, True, True)
else: # custom
buff += self.getAccuracyPrintEntry(rank, minFracClade, minFracPred,
asBp=asBp, weightAccordingBinSize=weightAccordingBinSize)
return buff
def getAccuracyPrintEntry(self, rank, minFracClade, minFracPred, asBp=True, weightAccordingBinSize=True):
p, r, cp, cr = self.getAccuracy(rank, minFracClade, minFracPred, asBp, weightAccordingBinSize)
if asBp:
c = 'bp'
else:
c = 'count'
if weightAccordingBinSize:
w = 'weighted'
else:
w = 'not weighted'
return str('%s, %s, %s, %s, "%s", "%s"\n' % (round(p * 100.0, 1), round(r * 100.0, 1), cp, cr, c, w))
def getTaxonomy(self):
return self._taxonomy
def close(self, closeTaxonomy=True):
if closeTaxonomy:
self._taxonomy.close()
def _main():
parser = argparse.ArgumentParser(
description='Computes precision and recall measures according to different definitions.', epilog='')
parser.add_argument('-f', '--fasta', nargs=1, type=file, required=True, help='Fasta file.', metavar='contigs.fna',
dest='f')
parser.add_argument('-p', '--predictions', nargs=1, type=file, required=True,
help='Tab separated prediction file (first column sequence name, last column predicted ncbid).',
metavar='pred.csv', dest='p')
parser.add_argument('-t', '--true-assignments', nargs=1, type=file, required=True,
help='Tab separated true assignments file (first column sequence name, '
'last column predicted ncbid.', metavar='true_assignments.csv', dest='t')
parser.add_argument('-d', '--database', nargs=1, type=file, required=True,
help='Database file containing the NCBI taxonomy in the sqlite3 format.',
metavar='ncbitax_sqlite.db', dest='d')
parser.add_argument('-r', '--ranks', nargs=1, help='Compute the measures only for these ranks (given as comma '
'separated strings) Default ~ consider all ranks.',
metavar='order,family,genus', dest='r')
parser.add_argument('-c', '--min-frac-clade', nargs=1,
help='A clade is considered in the computation of "Recall" only if the reference (the true '
'assignments) contains at least this fraction of sequences that belong to the '
'corresponding clade at the corresponding rank. (e.g. value 0.01 means that all clades '
'that are considered in the computations of recall represent at least 1%% of the overall '
'dataset) Default ~ 0.01', metavar='0.01', dest='c')
parser.add_argument('-b', '--min-frac-bin', nargs=1,
help='In the computation of "Precision", a clade is considered only if the corresponding '
'predicted bins contain at least this fraction of the overall predicted sequences at the '
'corresponding rank (Default ~ 0.01)', metavar='0.01', dest='b')
parser.add_argument('-s', '--consider-seq-len', action='store_true',
help='Compute the measures based on the sequence lengths (in bp). '
'(Default ~ based on sequence counts)', dest='s')
parser.add_argument('-w', '--weight-bins', action='store_true',
help='The measures are computed using weighted averages over bin sizes. Size of true bins is '
'used to compute "Recall". Size of predicted bins is used to compute "Precision". '
'(Default ~ not weighted)', dest='w')
parser.add_argument('-o', '--overview', action='store_true',
help='Compute the measures according to several default settings. '
'You can still set the (-c) and (-b) options.', dest='o')
parser.add_argument('-m', '--map-by-correction', nargs=1,
help='Correct assignments by mapping to the most probable label. A float close to 0.9. '
'(Default ~ no correction)',
metavar='0.9', dest='m')
args = parser.parse_args()
if args.r:
ranks = str(args.r[0]).strip("'").strip('"').split(',')  # args.r holds plain strings, not file objects
else:
ranks = taxonomy_ncbi.TAXONOMIC_RANKS[1:]
if args.c:
minFracClade = float(args.c[0])
else:
minFracClade = 0.01
if args.b:
minFracPred = float(args.b[0])
else:
minFracPred = 0.01
if args.m:
correction = float(args.m[0])
else:
correction = None
acc = Accuracy(args.f[0].name, args.p[0].name, args.t[0].name, args.d[0].name, correction)
print(acc.getAccuracyPrint(ranks, minFracClade, minFracPred,
overview=bool(args.o), asBp=bool(args.s), weightAccordingBinSize=bool(args.w)))
acc.close()
def _test():
fastaFilePath = '/Users/ivan/Documents/work/binning/data/simMC/AMGN_AMD.Arachne.contigs.fna'
predFilePath = '/Users/ivan/Documents/work/binning/tests/simMC/AMD05/output/AMGN_AMD.Arachne.contigs.fna.pOUT'
trueFilePath = '/Users/ivan/Documents/work/binning/data/simMC/AMD.Arachne.genus'
databaseFile = '/Users/ivan/Documents/work/binning/taxonomy/ncbi_taxonomy_20110629/ncbitax_sqlite.db'
ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
acc = Accuracy(fastaFilePath, predFilePath, trueFilePath, databaseFile)
print(acc.getAccuracyPrint(ranks, minFracClade=0.01, minFracPred=0.01, overview=True))
acc.close()
if __name__ == "__main__":
_main()
#_test() |
py | 7dfeddf45f8b82243ef575c320ccff4fc0b29489 | # ---------------------------------
# test the algorithm implementation
# on a multidimensional XOR problem
# > python xor.py
# ---------------------------------
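# Example invocations (non-default flag values are illustrative):
#   python xor.py --algorithm gradient --strategy target --n 320 --seed 0
#   python xor.py --algorithm genetic --strategy random --size 32 --seed 1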
import tensorflow as tf
import argparse
parser = argparse.ArgumentParser(description='main')
parser.add_argument('--algorithm', default="gradient", type=str, help='algorithm type')
parser.add_argument('--strategy', default="target", type=str, help='strategy type')
parser.add_argument('--center', default=None, type=str, help='center in strategy r')
parser.add_argument('--n', default=320, type=int, help='number of observations')
parser.add_argument('--size', default=16, type=int, help='number of neurons in layers')
parser.add_argument('--seed', default=0, type=int, help='random seed')
args = parser.parse_args()
N = args.n
SIZE = args.size
tf.random.set_seed(args.seed)
import code
import numpy as np
import pandas as pd
np.random.seed(args.seed)
x1 = np.random.normal(size=N)
x2 = np.random.normal(size=N)
x3 = np.random.normal(size=N)
y = 1 * (x1 * x2 * x3 > 0)
X = pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3})
normalizer = tf.keras.layers.experimental.preprocessing.Normalization()
normalizer.adapt(X)
model = tf.keras.Sequential()
model.add(normalizer)
model.add(tf.keras.layers.Dense(SIZE, activation="relu"))
model.add(tf.keras.layers.Dense(SIZE, activation="relu"))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.SGD(),
metrics=['acc', 'AUC'])
model.fit(X, y, batch_size=int(N/10), epochs=300, verbose=0)
explainer = code.Explainer(model, X)
if args.algorithm == "gradient":
alg = code.GradientAlgorithm(explainer, variable="x1")
else:
alg = code.GeneticAlgorithm(explainer, variable="x1", std_ratio=1/6)
if args.strategy == "target":
alg.fool_aim(random_state=args.seed)
else:
alg.fool(center=args.center, random_state=args.seed)
alg.plot_losses()
alg.plot_explanation()
alg.plot_data(constant=False) |
py | 7dfede2ee5349b3ded9713a1e088f95b97b7f6a0 | import xbmc
import xbmcgui
import kodigui
import busy
import opener
import info
import videoplayer
import playersettings
import search
import dropdown
import windowutils
import optionsdialog
import preplayutils
from plexnet import plexplayer, media
from lib import colors
from lib import util
from lib import metadata
from lib.util import T
class PrePlayWindow(kodigui.ControlledWindow, windowutils.UtilMixin):
xmlFile = 'script-plex-pre_play.xml'
path = util.ADDON.getAddonInfo('path')
theme = 'Main'
res = '1080i'
width = 1920
height = 1080
THUMB_POSTER_DIM = (347, 518)
RELATED_DIM = (268, 397)
EXTRA_DIM = (329, 185)
ROLES_DIM = (268, 268)
PREVIEW_DIM = (343, 193)
EXTRA_LIST_ID = 400
RELATED_LIST_ID = 401
ROLES_LIST_ID = 403
OPTIONS_GROUP_ID = 200
PROGRESS_IMAGE_ID = 250
HOME_BUTTON_ID = 201
SEARCH_BUTTON_ID = 202
INFO_BUTTON_ID = 304
PLAY_BUTTON_ID = 302
TRAILER_BUTTON_ID = 303
SETTINGS_BUTTON_ID = 305
OPTIONS_BUTTON_ID = 306
PLAYER_STATUS_BUTTON_ID = 204
def __init__(self, *args, **kwargs):
kodigui.ControlledWindow.__init__(self, *args, **kwargs)
self.video = kwargs.get('video')
self.auto_play = kwargs.get('auto_play')
self.parentList = kwargs.get('parent_list')
self.videos = None
self.exitCommand = None
self.trailer = None
self.lastFocusID = None
def onFirstInit(self):
self.extraListControl = kodigui.ManagedControlList(self, self.EXTRA_LIST_ID, 5)
self.relatedListControl = kodigui.ManagedControlList(self, self.RELATED_LIST_ID, 5)
self.rolesListControl = kodigui.ManagedControlList(self, self.ROLES_LIST_ID, 5)
self.progressImageControl = self.getControl(self.PROGRESS_IMAGE_ID)
self.setup()
if self.auto_play:
self.auto_play = False
self.playVideo()
def onReInit(self):
self.video.reload()
self.refreshInfo()
def refreshInfo(self):
oldFocusId = self.getFocusId()
util.setGlobalProperty('hide.resume', '' if self.video.viewOffset.asInt() else '1')
self.setInfo()
xbmc.sleep(100)
if oldFocusId == self.PLAY_BUTTON_ID:
self.focusPlayButton()
def onAction(self, action):
try:
controlID = self.getFocusId()
if not controlID and self.lastFocusID and not action == xbmcgui.ACTION_MOUSE_MOVE:
self.setFocusId(self.lastFocusID)
if action in (xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_CONTEXT_MENU):
if not xbmc.getCondVisibility('ControlGroup({0}).HasFocus(0)'.format(self.OPTIONS_GROUP_ID)):
if self.getProperty('on.extras'):
self.setFocusId(self.OPTIONS_GROUP_ID)
return
if action == xbmcgui.ACTION_LAST_PAGE and xbmc.getCondVisibility('ControlGroup(300).HasFocus(0)'):
self.next()
elif action == xbmcgui.ACTION_NEXT_ITEM:
self.setFocusId(300)
self.next()
elif action == xbmcgui.ACTION_FIRST_PAGE and xbmc.getCondVisibility('ControlGroup(300).HasFocus(0)'):
self.prev()
elif action == xbmcgui.ACTION_PREV_ITEM:
self.setFocusId(300)
self.prev()
except:
util.ERROR()
kodigui.ControlledWindow.onAction(self, action)
def onClick(self, controlID):
if controlID == self.HOME_BUTTON_ID:
self.goHome()
elif controlID == self.EXTRA_LIST_ID:
self.openItem(self.extraListControl)
elif controlID == self.RELATED_LIST_ID:
self.openItem(self.relatedListControl)
elif controlID == self.ROLES_LIST_ID:
self.roleClicked()
elif controlID == self.PLAY_BUTTON_ID:
self.playVideo()
elif controlID == self.PLAYER_STATUS_BUTTON_ID:
self.showAudioPlayer()
elif controlID == self.INFO_BUTTON_ID:
self.infoButtonClicked()
elif controlID == self.SETTINGS_BUTTON_ID:
self.settingsButtonClicked()
elif controlID == self.TRAILER_BUTTON_ID:
self.openItem(item=self.trailer)
elif controlID == self.OPTIONS_BUTTON_ID:
self.optionsButtonClicked()
elif controlID == self.SEARCH_BUTTON_ID:
self.searchButtonClicked()
def onFocus(self, controlID):
self.lastFocusID = controlID
if 399 < controlID < 500:
self.setProperty('hub.focus', str(controlID - 400))
if xbmc.getCondVisibility('ControlGroup(50).HasFocus(0) + ControlGroup(300).HasFocus(0)'):
self.setProperty('on.extras', '')
elif xbmc.getCondVisibility('ControlGroup(50).HasFocus(0) + !ControlGroup(300).HasFocus(0)'):
self.setProperty('on.extras', '1')
def searchButtonClicked(self):
self.processCommand(search.dialog(self, section_id=self.video.getLibrarySectionId() or None))
def settingsButtonClicked(self):
if not self.video.mediaChoice:
playerObject = plexplayer.PlexPlayer(self.video)
playerObject.build()
playersettings.showDialog(video=self.video, non_playback=True)
self.setAudioAndSubtitleInfo()
def infoButtonClicked(self):
opener.handleOpen(
info.InfoWindow,
title=self.video.title,
sub_title=self.getProperty('info'),
thumb=self.video.type == 'episode' and self.video.thumb or self.video.defaultThumb,
thumb_fallback='script.plexo/thumb_fallbacks/{0}.png'.format(self.video.type == 'episode' and 'show' or 'movie'),
info=self.video.summary,
background=self.getProperty('background'),
is_16x9=self.video.type == 'episode'
)
def optionsButtonClicked(self):
options = []
# if xbmc.getCondVisibility('Player.HasAudio + MusicPlayer.HasNext'):
# options.append({'key': 'play_next', 'display': 'Play Next'})
if len(self.video.media) > 1:
options.append({'key': 'play_version', 'display': T(32451, 'Play Version...')})
if self.video.isWatched and not self.video.viewOffset.asInt():
options.append({'key': 'mark_unwatched', 'display': T(32318, 'Mark Unwatched')})
else:
options.append({'key': 'mark_watched', 'display': T(32319, 'Mark Watched')})
options.append(dropdown.SEPARATOR)
if self.video.type == 'episode':
options.append({'key': 'to_season', 'display': T(32400, 'Go to Season')})
options.append({'key': 'to_show', 'display': T(32323, 'Go to Show')})
if self.video.type in ('episode', 'movie'):
options.append({'key': 'to_section', 'display': T(32324, u'Go to {0}').format(self.video.getLibrarySectionTitle())})
if self.video.server.allowsMediaDeletion:
options.append({'key': 'delete', 'display': T(32322, 'Delete')})
# if xbmc.getCondVisibility('Player.HasAudio') and self.section.TYPE == 'artist':
# options.append({'key': 'add_to_queue', 'display': 'Add To Queue'})
# if False:
# options.append({'key': 'add_to_playlist', 'display': 'Add To Playlist'})
posy = 880
if not util.getGlobalProperty('hide.resume'):
posy += 106
if self.getProperty('trailer.button'):
posy += 106
choice = dropdown.showDropdown(options, (posy, 618), close_direction='left')
if not choice:
return
if choice['key'] == 'play_version':
self.playVideo(play_version=True)
elif choice['key'] == 'play_next':
xbmc.executebuiltin('PlayerControl(Next)')
elif choice['key'] == 'mark_watched':
self.video.markWatched()
self.refreshInfo()
util.MONITOR.watchStatusChanged()
elif choice['key'] == 'mark_unwatched':
self.video.markUnwatched()
self.refreshInfo()
util.MONITOR.watchStatusChanged()
elif choice['key'] == 'to_season':
self.processCommand(opener.open(self.video.parentRatingKey))
elif choice['key'] == 'to_show':
self.processCommand(opener.open(self.video.grandparentRatingKey))
elif choice['key'] == 'to_section':
self.goHome(self.video.getLibrarySectionId())
elif choice['key'] == 'delete':
self.delete()
def delete(self):
button = optionsdialog.show(
T(32326, 'Really delete?'),
T(32327, 'Are you sure you really want to delete this media?'),
T(32328, 'Yes'),
T(32329, 'No')
)
if button != 0:
return
if self._delete():
self.doClose()
else:
util.messageDialog(T(32330, 'Message'), T(32331, 'There was a problem while attempting to delete the media.'))
@busy.dialog()
def _delete(self):
success = self.video.delete()
util.LOG('Media DELETE: {0} - {1}'.format(self.video, success and 'SUCCESS' or 'FAILED'))
return success
def roleClicked(self):
mli = self.rolesListControl.getSelectedItem()
if not mli:
return
sectionRoles = busy.widthDialog(mli.dataSource.sectionRoles, '')
if not sectionRoles:
util.DEBUG_LOG('No sections found for actor')
return
if len(sectionRoles) > 1:
x, y = self.getRoleItemDDPosition()
options = [{'role': r, 'display': r.reasonTitle} for r in sectionRoles]
choice = dropdown.showDropdown(options, (x, y), pos_is_bottom=True, close_direction='bottom')
if not choice:
return
role = choice['role']
else:
role = sectionRoles[0]
self.processCommand(opener.open(role))
def getVideos(self):
if not self.videos:
if self.video.TYPE == 'episode':
self.videos = self.video.show().episodes()
if not self.videos:
return False
return True
def next(self):
if not self._next():
return
self.setup()
@busy.dialog()
def _next(self):
if self.parentList:
mli = self.parentList.getListItemByDataSource(self.video)
if not mli:
return False
pos = mli.pos() + 1
if not self.parentList.positionIsValid(pos):
pos = 0
self.video = self.parentList.getListItem(pos).dataSource
else:
if not self.getVideos():
return False
if self.video not in self.videos:
return False
pos = self.videos.index(self.video)
pos += 1
if pos >= len(self.videos):
pos = 0
self.video = self.videos[pos]
return True
def prev(self):
if not self._prev():
return
self.setup()
@busy.dialog()
def _prev(self):
if self.parentList:
mli = self.parentList.getListItemByDataSource(self.video)
if not mli:
return False
pos = mli.pos() - 1
if pos < 0:
pos = self.parentList.size() - 1
self.video = self.parentList.getListItem(pos).dataSource
else:
if not self.getVideos():
return False
if self.video not in self.videos:
return False
pos = self.videos.index(self.video)
pos -= 1
if pos < 0:
pos = len(self.videos) - 1
self.video = self.videos[pos]
return True
def getRoleItemDDPosition(self):
y = 980
if xbmc.getCondVisibility('Control.IsVisible(500)'):
y += 360
if xbmc.getCondVisibility('Control.IsVisible(501)'):
y += 520
if xbmc.getCondVisibility('!String.IsEmpty(Window.Property(on.extras))'):
y -= 300
if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),0) + Control.IsVisible(500)'):
y -= 500
if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),1) + Control.IsVisible(501)'):
y -= 500
focus = int(xbmc.getInfoLabel('Container(403).Position'))
x = ((focus + 1) * 304) - 100
return x, y
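# Worked example for the dropdown position math above (illustrative only): with the
# roles list focused on its third item, Container(403).Position is 2, so
# x = ((2 + 1) * 304) - 100 = 812; y starts at 980 and is then shifted by whichever
# of the skin visibility conditions currently hold.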
def playVideo(self, play_version=False):
if not self.video.available():
util.messageDialog(T(32312, 'Unavailable'), T(32313, 'This item is currently unavailable.'))
return
if play_version:
if not preplayutils.chooseVersion(self.video):
return
else:
preplayutils.resetVersion(self.video)
resume = False
if self.video.viewOffset.asInt():
button = optionsdialog.show(
T(32314, 'In Progress'),
T(32315, 'Resume playback?'),
T(32316, 'Resume'),
T(32317, 'Play From Beginning')
)
if button is None:
return
resume = (button == 0)
self.processCommand(videoplayer.play(video=self.video, resume=resume))
def openItem(self, control=None, item=None):
if not item:
mli = control.getSelectedItem()
if not mli:
return
item = mli.dataSource
self.processCommand(opener.open(item))
def focusPlayButton(self):
try:
if not self.getFocusId() == self.PLAY_BUTTON_ID:
self.setFocusId(self.PLAY_BUTTON_ID)
except (SystemError, RuntimeError):
self.setFocusId(self.PLAY_BUTTON_ID)
@busy.dialog()
def setup(self):
self.focusPlayButton()
util.DEBUG_LOG('PrePlay: Showing video info: {0}'.format(self.video))
if self.video.type == 'episode':
self.setProperty('preview.yes', '1')
elif self.video.type == 'movie':
self.setProperty('preview.no', '1')
self.video.reload(checkFiles=1, includeRelated=1, includeRelatedCount=10, includeExtras=1, includeExtrasCount=10)
self.setInfo()
self.fillExtras()
hasPrev = self.fillRelated()
self.fillRoles(hasPrev)
def setInfo(self):
self.setProperty('background', self.video.art.asTranscodedImageURL(self.width, self.height, blur=128, opacity=60, background=colors.noAlpha.Background))
self.setProperty('title', self.video.title)
self.setProperty('duration', util.durationToText(self.video.duration.asInt()))
self.setProperty('summary', self.video.summary.strip().replace('\t', ' '))
directors = u' / '.join([d.tag for d in self.video.directors()][:5])
directorsLabel = len(self.video.directors) > 1 and T(32401, u'DIRECTORS').upper() or T(32383, u'DIRECTOR').upper()
self.setProperty('directors', directors and u'{0} {1}'.format(directorsLabel, directors) or '')
if self.video.type == 'episode':
self.setProperty('content.rating', '')
self.setProperty('thumb', self.video.defaultThumb.asTranscodedImageURL(*self.THUMB_POSTER_DIM))
self.setProperty('preview', self.video.thumb.asTranscodedImageURL(*self.PREVIEW_DIM))
self.setProperty('info', u'{0} {1} {2} {3}'.format(T(32303, 'Season'), self.video.parentIndex, T(32304, 'Episode'), self.video.index))
self.setProperty('date', util.cleanLeadingZeros(self.video.originallyAvailableAt.asDatetime('%B %d, %Y')))
writers = u' / '.join([w.tag for w in self.video.writers()][:5])
writersLabel = len(self.video.writers) > 1 and T(32403, u'WRITERS').upper() or T(32402, u'WRITER').upper()
self.setProperty('writers', writers and u'{0} {1}'.format(writersLabel, writers) or '')
self.setProperty('related.header', T(32306, 'Related Shows'))
elif self.video.type == 'movie':
self.setProperty('preview', '')
self.setProperty('thumb', self.video.thumb.asTranscodedImageURL(*self.THUMB_POSTER_DIM))
genres = u' / '.join([g.tag for g in self.video.genres()][:3])
self.setProperty('info', genres)
self.setProperty('date', self.video.year)
self.setProperty('content.rating', self.video.contentRating.split('/', 1)[-1])
cast = u' / '.join([r.tag for r in self.video.roles()][:5])
castLabel = 'CAST'
self.setProperty('writers', cast and u'{0} {1}'.format(castLabel, cast) or '')
self.setProperty('related.header', T(32404, 'Related Movies'))
self.setProperty('video.res', self.video.resolutionString())
self.setProperty('audio.codec', self.video.audioCodecString())
self.setProperty('audio.channels', self.video.audioChannelsString(metadata.apiTranslate))
self.setProperties(('rating.stars', 'rating', 'rating.image', 'rating2', 'rating2.image'), '')
if self.video.userRating:
stars = str(int(round((self.video.userRating.asFloat() / 10) * 5)))
self.setProperty('rating.stars', stars)
# elif self.video.rating:
# stars = str(int(round((self.video.rating.asFloat() / 10) * 5)))
# self.setProperty('rating.stars', stars)
if self.video.ratingImage:
rating = self.video.rating
audienceRating = self.video.audienceRating
if self.video.ratingImage.startswith('rottentomatoes:'):
rating = '{0}%'.format(int(rating.asFloat() * 10))
if audienceRating:
audienceRating = '{0}%'.format(int(audienceRating.asFloat() * 10))
self.setProperty('rating', rating)
self.setProperty('rating.image', 'script.plexo/ratings/{0}.png'.format(self.video.ratingImage.replace('://', '/')))
if self.video.audienceRatingImage:
self.setProperty('rating2', audienceRating)
self.setProperty('rating2.image', 'script.plexo/ratings/{0}.png'.format(self.video.audienceRatingImage.replace('://', '/')))
else:
self.setProperty('rating', self.video.rating)
self.setAudioAndSubtitleInfo()
self.setProperty('unavailable', not self.video.media()[0].isAccessible() and '1' or '')
if self.video.viewOffset.asInt():
width = self.video.viewOffset.asInt() and (1 + int((self.video.viewOffset.asInt() / self.video.duration.asFloat()) * self.width)) or 1
self.progressImageControl.setWidth(width)
else:
self.progressImageControl.setWidth(1)
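# Progress bar example (illustrative numbers): for a 45 minute episode resumed at the
# 15 minute mark with a window width of 1920, the branch above computes
# width = 1 + int((900000 / 2700000.0) * 1920) = 641 pixels; unwatched items collapse
# the bar to a 1 pixel sliver rather than hiding the control.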
def setAudioAndSubtitleInfo(self):
sas = self.video.selectedAudioStream()
self.setProperty('audio', sas and sas.getTitle(metadata.apiTranslate) or T(32309, 'None'))
sss = self.video.selectedSubtitleStream()
if sss:
if len(self.video.subtitleStreams) > 1:
self.setProperty(
'subtitles', u'{0} \u2022 {1} {2}'.format(sss.getTitle(metadata.apiTranslate), len(self.video.subtitleStreams) - 1, T(32307, 'More'))
)
else:
self.setProperty('subtitles', sss.getTitle(metadata.apiTranslate))
else:
if self.video.subtitleStreams:
self.setProperty('subtitles', u'{0} \u2022 {1} {2}'.format(T(32309, 'None'), len(self.video.subtitleStreams), T(32308, 'Available')))
else:
self.setProperty('subtitles', T(32309, u'None'))
def createListItem(self, obj):
mli = kodigui.ManagedListItem(obj.title or '', thumbnailImage=obj.thumb.asTranscodedImageURL(*self.EXTRA_DIM), data_source=obj)
return mli
def fillExtras(self):
items = []
idx = 0
if not self.video.extras:
self.extraListControl.reset()
return False
for extra in self.video.extras():
if not self.trailer and extra.extraType.asInt() == media.METADATA_RELATED_TRAILER:
self.trailer = extra
self.setProperty('trailer.button', '1')
continue
mli = self.createListItem(extra)
if mli:
mli.setProperty('index', str(idx))
mli.setProperty(
'thumb.fallback', 'script.plexo/thumb_fallbacks/{0}.png'.format(extra.type in ('show', 'season', 'episode') and 'show' or 'movie')
)
items.append(mli)
idx += 1
if not items:
return False
self.extraListControl.reset()
self.extraListControl.addItems(items)
return True
def fillRelated(self, has_prev=False):
items = []
idx = 0
if not self.video.related:
self.relatedListControl.reset()
return False
for rel in self.video.related()[0].items:
mli = kodigui.ManagedListItem(rel.title or '', thumbnailImage=rel.thumb.asTranscodedImageURL(*self.RELATED_DIM), data_source=rel)
if mli:
mli.setProperty('thumb.fallback', 'script.plexo/thumb_fallbacks/{0}.png'.format(rel.type in ('show', 'season', 'episode') and 'show' or 'movie'))
mli.setProperty('index', str(idx))
items.append(mli)
idx += 1
if not items:
return False
self.setProperty('divider.{0}'.format(self.RELATED_LIST_ID), has_prev and '1' or '')
self.relatedListControl.reset()
self.relatedListControl.addItems(items)
return True
def fillRoles(self, has_prev=False):
items = []
idx = 0
if not self.video.roles:
self.rolesListControl.reset()
return False
for role in self.video.roles():
mli = kodigui.ManagedListItem(role.tag, role.role, thumbnailImage=role.thumb.asTranscodedImageURL(*self.ROLES_DIM), data_source=role)
mli.setProperty('index', str(idx))
items.append(mli)
idx += 1
if not items:
return False
self.setProperty('divider.{0}'.format(self.ROLES_LIST_ID), has_prev and '1' or '')
self.rolesListControl.reset()
self.rolesListControl.addItems(items)
return True
|
py | 7dfee001c433fc7843dad1bfdab4e8099d435a8f | '''
The "Reversing a fragment" problem
Statement
Given a string in which the letter h occurs at least twice, reverse the sequence of characters enclosed between the first and the last occurrence of the letter h.
'''
s = input()
# s = '123h56h78'
s1 = s[:]
ind1 = s1.find('h')
ind2 = s1.rfind('h')
print(s[:ind1] + s[ind2:ind1:-1] + s[ind2:])
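# Worked example (illustrative): for s = '123h56h78', ind1 = 3 and ind2 = 6, so the line
# above prints '123' + 'h65' + 'h78' = '123h65h78'. The slice s[ind2:ind1:-1] walks
# backwards from the last 'h' down to, but not including, the first 'h', which is what
# reverses the fragment between the two occurrences.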
|
py | 7dfee08f58a989d86cdc5d2c30b69cc4199b2280 | import solutions
from solutions import *
from random import randint, randrange, random
assert sum_of_odd(3, 8) == 15
for _ in range(1000):
rand_a = randint(-100, 500)
rand_b = randint(-100, 500)
rand_a, rand_b = min(rand_a, rand_b), max(rand_a, rand_b)
odd_sum = sum_of_odd(rand_a, rand_b)
loop_sum = 0
for i in range(rand_a, rand_b + 1):
if i % 2 == 1:
loop_sum += i
assert odd_sum == loop_sum
lis = [3, 6, -1]
raise_by_one(lis)
assert lis == [4, 7, 0]
for _ in range(100):
comp = [randint(-1000, 1000) + random() for _ in range(500)]
comp2 = comp[::1]
raise_by_one(comp)
for i in range(len(comp2)):
assert comp2[i] + 1 == comp[i]
arr_list = ArrayList()
arr_list.prepend("str1")
arr_list.prepend("str2")
arr_list.print_list()
arr_list.set_at(1, "str3")
arr_list.print_list()
for i in range(1000):
arr_list.prepend(i)
arr_list.print_list()
assert arr_list.get_size() == 1002
rental = Rental()
rental.add_car("MFT67", "chrysler", 5)
car = rental.get_car("MFT67")
car.set_brand("fiat")
print(rental)
rental.add_car("ABC", "WowzerBus", 40)
rental.add_car("12345", "my car", 100)
rental.add_car("23456", "your car", 200)
rental.add_car("34567", "our car", 103)
print(rental)
|
py | 7dfee1ec0b9455af72726ca74ba1fc717b46f9b5 | # coding=utf-8
import os
import numpy as np
import librosa
import torch
from torch.utils.data import Dataset, DataLoader
import glob
import scipy.io as scio
from audio_util import *
#import pdb
def toTorch(x):
return torch.from_numpy(x.astype(np.float32))
class Generator_train_dataset(Dataset):
def __init__(self, file_list, noise_path):
self.file_list = file_list
self.noise_path = noise_path
self.target_score = np.asarray([1.0, 1.0],dtype=np.float32)
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
pmag = 0.30
pban = 0.20
file = self.file_list[idx]
clean_wav,_ = librosa.load(self.file_list[idx], sr=44100)
noise_wav,_ = librosa.load(self.noise_path+self.file_list[idx].split('/')[-1], sr=44100)
# already power compression by 0.30
noise_mag,noise_phase = Sp_and_phase(noise_wav, Normalization=True)
clean_mag,clean_phase = Sp_and_phase(clean_wav, Normalization=True)
#bandNoise = compute_band_E(noise_wav) ** pban
#bandClean = compute_band_E(clean_wav) ** pban
return clean_mag,clean_phase,noise_mag,noise_phase,self.target_score
class Discriminator_train_dataset(Dataset):
def __init__(self, file_list, noise_path, clean_path):
self.file_list = file_list
self.noise_path = noise_path
self.clean_path = clean_path
def __len__(self):
return len(self.file_list)
def __getitem__(self,idx):
pban = 0.20
score_filepath = self.file_list[idx].split(',')
enhance_wav,_ = librosa.load(score_filepath[2], sr=44100)
enhance_mag, _ = Sp_and_phase(enhance_wav, Normalization=True)
#pdb.set_trace()
f = self.file_list[idx].split('/')[-1]
if '@' in f:
f = f.split('@')[0] + '.wav'
noise_wav,_ = librosa.load(self.noise_path+f, sr=44100)
noise_mag, _ = Sp_and_phase(noise_wav, Normalization=True)
clean_wav, _ = librosa.load(self.clean_path+f, sr=44100)
clean_mag, _ = Sp_and_phase(clean_wav, Normalization=True)
#bandNoise = compute_band_E(noise_wav) ** pban
#bandEnhan = compute_band_E(enhance_wav) ** pban
#bandClean = compute_band_E(clean_wav) ** pban
True_score = np.asarray([float(score_filepath[0]),float(score_filepath[1])],dtype=np.float32)
#noise_mag, clean_mag, bandNoise, bandClean = noise_mag.T, clean_mag.T, bandNoise.T, bandClean.T
#enhance_mag, bandEnhan = enhance_mag.T, bandEnhan.T
noise_mag, clean_mag, enhance_mag = noise_mag.T, clean_mag.T, enhance_mag.T
noise_mag = noise_mag.reshape(1,513,noise_mag.shape[1])
clean_mag = clean_mag.reshape(1,513,clean_mag.shape[1])
enhance_mag = enhance_mag.reshape(1,513,enhance_mag.shape[1])
#bandNoise = bandNoise.reshape(1,40,bandNoise.shape[1])
#bandClean = bandClean.reshape(1,40,bandClean.shape[1])
#bandEnhan = bandEnhan.reshape(1,40,bandEnhan.shape[1])
return np.concatenate((enhance_mag,noise_mag,clean_mag),axis=0), True_score
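# Usage sketch (illustrative; the paths and list file below are hypothetical):
# gen_files = glob.glob('./data/train/clean/*.wav')
# gen_loader = create_dataloader(gen_files, './data/train/noisy/', loader='G')
# For the discriminator, each entry of the file list is a 'score1,score2,/path/to/enhanced.wav'
# string, passed with loader='D' together with the noisy and clean directories:
# disc_loader = create_dataloader(disc_list, './data/train/noisy/', './data/train/clean/', loader='D')
# create_dataloader (defined below) wraps either dataset in a batch-size-1 DataLoader.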
def create_dataloader(filelist, noise_path, clean_path=None, loader='G'):
if loader=='G':
return DataLoader(dataset=Generator_train_dataset(filelist, noise_path),
batch_size=1,
shuffle=True,
num_workers=6,
drop_last=True)
elif loader=='D':
return DataLoader(dataset=Discriminator_train_dataset(filelist, noise_path, clean_path),
batch_size=1,
shuffle=True,
num_workers=6,
drop_last=True)
else:
raise Exception("No such dataloader type!") |
py | 7dfee275b686a46773dfddf1aec93dabbe43b425 | # Copyright 2010 The NetWho Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IRC outgoing parser."""
__author__ = 'thomas%stromberg.org (Thomas Stromberg)'
import re
from base_parser import BaseParser, Identity
class IrcOutgoingParser(BaseParser):
DPORTS = [6667, 6668, 6669, 8001]
PROTOCOL = 'IRC'
CONNECT_RE = re.compile('NICK (\w+)\r\nUSER (\w+) .*? ([\w\.]+) :(.*?)\r\n')
TOPIC_RE = re.compile(':([\w\.]+) 332 (\w+) (\#\w+) :')
def parse(self, pkt, payload):
if not payload:
print 'none'
yield None
else:
match = self.CONNECT_RE.search(payload)
if match:
print match
(nick, username, server, full_name) = match.groups()
print match.groups()
yield Identity(service=server, event='connect', type='handle',
value=nick, certainty=0.7)
yield Identity(service=server, event='connect', type='name',
value=full_name, certainty=0.3)
yield Identity(service=server, event='connect', type='username',
value=username, certainty=0.25)
match = self.TOPIC_RE.search(payload)
if match:
(server, nick, channel) = match.groups()
yield Identity(service='%s: %s' % (server, channel), event='topic',
type='handle', value=nick, certainty=1)
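# Example (illustrative, assuming an old style registration payload): a payload such as
# 'NICK tom\r\nUSER tom myhost irc.example.net :Tom S\r\n' matches CONNECT_RE with groups
# ('tom', 'tom', 'irc.example.net', 'Tom S'), yielding handle, name and username
# identities, while a later ':irc.example.net 332 tom #python :...' numeric matches
# TOPIC_RE and yields a channel-scoped handle identity.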
|
py | 7dfee40df254a9cbc7d290aec1472e6129ddcd5f | from NIENV import *
# API METHODS --------------
# self.main_widget
# self.update_shape()
# Ports
# self.input(index)
# self.set_output_val(index, val)
# self.exec_output(index)
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', target='global')
# self.log_message('that\'s not good', target='error')
# --------------------------
from sklearn.svm import SVC
class SVCGetParams_NodeInstance(NodeInstance):
def __init__(self, params):
super(SVCGetParams_NodeInstance, self).__init__(params)
tmp = SVC()
params = tmp.get_params()
for key in params:
self.create_new_output(type_="data", label=key, pos=-1)
del tmp
self.create_new_output(type_="data", label="param dict", pos=-1)
# self.special_actions['action name'] = {'method': M(self.action_method)}
# ...
def update_event(self, input_called=-1):
if input_called == 0:
model = self.input(1)
params = model.get_params()
i = 0
for param in params:
self.set_output_val(i, params[param])
i += 1
self.set_output_val(i, params)
def get_data(self):
data = {}
return data
def set_data(self, data):
pass
def removing(self):
pass
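# Note (illustrative): SVC().get_params() returns a dict whose keys include 'C', 'kernel',
# 'degree', 'gamma', 'coef0' and 'tol', so the node exposes one data output per
# hyperparameter plus the final 'param dict' output; the exact key set depends on the
# installed scikit-learn version.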
|
py | 7dfee483a40c58406c352751dcb1561720b294f1 | from ..broker import Broker
class DiscoveryHintBroker(Broker):
controller = "discovery_hints"
def index(self, **kwargs):
"""Lists the available discovery hints. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Array of Integer
| ``api version min:`` 2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier collector assigned to the discovery hint.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier collector assigned to the discovery hint.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoveryHint. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hints: An array of the DiscoveryHint objects that match the specified input criteria.
:rtype discovery_hints: Array of DiscoveryHint
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The discovery hint identified by the specified id.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""Creates a new discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param hint: The hint used by the discovery engine.
:type hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_type: The device type applied to the given discovery hint.
:type device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier collector assigned to the discovery hint.
:type UnitID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created discovery hint.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created discovery hint.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created discovery hint.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The newly created discovery hint.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""Updates an existing discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hint: The hint used by the discovery engine. If omitted, this field will not be updated.
:type hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_type: The device type applied to the given discovery hint. If omitted, this field will not be updated.
:type device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier collector assigned to the discovery hint. If omitted, this field will be updated to the default value.
:type UnitID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated discovery hint.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated discovery hint.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated discovery hint.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The updated discovery hint.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified discovery hint from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
|
py | 7dfee4cc30f3be2b92aab17de3fbdbe1929f3365 | # Copyright 2019 Silverbackhq
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Third Party Library
from django.http import JsonResponse
from django.utils.translation import gettext as _
# Local Library
from app.modules.core.response import Response
def csrf_failure(request, reason=""):
correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Access forbidden due to invalid CSRF token.")
}], {}, correlation_id))
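# Wiring note (illustrative): Django routes CSRF failures through this handler once the
# standard setting points at it in settings.py; the dotted path below is an assumption
# about this project's layout:
# CSRF_FAILURE_VIEW = 'app.views.errors.csrf_failure'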
|
py | 7dfee50ace417c571f220c4982667cd8f771373d | from django.shortcuts import render
from books.models import Book
def foo_view(request):
books = Book.objects.filter(title__icontains='world')
return render(request,'foobar/foo.html',{'books':books})
def bar_view(request):
books = Book.objects.filter(title__icontains='the')
return render(request,'foobar/bar.html',{'books':books})
def foo_bar_view(request,template_name,search_str):
books=Book.objects.filter(title__icontains=search_str)
return render(request,template_name,{'books':books})
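# URLconf sketch (illustrative; the patterns and template names echo the views above):
# url(r'^foo/$', foo_bar_view, {'template_name': 'foobar/foo.html', 'search_str': 'world'}),
# url(r'^bar/$', foo_bar_view, {'template_name': 'foobar/bar.html', 'search_str': 'the'}),
# Passing extra keyword arguments from the URLconf lets this single generic view replace
# foo_view and bar_view.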
|
py | 7dfee681a4e1b6fbefd0561d0d54c4f5e074348f | """
hubspot companies api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import prettify, get_log
from typing import List, Dict, Optional, Union
COMPANIES_API_VERSION = "2"
class CompaniesClient(BaseClient):
"""
hubspot3 Companies client
:see: https://developers.hubspot.com/docs/methods/companies/companies-overview
"""
def __init__(self, *args, **kwargs):
super(CompaniesClient, self).__init__(*args, **kwargs)
self.log = get_log("hubspot3.companies")
def _get_path(self, subpath: str) -> str:
"""get the full api url for the given subpath on this client"""
return "companies/v{}/{}".format(
self.options.get("version") or COMPANIES_API_VERSION, subpath
)
def create(self, data: Dict = None, **options) -> Dict:
"""create a new company"""
data = data or {}
return self._call("companies/", data=data, method="POST", **options)
def update(self, company_id: str, data: Dict = None, **options) -> Dict:
"""update the given company with data"""
data = data or {}
return self._call(
"companies/{}".format(company_id), data=data, method="PUT", **options
)
def delete(self, company_id: str, **options) -> Dict:
"""delete a company"""
return self._call("companies/{}".format(company_id), method="DELETE", **options)
def get(self, company_id: str, **options) -> Dict:
"""get a single company by it's ID"""
return self._call("companies/{}".format(company_id), method="GET", **options)
def search_domain(
self, domain: str, limit: int = 1, extra_properties: Dict = None, **options
) -> Dict:
"""searches for companies by domain name. limit is max'd at 100"""
# default properties to fetch
properties = [
"domain",
"createdate",
"name",
"hs_lastmodifieddate",
"hubspot_owner_id",
]
# append extras if they exist
if extra_properties:
if isinstance(extra_properties, list):
properties += extra_properties
if isinstance(extra_properties, str):
properties.append(extra_properties)
return self._call(
"domains/{}/companies".format(domain),
method="POST",
data={"limit": limit, "requestOptions": {"properties": properties}},
**options,
)
def get_all(
self, extra_properties: Union[str, List] = None, **options
) -> Optional[List]:
"""get all companies, including extra properties if they are passed in"""
finished = False
output = []
offset = 0
query_limit = 250 # Max value according to docs
# default properties to fetch
properties = [
"name",
"description",
"address",
"address2",
"city",
"state",
"story",
"hubspot_owner_id",
]
# append extras if they exist
if extra_properties:
if isinstance(extra_properties, list):
properties += extra_properties
if isinstance(extra_properties, str):
properties.append(extra_properties)
while not finished:
batch = self._call(
"companies/paged",
method="GET",
doseq=True,
params={
"limit": query_limit,
"offset": offset,
"properties": properties,
},
**options,
)
output.extend(
[
prettify(company, id_key="companyId")
for company in batch["companies"]
if not company["isDeleted"]
]
)
finished = not batch["has-more"]
offset = batch["offset"]
return output
def _get_recent(self, recency_type: str, **options) -> Optional[List]:
"""
Returns either a list of recently modified companies or recently created companies,
depending on the recency_type passed in. Both API endpoints take identical parameters
and return identical formats, they differ only in the URLs
(companies/recent/created or companies/recent/modified)
:see: https://developers.hubspot.com/docs/methods/companies/get_companies_modified
:see: https://developers.hubspot.com/docs/methods/companies/get_companies_created
"""
finished = False
output = []
offset = 0
query_limit = 250 # Max value according to docs
while not finished:
batch = self._call(
"companies/recent/{}".format(recency_type),
method="GET",
doseq=True,
params={"count": query_limit, "offset": offset},
**options,
)
output.extend(
[
prettify(company, id_key="companyId")
for company in batch["results"]
if not company["isDeleted"]
]
)
finished = not batch["hasMore"]
offset = batch["offset"]
return output
def get_recently_modified(self, **options) -> Optional[List]:
return self._get_recent("modified", **options)
def get_recently_created(self, **options) -> Optional[List]:
return self._get_recent("created", **options)
def get_contacts_at_a_company(self, company_id: str, **options) -> Optional[List]:
"""
Returns all of the contacts who have an associatedcompanyid contact property of
`company_id`.
:see: https://developers.hubspot.com/docs/methods/companies/get_company_contacts
"""
return self._call(
"companies/{}/contacts".format(company_id), method="GET", **options
)
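# Usage sketch (illustrative; assumes BaseClient accepts an api_key keyword and the key
# below is a placeholder):
# client = CompaniesClient(api_key='your-hapikey')
# all_companies = client.get_all(extra_properties=['industry', 'website'])
# matches = client.search_domain('example.com', limit=5)
# contacts = client.get_contacts_at_a_company('1234567890')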
|
py | 7dfee7da0846f96403c4690fad9695f0960a8481 | from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
# url(r'^login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
# url(r'^logout/$', auth_views.logout, {'next_page': 'login'}, name='logout'),
url(r'^signup/$', views.signup, name='signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
] |
py | 7dfee932de4e33210406fb9a10f37de148bc595b | # -*- coding: utf-8 -*-
"""Setup tests for this package."""
import unittest
from plone import api
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from ..testing import TINYMCE_LATEX_INTEGRATION_TESTING
try:
from Products.CMFPlone.utils import get_installer
except ImportError:
get_installer = None
class TestSetup(unittest.TestCase):
"""Test that this is properly installed."""
layer = TINYMCE_LATEX_INTEGRATION_TESTING
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
if get_installer:
self.installer = get_installer(self.portal, self.layer['request'])
else:
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if collective.tinymceplugins.latex is installed."""
self.assertTrue(self.installer.isProductInstalled(
'collective.tinymceplugins.latex'))
class TestUninstall(unittest.TestCase):
layer = TINYMCE_LATEX_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
if get_installer:
self.installer = get_installer(self.portal, self.layer['request'])
else:
self.installer = api.portal.get_tool('portal_quickinstaller')
roles_before = api.user.get_roles(TEST_USER_ID)
setRoles(self.portal, TEST_USER_ID, ['Manager'])
self.installer.uninstallProducts(['collective.tinymceplugins.latex'])
setRoles(self.portal, TEST_USER_ID, roles_before)
def test_product_uninstalled(self):
"""Test if collective.tinymceplugins.latex is cleanly uninstalled."""
self.assertFalse(self.installer.isProductInstalled(
'collective.tinymceplugins.latex'))
|