ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a3fd7aa7c1eefadbf8b8f84e1a3171c71a294f4 | # Copyright (c) 2016 Ryan Rossiter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from tempest import exceptions
def read_role_sets_yaml(path):
# Reads in the role sets to use
try:
with open(path, 'r') as yaml_file:
role_sets = yaml.safe_load(yaml_file)
except IOError:
raise exceptions.InvalidConfiguration(
('The path for the role sets file: %s '
'could not be found.') % path)
return role_sets
class RoleSetProvider(object):
"""A class used to provide the role sets to be used."""
def __init__(self, role_sets_file):
super(RoleSetProvider, self).__init__()
role_sets = read_role_sets_yaml(role_sets_file)
        self.role_sets = [RoleSet(name, roles) for name, roles in role_sets.items()]
def get_role_sets(self):
"""Gets the role sets to be used."""
return self.role_sets
class RoleSet(object):
"""An object used to hold the group of roles under a classificiation.
This associates a name to a group of OpenStack-defined roles. These
users are used to map to successes or failures in the test listing
file.
"""
def __init__(self, set_name, roles):
self._name = set_name
self._roles = roles
@property
def name(self):
return self._name
@property
def roles(self):
return self._roles
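
# A minimal usage sketch (hypothetical file name and role names, not part of the
# module above) showing the YAML shape that read_role_sets_yaml() expects: a mapping
# of role-set name to a list of OpenStack role names.
#
#   # role_sets.yaml
#   #   admin: ["admin", "member"]
#   #   member: ["member"]
#
#   provider = RoleSetProvider("role_sets.yaml")
#   for role_set in provider.get_role_sets():
#       print(role_set.name, role_set.roles)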
|
py | 1a3fd7c0d8d368e5c321a71bb82e52f10e29dd8e | """
ASGI config for ivr_recording project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ivr_recording.settings')
application = get_asgi_application()
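
# This module is typically referenced by an ASGI server rather than executed directly.
# For example, with uvicorn (assuming it is installed):
#
#   uvicorn ivr_recording.asgi:application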
|
py | 1a3fd890b28cada81383297763c0f48385368b81 |
from .base import Distribution
import tensorflow as tf
import numpy as np
TINY = 1e-8
class Bernoulli(Distribution):
def __init__(self, dim):
self._dim = dim
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
old_p = old_dist_info_vars["p"]
new_p = new_dist_info_vars["p"]
kl = old_p * (tf.log(old_p + TINY) - tf.log(new_p + TINY)) + \
(1 - old_p) * (tf.log(1 - old_p + TINY) - tf.log(1 - new_p + TINY))
ndims = kl.get_shape().ndims
return tf.reduce_sum(kl, axis=ndims - 1)
def kl(self, old_dist_info, new_dist_info):
old_p = old_dist_info["p"]
new_p = new_dist_info["p"]
kl = old_p * (np.log(old_p + TINY) - np.log(new_p + TINY)) + \
(1 - old_p) * (np.log(1 - old_p + TINY) - np.log(1 - new_p + TINY))
return np.sum(kl, axis=-1)
def sample(self, dist_info):
p = np.asarray(dist_info["p"])
return np.cast['int'](np.random.uniform(low=0., high=1., size=p.shape) < p)
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
old_p = old_dist_info_vars["p"]
new_p = new_dist_info_vars["p"]
ndims = old_p.get_shape().ndims
return tf.reduce_prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
axis=ndims - 1)
def log_likelihood_sym(self, x_var, dist_info_vars):
p = dist_info_vars["p"]
ndims = p.get_shape().ndims
return tf.reduce_sum(x_var * tf.log(p + TINY) + (1 - x_var) * tf.log(1 - p + TINY), axis=ndims - 1)
def log_likelihood(self, xs, dist_info):
p = dist_info["p"]
return np.sum(xs * np.log(p + TINY) + (1 - xs) * np.log(1 - p + TINY), axis=-1)
def entropy(self, dist_info):
p = dist_info["p"]
return np.sum(- p * np.log(p + TINY) - (1 - p) * np.log(1 - p + TINY), axis=-1)
@property
def dist_info_keys(self):
return ["p"]
|
py | 1a3fd9564ebd92e4fb847d6ffa220968a8a2b967 | from viadot.flows import DuckDBTransform
from viadot.tasks import DuckDBQuery, DuckDBToDF
import pytest
import pandas as pd
from unittest import mock
from viadot.sources import DuckDB
import os
TABLE = "test_table"
SCHEMA = "test_schema"
TABLE_MULTIPLE_PARQUETS = "test_multiple_parquets"
DATABASE_PATH = "test_db_123.duckdb"
@pytest.fixture(scope="session")
def duckdb():
duckdb = DuckDB(credentials=dict(database=DATABASE_PATH))
yield duckdb
os.remove(DATABASE_PATH)
def test_create_table_from_parquet(duckdb, TEST_PARQUET_FILE_PATH):
duckdb.create_table_from_parquet(
schema=SCHEMA, table=TABLE, path=TEST_PARQUET_FILE_PATH
)
def test_duckdb_query():
db_query = DuckDBQuery(credentials=dict(database=DATABASE_PATH))
result = db_query.run(f"select * from {SCHEMA}.{TABLE}")
assert type(result) == list
assert len(result) > 1
def test_duckdb_to_df():
instance = DuckDBToDF(
schema=SCHEMA, table=TABLE, credentials=dict(database=DATABASE_PATH)
)
test_df = instance.run()
assert test_df.shape > (1, 1)
assert type(test_df) == pd.core.frame.DataFrame
def test_duckdb_transform_init():
instance = DuckDBTransform("test_duckdb_transform", query="select * from test")
assert instance
def test_duckdb_transform_flow_run():
instance = DuckDBTransform(
"test_duckdb_transform",
query=f"select * from {SCHEMA}.{TABLE}",
credentials=dict(database=DATABASE_PATH),
)
result = instance.run()
assert result.is_successful()
|
py | 1a3fd9e4c45e4f68df58b6a9c1d972e6399226af | from unittest import TestCase, mock
from unittest.mock import MagicMock
from sklearn.ensemble import RandomForestClassifier
from source.analysis.classification.classifier_service import ClassifierService
from source.analysis.setup.data_split import DataSplit
from source.analysis.performance.raw_performance import RawPerformance
import numpy as np
from test.test_helper import TestHelper
class TestClassifierService(TestCase):
@mock.patch('source.analysis.classification.classifier_service.Pool')
@mock.patch('source.analysis.classification.classifier_service.cpu_count')
@mock.patch('source.analysis.classification.classifier_service.partial')
def test_runs_training_and_testing_in_parallel(self, mock_partial, mock_cpu_count, mock_pool_constructor):
expected_partial = "I am a partial"
mock_partial.return_value = expected_partial
mock_pool = MagicMock()
mock_pool_constructor.return_value = mock_pool
data_splits = [DataSplit(training_set=["subjectA", "subjectB", "subjectC"], testing_set=["subjectD"]),
DataSplit(training_set=["subjectA", "subjectB", "subjectD"], testing_set=["subjectC"])]
classifier = RandomForestClassifier()
subject_dictionary = {}
feature_set = {}
mock_pool.map.return_value = expected_pool_return = [3, 4]
expected_number_of_cpus = 32
mock_cpu_count.return_value = expected_number_of_cpus
results = ClassifierService.run_sw(data_splits, classifier, subject_dictionary, feature_set)
mock_partial.assert_called_once_with(ClassifierService.run_single_data_split_sw,
attributed_classifier=classifier,
subject_dictionary=subject_dictionary, feature_set=feature_set)
mock_pool_constructor.assert_called_once_with(expected_number_of_cpus)
mock_pool.map.assert_called_once_with(expected_partial, data_splits)
self.assertEqual(expected_pool_return, results)
@mock.patch.object(ClassifierService, 'get_class_weights')
@mock.patch('source.analysis.classification.classifier_service.ParameterSearch')
@mock.patch('source.analysis.classification.classifier_service.ClassifierInputBuilder.get_sleep_wake_inputs')
def test_run_sleep_wake(self, mock_get_sleep_wake_inputs, mock_parameter_search, mock_class_weights):
mock_classifier = MagicMock()
mock_classifier.classifier.predict_proba.return_value = class_probabilities = np.array([[0.1, 0.9], [0, 1]])
training_x = np.array([1, 2, 3, 4])
training_y = np.array([0, 0, 0, 0])
testing_x = np.array([5, 6, 7, 8])
testing_y = np.array([0, 1, 0, 1])
mock_get_sleep_wake_inputs.side_effect = [(training_x, training_y), (testing_x, testing_y)]
mock_parameter_search.run_search.return_value = {}
mock_class_weights.return_value = {0: 0.2, 1: 0.8}
subject_dictionary = {}
feature_set = {}
data_split = DataSplit(training_set=["subjectA", "subjectB", "subjectC"],
testing_set=["subject1"])
raw_performance = ClassifierService.run_single_data_split_sw(data_split, mock_classifier, subject_dictionary,
feature_set)
self.assertListEqual(testing_y.tolist(), raw_performance.true_labels.tolist())
self.assertListEqual(class_probabilities.tolist(), raw_performance.class_probabilities.tolist())
mock_class_weights.assert_called_once_with(training_y)
mock_parameter_search.run_search.assert_called_once_with(mock_classifier, training_x, training_y,
scoring='roc_auc')
mock_classifier.classifier.fit.assert_called_once_with(training_x, training_y)
mock_classifier.classifier.predict_proba.assert_called_once_with(testing_x)
|
py | 1a3fda8d0343725b554cae860b7292e2e82233d8 | import contextlib
import time
from math import ceil, log
from mock import mock, MagicMock, Mock
from pyqryptonight.pyqryptonight import StringToUInt256
from qrl.core import config
from qrl.core.Block import Block
from qrl.core.ChainManager import ChainManager
from qrl.core.DifficultyTracker import DifficultyTracker
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.PoWValidator import PoWValidator
from qrl.core.State import State
from qrl.core.Transaction import SlaveTransaction
from qrl.core.qrlnode import QRLNode
from tests.misc.helper import get_alice_xmss, get_bob_xmss, set_qrl_dir
class MockedBlockchain(object):
MAXNUMBLOCKS = 1000
def __init__(self, qrlnode, time_mock, ntp_mock):
required_height = ceil(log(self.MAXNUMBLOCKS, 2))
required_height = int(required_height + required_height % 2)
self.qrlnode = qrlnode
self.time_mock = time_mock
self.ntp_mock = ntp_mock
self.alice_xmss = get_alice_xmss(xmss_height=required_height)
self.bob_xmss = get_bob_xmss()
def create_block(self, prev_hash, mining_address=None):
if not mining_address:
mining_address = self.alice_xmss.address
transactions = []
block_prev = self.qrlnode.get_block_from_hash(prev_hash)
block_idx = block_prev.block_number + 1
if block_idx == 1:
slave_tx = SlaveTransaction.create(slave_pks=[self.bob_xmss.pk],
access_types=[0],
fee=0,
xmss_pk=self.alice_xmss.pk)
slave_tx.sign(self.alice_xmss)
slave_tx._data.nonce = 1
transactions = [slave_tx]
self.time_mock.return_value = self.time_mock.return_value + 60
self.ntp_mock.return_value = self.ntp_mock.return_value + 60
block_new = Block.create(block_number=block_idx,
prevblock_headerhash=block_prev.headerhash,
transactions=transactions,
miner_address=mining_address)
while not PoWValidator().validate_mining_nonce(state=self.qrlnode._chain_manager.state,
blockheader=block_new.blockheader,
enable_logging=False):
block_new.set_nonces(block_new.mining_nonce + 1, 0)
return block_new
def add_block(self, block):
return self.qrlnode._chain_manager.add_block(block)
def add_new_block(self, mining_address=None):
block_prev = self.qrlnode.get_block_last()
block_new = self.create_block(prev_hash=block_prev.headerhash, mining_address=mining_address)
self.qrlnode._chain_manager.add_block(block_new)
@staticmethod
@contextlib.contextmanager
def create(num_blocks, mining_address=None):
start_time = time.time()
with mock.patch('qrl.core.misc.ntp.getTime') as ntp_mock, \
set_qrl_dir('no_data'), \
State() as state, \
mock.patch('time.time') as time_mock: # noqa
time_mock.return_value = start_time
ntp_mock.return_value = start_time
state.get_measurement = MagicMock(return_value=10000000)
genesis_difficulty = config.dev.genesis_difficulty
try:
config.dev.genesis_difficulty = 10
genesis_block = GenesisBlock()
chain_manager = ChainManager(state)
chain_manager.load(genesis_block)
chain_manager._difficulty_tracker = Mock()
dt = DifficultyTracker()
tmp_difficulty = StringToUInt256('2')
tmp_target = dt.get_target(tmp_difficulty)
chain_manager._difficulty_tracker.get = MagicMock(return_value=(tmp_difficulty, tmp_target))
qrlnode = QRLNode(state, mining_address=b'')
qrlnode.set_chain_manager(chain_manager)
mock_blockchain = MockedBlockchain(qrlnode, time_mock, ntp_mock)
for block_idx in range(1, num_blocks + 1):
mock_blockchain.add_new_block(mining_address)
yield mock_blockchain
finally:
config.dev.genesis_difficulty = genesis_difficulty
|
py | 1a3fdaa2a19dace1d3e85d33100af50f48158219 | """Unit tests for JWTAuthenticator"""
import datetime
from pathlib import Path
import pytest
import jwt
from karp.errors import ClientErrorCodes
from karp.domain.errors import AuthError
from karp.infrastructure.jwt.jwt_auth_service import JWTAuthenticator
from . import adapters
with open(Path(__file__).parent / ".." / "data/private_key.pem") as fp:
jwt_private_key = fp.read()
@pytest.fixture
def jwt_authenticator():
return JWTAuthenticator(
pubkey_path=Path("karp/tests/data/pubkey.pem"),
resource_uow=adapters.FakeResourceUnitOfWork(),
)
def test_authenticate_invalid_token(jwt_authenticator):
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", "invalid")
assert exc_info.value.code == ClientErrorCodes.AUTH_GENERAL_ERROR
def test_authenticate_expired_token(jwt_authenticator):
token = jwt.encode(
{"exp": datetime.datetime(2000, 1, 1)}, jwt_private_key, algorithm="RS256"
)
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", token)
assert exc_info.value.code == ClientErrorCodes.EXPIRED_JWT
|
py | 1a3fdae81ec5d076c7610e3770852ebe98c4b5c1 |
from .engine import Engine
import pyglet
from pyglet import gl
from gem import vector
import ctypes as ct
import random
import math
class Rect(object):
def __init__(self, minVec, maxVec):
self.min = minVec
self.max = maxVec
def clone(self):
return Rect(self.min.clone(), self.max.clone())
def check_aabb(self, rect2):
return (self.max.x >= rect2.min.x and
rect2.max.x >= self.min.x and
self.max.y >= rect2.min.y and
rect2.max.y >= self.min.y)
class BoundingBoxMixin(object):
def __init__(self):
self.ctPoints = None
self.ctPointT = (gl.GLfloat * 16)
self.bbColor = (1.0, 1.0, 1.0)
def set_bb_color(self, r, g, b):
self.bbColor = (r, g, b)
def render_bounding_box(self):
gl.glLineWidth(1.0)
self.ctPoints = self.ctPointT(
self.rect.min.x, self.rect.min.y,
self.rect.max.x, self.rect.min.y,
self.rect.max.x, self.rect.min.y,
self.rect.max.x, self.rect.max.y,
self.rect.max.x, self.rect.max.y,
self.rect.min.x, self.rect.max.y,
self.rect.min.x, self.rect.max.y,
self.rect.min.x, self.rect.min.y,
)
point_ptr = ct.cast(self.ctPoints, ct.c_void_p)
gl.glColor3f(*self.bbColor)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(2, gl.GL_FLOAT, 0, point_ptr)
gl.glDrawArrays(gl.GL_LINES, 0, 8)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
class SelectionBox(BoundingBoxMixin):
def __init__(self):
super(SelectionBox, self).__init__()
self.rect = Rect(vector.Vector(2), vector.Vector(2))
def set_start(self, vec):
self.rect.min = vec.clone()
def set_end(self, vec):
self.rect.max = vec.clone()
def get_selected(self, objects):
selected = []
rect = self.rect.clone()
if self.rect.min.x > self.rect.max.x:
rect.min.x = self.rect.max.x
rect.max.x = self.rect.min.x
if self.rect.min.y > self.rect.max.y:
rect.min.y = self.rect.max.y
rect.max.y = self.rect.min.y
for obj in objects:
rec = obj.rect
if rect.check_aabb(rec):
selected.append(obj)
return selected
def render(self):
self.render_bounding_box()
class Unit(BoundingBoxMixin):
def __init__(self, imgPath, name):
super(Unit, self).__init__()
img = pyglet.image.load('data/player.png')
self.sprite = pyglet.sprite.Sprite(img)
self.position = vector.Vector(2)
self.rect = Rect(vector.Vector(2), vector.Vector(2))
self.width = self.sprite.width
self.height = self.sprite.height
self.size = vector.Vector(2, data=[self.width, self.height])
self.lenVelocity = vector.Vector(2, data=[random.random()*10, random.random()*10])
self.mass = 1.0
self.angVelocity = 0.0
self.angle = 0.0
self.momentOfInertia = (self.size.dot(self.size) * self.mass) / 12
        self.torque = vector.Vector(2)
self.set_bb_color(0.0, 0.0, 0.0)
self.update_rect()
def update_rect(self):
self.rect.min = self.position
self.rect.max.x = self.position.x + self.width
self.rect.max.y = self.position.y + self.height
def set_pos(self, vec):
self.position = vec.clone()
def update(self, dt):
self.sprite.x = self.position.x
self.sprite.y = self.position.y
self.sprite.rotation = math.degrees(self.angle)
self.update_rect()
def render(self):
self.sprite.draw()
class Particle(object):
def __init__(self, x,y):
pos = [x, y]
self.position = vector.Vector(2, data=pos)
self.velocity = vector.Vector(2, data=[random.random()*10, random.random()*10])
self.mass = 1.0 + random.random()
self.rect = Rect(self.position, self.position)
class Game(object):
def __init__(self):
self.engine = Engine()
self.engine.add_listener(self.process_events)
self.engine.register_run(self.do_run)
self.units = []
self.width = 0
self.height = 0
self.screenRect = Rect(vector.Vector(2), vector.Vector(2, data=[self.width, self.height]))
self.selecting = False
self.select = SelectionBox()
self.selected = None
self.unitsSelected = []
self.mousePos = vector.Vector(2)
self.currentClick = vector.Vector(2)
self.mouseButtons = []
self.points = []
#for i in range(10):
# self.points.append(Particle(random.random()*self.width, self.height))
self.ctPoints = None
self.keys = []
def process_events(self, event, data):
if event == 'mouse_move':
x, y = data
self.mousePos.x = x
self.mousePos.y = y
elif event == 'mouse_down':
button, modifiers = data
self.mouseButtons.append(button)
self.currentClick = self.mousePos.clone()
elif event == 'mouse_up':
button, modifiers = data
self.mouseButtons.remove(button)
if self.currentClick.x == self.mousePos.x and self.currentClick.y == self.mousePos.y:
self.unitsSelected = []
elif event == 'key_down':
self.keys.append(data[0])
elif event == 'key_up':
self.keys.remove(data[0])
elif event == 'resize':
width, height = data
self.resize(width, height)
elif event == 'on_close':
self.engine.stop()
def resize(self, width, height):
self.width = width
self.height = height
self.screenRect.max.x = width
self.screenRect.max.y = height
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1.0, 1.0)
gl.glMatrixMode(gl.GL_MODELVIEW)
def update(self, dt):
#if len(self.points) < 2000:
# for i in range(6):
# self.points.append(Particle(random.random()*self.width, self.height))
if pyglet.window.key.E in self.keys:
unit = Unit('data/player.png', 'unit')
unit.set_pos(self.mousePos)
self.units.append(unit)
elif pyglet.window.key.Q in self.keys:
for i in range(6):
self.points.append(Particle(self.mousePos.x, self.mousePos.y))
elif pyglet.window.key.M in self.keys:
speedPerTick = 100.0 * dt
for obj in self.unitsSelected:
objMin = obj.position
delta = self.mousePos - objMin
distance = delta.magnitude()
if distance > speedPerTick:
ratio = speedPerTick / distance
move = delta * ratio
final = objMin + move
else:
final = self.mousePos
obj.set_pos(final)
elif pyglet.window.key.DELETE in self.keys:
for obj in self.unitsSelected:
self.units.remove(obj)
self.unitsSelected = []
if 1 in self.mouseButtons:
if not self.selecting:
if self.currentClick != self.mousePos:
self.selecting = True
self.select.set_start(self.mousePos)
else:
if self.selecting:
self.selecting = False
if self.selecting:
self.select.set_end(self.mousePos)
self.unitsSelected = self.select.get_selected(self.units)
for unit in self.units:
unit.update(dt)
self.simulate_points(dt)
self.simulate_bodies(dt)
def simulate_points(self, dt):
        # Iterate over a copy so removing off-screen points does not skip elements
        for point in self.points[:]:
if not self.screenRect.check_aabb(point.rect):
self.points.remove(point)
# point.__init__(random.random()*self.width, self.height)
force = vector.Vector(2, data=[0, point.mass * -9.81])
acceleration = force / point.mass
point.velocity += acceleration * dt
point.position += point.velocity * dt
def simulate_bodies(self, dt):
for unit in self.units:
# calc force
force = vector.Vector(2, data=[0, unit.mass * -9.81])
half = unit.size / 2
unit.torque = half.x * force.y - half.y * force.x
lenAcceleration = force / unit.mass
unit.lenVelocity += lenAcceleration * dt
unit.position += unit.lenVelocity * dt
angAcceleration = unit.torque / unit.momentOfInertia
unit.angVelocity += angAcceleration * dt
unit.angle += unit.angVelocity * dt
def render_points(self):
renderPoints = []
for point in self.points:
renderPoints.extend(point.position.vector)
self.ctPoints = (gl.GLfloat * len(renderPoints))(*renderPoints)
point_ptr = ct.cast(self.ctPoints, ct.c_void_p)
gl.glColor3f(1.0, 1.0, 1.0)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(2, gl.GL_FLOAT, 0, point_ptr)
gl.glDrawArrays(gl.GL_POINTS, 0, len(renderPoints)//2)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
def render(self):
self.engine.window.switch_to()
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glClearColor(0.5, 0.5, 0.5, 1.0)
self.render_points()
for unit in self.units:
if unit in self.unitsSelected:
unit.render_bounding_box()
unit.render()
if self.selecting:
self.select.render()
self.engine.window.flip()
def do_run(self, dt):
self.update(dt)
self.render()
def run(self):
self.engine.run()
def main():
game = Game()
game.run()
|
py | 1a3fdaff5dbcd29cd50b156d2ab29ee43d9ace1f | #!/usr/bin/env python3
# This file is copied from GCoder.
#
# GCoder is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GCoder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import math
import datetime
import logging
from array import array
gcode_parsed_args = ["x", "y", "e", "f", "z", "i", "j"]
gcode_parsed_nonargs = ["g", "t", "m", "n"]
to_parse = "".join(gcode_parsed_args + gcode_parsed_nonargs)
gcode_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n|([%s])([-+]?[0-9]*\.?[0-9]*)" % to_parse)
gcode_strip_comment_exp = re.compile(r"\([^\(\)]*\)|;.*|[/\*].*\n")
m114_exp = re.compile(r"\([^\(\)]*\)|[/\*].*\n|([XYZ]):?([-+]?[0-9]*\.?[0-9]*)")
specific_exp = r"(?:\([^\(\)]*\))|(?:;.*)|(?:[/\*].*\n)|(%s[-+]?[0-9]*\.?[0-9]*)"
move_gcodes = ["G0", "G1", "G2", "G3"]
class PyLine:
__slots__ = ('x', 'y', 'z', 'e', 'f', 'i', 'j',
'raw', 'command', 'is_move',
'relative', 'relative_e',
'current_x', 'current_y', 'current_z', 'extruding',
'current_tool',
'gcview_end_vertex')
def __init__(self, l):
self.raw = l
def __getattr__(self, name):
return None
class PyLightLine:
__slots__ = ('raw', 'command')
def __init__(self, l):
self.raw = l
def __getattr__(self, name):
return None
try:
from . import gcoder_line
Line = gcoder_line.GLine
LightLine = gcoder_line.GLightLine
except Exception as e:
logging.warning("Memory-efficient GCoder implementation unavailable: %s" % e)
Line = PyLine
LightLine = PyLightLine
def find_specific_code(line, code):
exp = specific_exp % code
bits = [bit for bit in re.findall(exp, line.raw) if bit]
if not bits: return None
else: return float(bits[0][1:])
def S(line):
return find_specific_code(line, "S")
def P(line):
return find_specific_code(line, "P")
def split(line):
split_raw = gcode_exp.findall(line.raw.lower())
if split_raw and split_raw[0][0] == "n":
del split_raw[0]
if not split_raw:
line.command = line.raw
line.is_move = False
logging.warning("raw G-Code line \"%s\" could not be parsed" % line.raw)
return [line.raw]
command = split_raw[0]
line.command = command[0].upper() + command[1]
line.is_move = line.command in move_gcodes
return split_raw
def parse_coordinates(line, split_raw, imperial = False, force = False):
# Not a G-line, we don't want to parse its arguments
if not force and line.command[0] != "G":
return
unit_factor = 25.4 if imperial else 1
for bit in split_raw:
code = bit[0]
if code not in gcode_parsed_nonargs and bit[1]:
setattr(line, code, unit_factor * float(bit[1]))
class Layer(list):
__slots__ = ("duration", "z")
def __init__(self, lines, z = None):
super(Layer, self).__init__(lines)
self.z = z
class GCode:
line_class = Line
lines = None
layers = None
all_layers = None
layer_idxs = None
line_idxs = None
append_layer = None
append_layer_id = None
imperial = False
relative = False
relative_e = False
current_tool = 0
# Home position: current absolute position counted from machine origin
home_x = 0
home_y = 0
home_z = 0
# Current position: current absolute position counted from machine origin
current_x = 0
current_y = 0
current_z = 0
# For E this is the absolute position from machine start
current_e = 0
current_e_multi=[0]
total_e = 0
total_e_multi=[0]
max_e = 0
max_e_multi=[0]
# Current feedrate
current_f = 0
# Offset: current offset between the machine origin and the machine current
# absolute coordinate system (as shifted by G92s)
offset_x = 0
offset_y = 0
offset_z = 0
offset_e = 0
offset_e_multi = [0]
# Expected behavior:
# - G28 X => X axis is homed, offset_x <- 0, current_x <- home_x
# - G92 Xk => X axis does not move, so current_x does not change
# and offset_x <- current_x - k,
# - absolute G1 Xk => X axis moves, current_x <- offset_x + k
# How to get...
# current abs X from machine origin: current_x
# current abs X in machine current coordinate system: current_x - offset_x
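    # Worked example with illustrative numbers (not taken from any particular file):
    #   start:             home_x = 0, current_x = 50, offset_x = 0
    #   G92 X10         -> offset_x = current_x - 10 = 40, current_x unchanged (abs X = 10)
    #   absolute G1 X15 -> current_x = offset_x + 15 = 55 (abs X = 15)
    #   G28 X           -> offset_x = 0, current_x = home_x = 0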
filament_length = None
filament_length_multi=[0]
duration = None
xmin = None
xmax = None
ymin = None
ymax = None
zmin = None
zmax = None
width = None
depth = None
height = None
est_layer_height = None
# abs_x is the current absolute X in machine current coordinate system
# (after the various G92 transformations) and can be used to store the
# absolute position of the head at a given time
def _get_abs_x(self):
return self.current_x - self.offset_x
abs_x = property(_get_abs_x)
def _get_abs_y(self):
return self.current_y - self.offset_y
abs_y = property(_get_abs_y)
def _get_abs_z(self):
return self.current_z - self.offset_z
abs_z = property(_get_abs_z)
def _get_abs_e(self):
return self.current_e - self.offset_e
abs_e = property(_get_abs_e)
def _get_abs_e_multi(self,i):
return self.current_e_multi[i] - self.offset_e_multi[i]
abs_e = property(_get_abs_e)
def _get_abs_pos(self):
return (self.abs_x, self.abs_y, self.abs_z)
abs_pos = property(_get_abs_pos)
def _get_current_pos(self):
return (self.current_x, self.current_y, self.current_z)
current_pos = property(_get_current_pos)
def _get_home_pos(self):
return (self.home_x, self.home_y, self.home_z)
def _set_home_pos(self, home_pos):
if home_pos:
self.home_x, self.home_y, self.home_z = home_pos
home_pos = property(_get_home_pos, _set_home_pos)
def _get_layers_count(self):
return len(self.all_zs)
layers_count = property(_get_layers_count)
def __init__(self, data = None, home_pos = None,
layer_callback = None, deferred = False):
if not deferred:
self.prepare(data, home_pos, layer_callback)
def prepare(self, data = None, home_pos = None, layer_callback = None):
self.home_pos = home_pos
if data:
line_class = self.line_class
self.lines = [line_class(l2) for l2 in
(l.strip() for l in data)
if l2]
self._preprocess(build_layers = True,
layer_callback = layer_callback)
else:
self.lines = []
self.append_layer_id = 0
self.append_layer = Layer([])
self.all_layers = [self.append_layer]
self.all_zs = set()
self.layers = {}
self.layer_idxs = array('I', [])
self.line_idxs = array('I', [])
def has_index(self, i):
return i < len(self)
def __len__(self):
return len(self.line_idxs)
def __iter__(self):
return self.lines.__iter__()
def prepend_to_layer(self, commands, layer_idx):
# Prepend commands in reverse order
commands = [c.strip() for c in commands[::-1] if c.strip()]
layer = self.all_layers[layer_idx]
# Find start index to append lines
# and end index to append new indices
start_index = self.layer_idxs.index(layer_idx)
for i in range(start_index, len(self.layer_idxs)):
if self.layer_idxs[i] != layer_idx:
end_index = i
break
else:
end_index = i + 1
end_line = self.line_idxs[end_index - 1]
for i, command in enumerate(commands):
gline = Line(command)
# Split to get command
split(gline)
# Force is_move to False
gline.is_move = False
# Insert gline at beginning of layer
layer.insert(0, gline)
# Insert gline at beginning of list
self.lines.insert(start_index, gline)
# Update indices arrays & global gcodes list
self.layer_idxs.insert(end_index + i, layer_idx)
self.line_idxs.insert(end_index + i, end_line + i + 1)
return commands[::-1]
def rewrite_layer(self, commands, layer_idx):
# Prepend commands in reverse order
commands = [c.strip() for c in commands[::-1] if c.strip()]
layer = self.all_layers[layer_idx]
# Find start index to append lines
# and end index to append new indices
start_index = self.layer_idxs.index(layer_idx)
for i in range(start_index, len(self.layer_idxs)):
if self.layer_idxs[i] != layer_idx:
end_index = i
break
else:
end_index = i + 1
self.layer_idxs = self.layer_idxs[:start_index] + array('I', len(commands) * [layer_idx]) + self.layer_idxs[end_index:]
self.line_idxs = self.line_idxs[:start_index] + array('I', range(len(commands))) + self.line_idxs[end_index:]
del self.lines[start_index:end_index]
del layer[:]
for i, command in enumerate(commands):
gline = Line(command)
# Split to get command
split(gline)
# Force is_move to False
gline.is_move = False
# Insert gline at beginning of layer
layer.insert(0, gline)
# Insert gline at beginning of list
self.lines.insert(start_index, gline)
return commands[::-1]
def append(self, command, store = True):
command = command.strip()
if not command:
return
gline = Line(command)
self._preprocess([gline])
if store:
self.lines.append(gline)
self.append_layer.append(gline)
self.layer_idxs.append(self.append_layer_id)
self.line_idxs.append(len(self.append_layer))
return gline
def _preprocess(self, lines = None, build_layers = False,
layer_callback = None):
"""Checks for imperial/relativeness settings and tool changes"""
if not lines:
lines = self.lines
imperial = self.imperial
relative = self.relative
relative_e = self.relative_e
current_tool = self.current_tool
current_x = self.current_x
current_y = self.current_y
current_z = self.current_z
offset_x = self.offset_x
offset_y = self.offset_y
offset_z = self.offset_z
# Extrusion computation
current_e = self.current_e
offset_e = self.offset_e
total_e = self.total_e
max_e = self.max_e
current_e_multi = self.current_e_multi[current_tool]
offset_e_multi = self.offset_e_multi[current_tool]
total_e_multi = self.total_e_multi[current_tool]
max_e_multi = self.max_e_multi[current_tool]
# Store this one out of the build_layers scope for efficiency
cur_layer_has_extrusion = False
# Initialize layers and other global computations
if build_layers:
# Bounding box computation
xmin = float("inf")
ymin = float("inf")
zmin = 0
xmax = float("-inf")
ymax = float("-inf")
zmax = float("-inf")
# Also compute extrusion-only values
xmin_e = float("inf")
ymin_e = float("inf")
xmax_e = float("-inf")
ymax_e = float("-inf")
# Duration estimation
# TODO:
# get device caps from firmware: max speed, acceleration/axis
# (including extruder)
# calculate the maximum move duration accounting for above ;)
lastx = lasty = lastz = laste = lastf = 0.0
lastdx = 0
lastdy = 0
x = y = e = f = 0.0
currenttravel = 0.0
moveduration = 0.0
totalduration = 0.0
acceleration = 2000.0 # mm/s^2
layerbeginduration = 0.0
# Initialize layers
all_layers = self.all_layers = []
all_zs = self.all_zs = set()
layer_idxs = self.layer_idxs = []
line_idxs = self.line_idxs = []
layer_id = 0
layer_line = 0
last_layer_z = None
prev_z = None
prev_base_z = (None, None)
cur_z = None
cur_lines = []
if self.line_class != Line:
get_line = lambda l: Line(l.raw)
else:
get_line = lambda l: l
for true_line in lines:
# # Parse line
# Use a heavy copy of the light line to preprocess
line = get_line(true_line)
split_raw = split(line)
if line.command:
# Update properties
if line.is_move:
line.relative = relative
line.relative_e = relative_e
line.current_tool = current_tool
elif line.command == "G20":
imperial = True
elif line.command == "G21":
imperial = False
elif line.command == "G90":
relative = False
relative_e = False
elif line.command == "G91":
relative = True
relative_e = True
elif line.command == "M82":
relative_e = False
elif line.command == "M83":
relative_e = True
elif line.command[0] == "T":
try:
current_tool = int(line.command[1:])
except:
pass #handle T? by treating it as no tool change
while(current_tool+1>len(self.current_e_multi)):
self.current_e_multi+=[0]
self.offset_e_multi+=[0]
self.total_e_multi+=[0]
self.max_e_multi+=[0]
current_e_multi = self.current_e_multi[current_tool]
offset_e_multi = self.offset_e_multi[current_tool]
total_e_multi = self.total_e_multi[current_tool]
max_e_multi = self.max_e_multi[current_tool]
if line.command[0] == "G":
parse_coordinates(line, split_raw, imperial)
# Compute current position
if line.is_move:
x = line.x
y = line.y
z = line.z
if line.f is not None:
self.current_f = line.f
if line.relative:
x = current_x + (x or 0)
y = current_y + (y or 0)
z = current_z + (z or 0)
else:
if x is not None: x = x + offset_x
if y is not None: y = y + offset_y
if z is not None: z = z + offset_z
if x is not None: current_x = x
if y is not None: current_y = y
if z is not None: current_z = z
elif line.command == "G28":
home_all = not any([line.x, line.y, line.z])
if home_all or line.x is not None:
offset_x = 0
current_x = self.home_x
if home_all or line.y is not None:
offset_y = 0
current_y = self.home_y
if home_all or line.z is not None:
offset_z = 0
current_z = self.home_z
elif line.command == "G92":
if line.x is not None: offset_x = current_x - line.x
if line.y is not None: offset_y = current_y - line.y
if line.z is not None: offset_z = current_z - line.z
line.current_x = current_x
line.current_y = current_y
line.current_z = current_z
# # Process extrusion
if line.e is not None:
if line.is_move:
if line.relative_e:
line.extruding = line.e > 0
total_e += line.e
current_e += line.e
total_e_multi += line.e
current_e_multi += line.e
else:
new_e = line.e + offset_e
line.extruding = new_e > current_e
total_e += new_e - current_e
current_e = new_e
new_e_multi = line.e + offset_e_multi
total_e_multi += new_e_multi - current_e_multi
current_e_multi = new_e_multi
max_e = max(max_e, total_e)
max_e_multi=max(max_e_multi, total_e_multi)
cur_layer_has_extrusion |= line.extruding
elif line.command == "G92":
offset_e = current_e - line.e
offset_e_multi = current_e_multi - line.e
self.current_e_multi[current_tool]=current_e_multi
self.offset_e_multi[current_tool]=offset_e_multi
self.max_e_multi[current_tool]=max_e_multi
self.total_e_multi[current_tool]=total_e_multi
# # Create layers and perform global computations
if build_layers:
# Update bounding box
if line.is_move:
if line.extruding:
if line.current_x is not None:
xmin_e = min(xmin_e, line.current_x)
xmax_e = max(xmax_e, line.current_x)
if line.current_y is not None:
ymin_e = min(ymin_e, line.current_y)
ymax_e = max(ymax_e, line.current_y)
if max_e <= 0:
if line.current_x is not None:
xmin = min(xmin, line.current_x)
xmax = max(xmax, line.current_x)
if line.current_y is not None:
ymin = min(ymin, line.current_y)
ymax = max(ymax, line.current_y)
# Compute duration
if line.command == "G0" or line.command == "G1":
x = line.x if line.x is not None else lastx
y = line.y if line.y is not None else lasty
z = line.z if line.z is not None else lastz
e = line.e if line.e is not None else laste
# mm/s vs mm/m => divide by 60
f = line.f / 60.0 if line.f is not None else lastf
# given last feedrate and current feedrate calculate the
# distance needed to achieve current feedrate.
# if travel is longer than req'd distance, then subtract
# distance to achieve full speed, and add the time it took
# to get there.
# then calculate the time taken to complete the remaining
# distance
# FIXME: this code has been proven to be super wrong when 2
                        # subsequent moves are in opposite directions, as requested
                        # speed is constant but printer has to fully decelerate
# and reaccelerate
# The following code tries to fix it by forcing a full
# reacceleration if this move is in the opposite direction
# of the previous one
dx = x - lastx
dy = y - lasty
if dx * lastdx + dy * lastdy <= 0:
lastf = 0
currenttravel = math.hypot(dx, dy)
if currenttravel == 0:
if line.z is not None:
currenttravel = abs(line.z) if line.relative else abs(line.z - lastz)
elif line.e is not None:
currenttravel = abs(line.e) if line.relative_e else abs(line.e - laste)
                        # Feedrate hasn't changed, no acceleration/deceleration planned
if f == lastf:
moveduration = currenttravel / f if f != 0 else 0.
else:
# FIXME: review this better
# this looks wrong : there's little chance that the feedrate we'll decelerate to is the previous feedrate
# shouldn't we instead look at three consecutive moves ?
distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration) # multiply by 2 because we have to accelerate and decelerate
if distance <= currenttravel and lastf + f != 0 and f != 0:
moveduration = 2 * distance / (lastf + f) # This is distance / mean(lastf, f)
moveduration += (currenttravel - distance) / f
else:
moveduration = 2 * currenttravel / (lastf + f) # This is currenttravel / mean(lastf, f)
# FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
# moveduration = math.sqrt(2 * distance / acceleration) # probably buggy : not taking actual travel into account
lastdx = dx
lastdy = dy
totalduration += moveduration
lastx = x
lasty = y
lastz = z
laste = e
lastf = f
elif line.command == "G4":
moveduration = P(line)
if moveduration:
moveduration /= 1000.0
totalduration += moveduration
# FIXME : looks like this needs to be tested with "lift Z on move"
if line.z is not None:
if line.command == "G92":
cur_z = line.z
elif line.is_move:
if line.relative and cur_z is not None:
cur_z += line.z
else:
cur_z = line.z
# FIXME: the logic behind this code seems to work, but it might be
# broken
if cur_z != prev_z:
if prev_z is not None and last_layer_z is not None:
offset = self.est_layer_height if self.est_layer_height else 0.01
if abs(prev_z - last_layer_z) < offset:
if self.est_layer_height is None:
zs = sorted([l.z for l in all_layers if l.z is not None])
heights = [round(zs[i + 1] - zs[i], 3) for i in range(len(zs) - 1)]
heights = [height for height in heights if height]
if len(heights) >= 2: self.est_layer_height = heights[1]
elif heights: self.est_layer_height = heights[0]
else: self.est_layer_height = 0.1
base_z = round(prev_z - (prev_z % self.est_layer_height), 2)
else:
base_z = round(prev_z, 2)
else:
base_z = prev_z
if base_z != prev_base_z:
new_layer = Layer(cur_lines, base_z)
new_layer.duration = totalduration - layerbeginduration
layerbeginduration = totalduration
all_layers.append(new_layer)
if cur_layer_has_extrusion and prev_z not in all_zs:
all_zs.add(prev_z)
cur_lines = []
cur_layer_has_extrusion = False
layer_id += 1
layer_line = 0
last_layer_z = base_z
if layer_callback is not None:
layer_callback(self, len(all_layers) - 1)
prev_base_z = base_z
if build_layers:
cur_lines.append(true_line)
layer_idxs.append(layer_id)
line_idxs.append(layer_line)
layer_line += 1
prev_z = cur_z
# ## Loop done
# Store current status
self.imperial = imperial
self.relative = relative
self.relative_e = relative_e
self.current_tool = current_tool
self.current_x = current_x
self.current_y = current_y
self.current_z = current_z
self.offset_x = offset_x
self.offset_y = offset_y
self.offset_z = offset_z
self.current_e = current_e
self.offset_e = offset_e
self.max_e = max_e
self.total_e = total_e
self.current_e_multi[current_tool]=current_e_multi
self.offset_e_multi[current_tool]=offset_e_multi
self.max_e_multi[current_tool]=max_e_multi
self.total_e_multi[current_tool]=total_e_multi
# Finalize layers
if build_layers:
if cur_lines:
new_layer = Layer(cur_lines, prev_z)
new_layer.duration = totalduration - layerbeginduration
layerbeginduration = totalduration
all_layers.append(new_layer)
if cur_layer_has_extrusion and prev_z not in all_zs:
all_zs.add(prev_z)
self.append_layer_id = len(all_layers)
self.append_layer = Layer([])
self.append_layer.duration = 0
all_layers.append(self.append_layer)
self.layer_idxs = array('I', layer_idxs)
self.line_idxs = array('I', line_idxs)
# Compute bounding box
all_zs = self.all_zs.union({zmin}).difference({None})
zmin = min(all_zs)
zmax = max(all_zs)
self.filament_length = self.max_e
while len(self.filament_length_multi)<len(self.max_e_multi):
self.filament_length_multi+=[0]
for i in enumerate(self.max_e_multi):
self.filament_length_multi[i[0]]=i[1]
if self.filament_length > 0:
self.xmin = xmin_e if not math.isinf(xmin_e) else 0
self.xmax = xmax_e if not math.isinf(xmax_e) else 0
self.ymin = ymin_e if not math.isinf(ymin_e) else 0
self.ymax = ymax_e if not math.isinf(ymax_e) else 0
else:
self.xmin = xmin if not math.isinf(xmin) else 0
self.xmax = xmax if not math.isinf(xmax) else 0
self.ymin = ymin if not math.isinf(ymin) else 0
self.ymax = ymax if not math.isinf(ymax) else 0
self.zmin = zmin if not math.isinf(zmin) else 0
self.zmax = zmax if not math.isinf(zmax) else 0
self.width = self.xmax - self.xmin
self.depth = self.ymax - self.ymin
self.height = self.zmax - self.zmin
# Finalize duration
totaltime = datetime.timedelta(seconds = int(totalduration))
self.duration = totaltime
def idxs(self, i):
return self.layer_idxs[i], self.line_idxs[i]
def estimate_duration(self):
return self.layers_count, self.duration
class LightGCode(GCode):
line_class = LightLine
def main():
if len(sys.argv) < 2:
print("usage: %s filename.gcode" % sys.argv[0])
return
print("Line object size:", sys.getsizeof(Line("G0 X0")))
print("Light line object size:", sys.getsizeof(LightLine("G0 X0")))
gcode = GCode(open(sys.argv[1], "rU"))
print("Dimensions:")
xdims = (gcode.xmin, gcode.xmax, gcode.width)
print("\tX: %0.02f - %0.02f (%0.02f)" % xdims)
ydims = (gcode.ymin, gcode.ymax, gcode.depth)
print("\tY: %0.02f - %0.02f (%0.02f)" % ydims)
zdims = (gcode.zmin, gcode.zmax, gcode.height)
print("\tZ: %0.02f - %0.02f (%0.02f)" % zdims)
print("Filament used: %0.02fmm" % gcode.filament_length)
for i in enumerate(gcode.filament_length_multi):
print("E%d %0.02fmm" % (i[0],i[1]))
print("Number of layers: %d" % gcode.layers_count)
print("Estimated duration: %s" % gcode.estimate_duration()[1])
if __name__ == '__main__':
main()
|
py | 1a3fdc4aea7280e45a23945776d7be3d6e40ab11 | """
Define functions needed for the demos.
"""
import numpy as np
from scipy.fftpack import fft2, ifft2, fftshift, ifftshift
from scipy.signal import fftconvolve
from bm3d import gaussian_kernel
def get_psnr(y_est: np.ndarray, y_ref: np.ndarray) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:return: PSNR value
"""
return 10 * np.log10(1 / np.mean(((y_est - y_ref).ravel()) ** 2))
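# Quick numeric check of the formula above: a mean squared error of 1e-2 on a
# unit-range image gives 10 * log10(1 / 1e-2) = 20 dB.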
def get_cropped_psnr(y_est: np.ndarray, y_ref: np.ndarray, crop: tuple) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
Crop the images before calculating the value by crop.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:param crop: Tuple of crop-x and crop-y from both stides
:return: PSNR value
"""
return get_psnr(np.atleast_3d(y_est)[crop[0]:-crop[0], crop[1]:-crop[1], :],
np.atleast_3d(y_ref)[crop[0]:-crop[0], crop[1]:-crop[1], :])
def get_experiment_kernel(noise_type: str, noise_var: float, sz: tuple = np.array((101, 101))):
"""
Get kernel for generating noise from specific experiment from the paper.
:param noise_type: Noise type string, g[0-4](w|)
:param noise_var: noise variance
:param sz: size of image, used only for g4 and g4w
:return: experiment kernel with the l2-norm equal to variance
"""
# if noiseType == gw / g0
kernel = np.array([[1]])
noise_types = ['gw', 'g0', 'g1', 'g2', 'g3', 'g4', 'g1w', 'g2w', 'g3w', 'g4w']
if noise_type not in noise_types:
raise ValueError("Noise type must be one of " + str(noise_types))
if noise_type != "g4" and noise_type != "g4w":
        # Crop this size of kernel when generating, unless pink noise,
        # in which case we want to use the full image size
sz = np.array([101, 101])
else:
sz = np.array(sz)
# Sizes for meshgrids
sz2 = -(1 - (sz % 2)) * 1 + np.floor(sz / 2)
sz1 = np.floor(sz / 2)
uu, vv = np.meshgrid([i for i in range(-int(sz1[0]), int(sz2[0]) + 1)],
[i for i in range(-int(sz1[1]), int(sz2[1]) + 1)])
beta = 0.8
if noise_type[0:2] == 'g1':
# Horizontal line
kernel = np.atleast_2d(16 - abs(np.linspace(1, 31, 31) - 16))
elif noise_type[0:2] == 'g2':
# Circular repeating pattern
scale = 1
dist = uu ** 2 + vv ** 2
kernel = np.cos(np.sqrt(dist) / scale) * gaussian_kernel((sz[0], sz[1]), 10)
elif noise_type[0:2] == 'g3':
# Diagonal line pattern kernel
scale = 1
kernel = np.cos((uu + vv) / scale) * gaussian_kernel((sz[0], sz[1]), 10)
elif noise_type[0:2] == 'g4':
# Pink noise
dist = uu ** 2 + vv ** 2
n = sz[0] * sz[1]
spec = (np.sqrt((np.sqrt(n) * 1e-2) / (np.sqrt(dist) + np.sqrt(n) * 1e-2)))
kernel = fftshift(ifft2(ifftshift(spec)))
else: # gw and g0 are white
beta = 0
# -- Noise with additional white component --
if len(noise_type) > 2 and noise_type[2] == 'w':
kernel = kernel / np.sqrt(np.sum(kernel ** 2))
kalpha = np.sqrt((1 - beta) + beta * abs(fft2(kernel, (sz[0], sz[1]))) ** 2)
kernel = fftshift(ifft2(kalpha))
kernel = np.real(kernel)
# Correct variance
kernel = kernel / np.sqrt(np.sum(kernel ** 2)) * np.sqrt(noise_var)
return kernel
def get_experiment_noise(noise_type: str, noise_var: float, realization: int, sz: tuple)\
-> (np.ndarray, np.ndarray, np.ndarray):
"""
Generate noise for experiment with specified kernel, variance, seed and size.
Return noise and relevant parameters.
The generated noise is non-circular.
:param noise_type: Noise type, see get_experiment_kernel for list of accepted types.
:param noise_var: Noise variance of the resulting noise
:param realization: Seed for the noise realization
:param sz: image size -> size of resulting noise
:return: noise, PSD, and kernel
"""
np.random.seed(realization)
# Get pre-specified kernel
kernel = get_experiment_kernel(noise_type, noise_var, sz)
# Create noisy image
half_kernel = np.ceil(np.array(kernel.shape) / 2)
if len(sz) == 3 and half_kernel.size == 2:
half_kernel = [half_kernel[0], half_kernel[1], 0]
kernel = np.atleast_3d(kernel)
half_kernel = np.array(half_kernel, dtype=int)
# Crop edges
noise = fftconvolve(np.random.normal(size=(sz + 2 * half_kernel)), kernel, mode='same')
noise = np.atleast_3d(noise)[half_kernel[0]:-half_kernel[0], half_kernel[1]:-half_kernel[1], :]
psd = abs(fft2(kernel, (sz[0], sz[1]), axes=(0, 1))) ** 2 * sz[0] * sz[1]
return noise, psd, kernel
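
# A small usage sketch with illustrative parameters (the image `y` is assumed to be a
# clean image scaled to [0, 1]): generate pink noise ('g4') at variance 0.02 for a
# 256x256 single-channel image and add it to the clean image.
#
#   noise, psd, kernel = get_experiment_noise('g4', 0.02, realization=0, sz=(256, 256, 1))
#   z = np.atleast_3d(y) + noise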
|
py | 1a3fdc8e5ed92172d9b88b57c6f7c5ea5db4e760 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 Richard Hull and contributors
# See LICENSE.rst for details.
# PYTHON_ARGCOMPLETE_OK
"""
Rotating 3D box wireframe & color dithering.
Adapted from:
http://codentronix.com/2011/05/12/rotating-3d-cube-using-python-and-pygame/
"""
import sys
import math
from operator import itemgetter
from demo_opts import get_device
from luma.core.render import canvas
from luma.core.sprite_system import framerate_regulator
def radians(degrees):
return degrees * math.pi / 180
class point(object):
def __init__(self, x, y, z):
self.coords = (x, y, z)
self.xy = (x, y)
self.z = z
def rotate_x(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(x, y * c - z * s, y * s + z * c)
def rotate_y(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(z * s + x * c, y, z * c - x * s)
def rotate_z(self, angle):
x, y, z = self.coords
rad = radians(angle)
c = math.cos(rad)
s = math.sin(rad)
return point(x * c - y * s, x * s + y * c, z)
def project(self, size, fov, viewer_distance):
x, y, z = self.coords
factor = fov / (viewer_distance + z)
return point(x * factor + size[0] / 2, -y * factor + size[1] / 2, z)
def sine_wave(min, max, step=1):
angle = 0
diff = max - min
diff2 = diff / 2
offset = min + diff2
while True:
yield angle, offset + math.sin(radians(angle)) * diff2
angle += step
def main(num_iterations=sys.maxsize):
regulator = framerate_regulator(fps=30)
vertices = [
point(-1, 1, -1),
point(1, 1, -1),
point(1, -1, -1),
point(-1, -1, -1),
point(-1, 1, 1),
point(1, 1, 1),
point(1, -1, 1),
point(-1, -1, 1)
]
faces = [
((0, 1, 2, 3), "red"),
((1, 5, 6, 2), "green"),
((0, 4, 5, 1), "blue"),
((5, 4, 7, 6), "magenta"),
((4, 0, 3, 7), "yellow"),
((3, 2, 6, 7), "cyan")
]
a, b, c = 0, 0, 0
for angle, dist in sine_wave(8, 40, 1.5):
with regulator:
num_iterations -= 1
if num_iterations == 0:
break
t = [v.rotate_x(a).rotate_y(b).rotate_z(c).project(device.size, 256, dist)
for v in vertices]
depth = []
for idx, face in enumerate(faces):
v1, v2, v3, v4 = face[0]
avg_z = (t[v1].z + t[v2].z + t[v3].z + t[v4].z) / 4.0
depth.append((idx, avg_z))
with canvas(device, dither=True) as draw:
for idx, depth in sorted(depth, key=itemgetter(1), reverse=True)[3:]:
(v1, v2, v3, v4), color = faces[idx]
if angle // 720 % 2 == 0:
fill, outline = color, color
else:
fill, outline = "black", "white"
draw.polygon(t[v1].xy + t[v2].xy + t[v3].xy + t[v4].xy, fill, outline)
a += 0.3
b -= 1.1
c += 0.85
if __name__ == "__main__":
try:
device = get_device()
main()
except KeyboardInterrupt:
pass
|
py | 1a3fde61453e39cad19c3b102a00cdd85270bba8 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 Matteo Ingrosso
In combination with top_3 script, this one plot the top 3 patches with their values.
"""
from get_top_3 import *
import matplotlib.pyplot as plt
import os
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1000000000
from matplotlib import rcParams
rcParams['axes.titlesize'] = 35
rcParams['font.size'] = 40
# from the other file
#folder = input('gimme the folder: ')
region = input('Gimme the region: ')
rows = 2
cols = 3
def display_multiple_img(images, rows, cols):
figure, ax = plt.subplots(nrows=rows,ncols=cols )
figure.set_figheight(15)
figure.set_figwidth(20)
figure.set_dpi(300)
figure.subplots_adjust(hspace=0.2)
figure.subplots_adjust(wspace=0.4)
for ind,key in enumerate(images):
ax.ravel()[ind].imshow(Image.open(images[key], mode='r'))
ax.ravel()[ind].set_axis_off()
plt.figtext(0.128, 0.5, ssim_1, va='center')
plt.figtext(0.5, 0.5, ssim_2, va='center', ha='center')
plt.figtext(0.775, 0.5, ssim_3, va='center')
plt.figtext(-0.02, 0.5, region, va='center', ha="left", rotation=90, fontweight='bold')
# plt.figtext(0.5, 0.98, 'SSIM values', ha="center")
figure.suptitle('SSIM values', fontsize=40, fontweight='bold')
plt.tight_layout()
plt.show()
images = {'Image0': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_1)+'.jpg')
, 'Image1': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_2)+'.jpg')
, 'Image2': os.path.join(folder, 'validation', 'fake','save'+str(ssim_ind_3)+'.jpg')
, 'Image3': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_1)+'.jpg')
, 'Image4': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_2)+'.jpg')
, 'Image5': os.path.join(folder, 'validation', 'real','save'+str(ssim_ind_3)+'.jpg')}
display_multiple_img(images, rows, cols)
|
py | 1a3fdf73f6b0324013e1adb6ee402ceeb70590ff | import requests
import json
import uuid
from zabbixapi_exception import ZabbixIncompatibleApi
from zabbixapi_exception import ZabbixNotPermitted
class ZabbixApi(object):
"""
main zabbix api class it calls zabbix api and allow to set basic headers connection
timeout and ssl certificate validation. It also supports dynamic method binding so
we don't have to define every api call but we can model them following the Zabbix doc
https://www.zabbix.com/documentation/3.4/manual/api
"""
def __init__(self, url, json_rpc='2.0', content_type='application/json-rpc', invalid_cert=False, timeout=7, enable_debug=False):
self.url = url.rstrip('/') + '/api_jsonrpc.php'
self.content_type = content_type
self.json_rpc = json_rpc
self.token = None
self.version = None
self.timeout = timeout
self.ssl_verify = invalid_cert
self.debug_enabled = enable_debug
requests.packages.urllib3.disable_warnings()
def call_api(self, method, headers, body):
"""
this function handle all api call to zabbix server and automatically insert
id and auth token if it exists
"""
call_id = str(uuid.uuid4())
if body is not None:
body.update({'id': call_id})
if self.token is not None:
body.update({'auth': self.token})
if self.debug_enabled:
print('\033[92m[DEBUG request]: {}\033[0m'.format(json.dumps(body)))
try:
if method == 'post':
response = requests.post(self.url, headers=headers, data=json.dumps(body), verify=self.ssl_verify,
timeout=self.timeout)
elif method == 'get':
response = requests.get(self.url, headers=headers, verify=self.ssl_verify, timeout=self.timeout)
else:
                raise NotImplementedError('Invalid method: {}'.format(method))
if self.debug_enabled:
print('\033[92m[DEBUG response]: {}\033[0m'.format(response.text))
except Exception as ex:
print("\033[91m[ERROR]: {}\033[0m".format(ex.message))
response = {'result': ex.message}
return response
try:
response.raise_for_status()
except Exception:
print("Bad return code {}".format(response.status_code))
json_response = json.loads(response.text)
try:
if json_response['error']['code'] == -32602:
raise ZabbixIncompatibleApi("\033[91m[ERROR]:{} code {}\033[0m".format(json_response['error']['data'],
json_response['error']['code']))
if json_response['error']['code'] == -32500:
raise ZabbixNotPermitted("\033[91m[ERROR]:{} code {}\033[0m".format(json_response['error']['data'],
json_response['error']['code']))
except KeyError:
pass
return json_response['result']
# todo create api version control
def get_info(self):
headers = {'Content-Type': self.content_type}
params = {
'jsonrpc': self.json_rpc,
'method': 'apiinfo.version',
'params': {}
}
r = self.call_api('post', headers, params)
self.version = r
def login(self, username, password):
headers = {'Content-Type': self.content_type}
params = {
'jsonrpc': self.json_rpc,
'method': 'user.login',
'params': {'user': username, 'password': password},
}
r = self.call_api('post', headers, params)
if type(r) is str or type(r) is unicode:
self.token = r
else:
self.token = None
return True
def logout(self):
if self.token is not None:
headers = {'Content-Type': self.content_type}
params = {
'jsonrpc': self.json_rpc,
'method': 'user.logout',
'params': {},
}
r = self.call_api('post', headers, params)
if str(r).lower() == 'true':
self.token = None
else:
return False
return True
def __getattr__(self, zbobj):
"""
dynamic method binding with this function we can all
every type of function and if it exists as Zabbix API we can
call it directly without create a specific function for every method
"""
return ZabbixAPICommonObj(zbobj, self)
class ZabbixAPICommonObj(object):
def __init__(self, zbobj, parent):
self.zbobj = zbobj
self.parent = parent
def __getattr__(self, zbmethod):
# print('Calling __getattr__: {}'.format(zbmethod))
self.zbmethod = zbmethod
def get_arguments(*arg, **kw):
"""
kw is a dictionary of key=value that fit
perfectly in our params request body
"""
#print('kw->{}'.format(kw))
#print('arg->{}'.format(arg))
headers = {'Content-Type': self.parent.content_type}
params = {
'jsonrpc': self.parent.json_rpc,
'method': self.zbobj + '.' + self.zbmethod,
'params': kw or arg
}
r = self.parent.call_api('post', headers, params)
# todo create a debug mode to print call details
#print('{}'.format(r))
return r
return get_arguments
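
# A minimal usage sketch of the dynamic method binding described above. The URL and
# credentials are placeholders; "host.get" and its "output" parameter follow the public
# Zabbix API documentation referenced in the class docstring.
#
#   api = ZabbixApi("https://zabbix.example.com/")
#   api.get_info()
#   api.login("Admin", "zabbix")
#   hosts = api.host.get(output="extend")   # resolved by __getattr__ into method "host.get"
#   api.logout()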
|
py | 1a3fdf97f1651fa4edf8d56b122a24951f5cb0ac | #!/usr/bin/env python
import metadata.io
import phylodist.io
import phylodist.histogram
DATA_ROOT = '/dacb/globus'
metadataDF = metadata.io.loadFile(
DATA_ROOT + '/metadata.tab',
indexCols=['origin_O2', 'O2', 'week', 'replicate', 'sample', 'date', 'type'],
verbose=True
)
phylodistSampleDict = phylodist.io.sweepFiles(
DATA_ROOT,
sampleNameExtractionFunction=metadata.io.defaultSampleNameExtractionFunction
)
sampleDictTaxHistDict = phylodist.histogram.computeAllForSamples(
phylodistSampleDict
)
taxonomyDictTaxHist = phylodist.histogram.mergeAcrossSamplesTaxLevels(
sampleDictTaxHistDict,
metadata=metadataDF
)
# filter at 2.5% abundance
for taxonomyLevel in TAXONOMY_HIERARCHY:
dF = taxonomyDictTaxHist[taxonomyLevel]
taxonomyDictTaxHist[taxonomyLevel] = dF.where(dF >= 2.5)
taxonomyDictTaxHist[taxonomyLevel].dropna(how='all', inplace=True)
phylodist.io.writeExcelTaxonomyDictTaxHist(
DATA_ROOT + '/phylodist.xlsx',
taxonomyDictTaxHist
)
|
py | 1a3fdfd4d90f52bd578b3ae2dcb3f32855be8721 | class GRAPH:
"""docstring for GRAPH"""
def __init__(self, nodes):
self.nodes=nodes
self.graph=[[0]*nodes for i in range (nodes)]
self.visited=[0]*nodes
def show(self):
for i in self.graph:
for j in i:
print(j, end=' ')
print(' ')
def add_edge(self, i, j):
self.graph[i][j]=1
self.graph[j][i]=1
def bfs(self,s):
queue=[s]
self.visited[s]=1
while len(queue)!=0:
x=queue.pop(0)
print(x)
for i in range(0,self.nodes):
if self.graph[x][i]==1 and self.visited[i]==0:
queue.append(i)
self.visited[i]=1
n=int(input("Enter the number of Nodes : "))
g=GRAPH(n)
e=int(input("Enter the no of edges : "))
print("Enter the edges (u v)")
for i in range(0,e):
    u, v = map(int, input().split())
g.add_edge(u,v)
s=int(input("Enter the source node :"))
g.bfs(s)
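# Example session (a sketch, not part of the original script): for a 4-node
# graph with edges (0,1), (0,2), (1,3) and source 0, the traversal visits
# 0, 1, 2, 3 in that order:
#   g = GRAPH(4)
#   g.add_edge(0, 1); g.add_edge(0, 2); g.add_edge(1, 3)
#   g.bfs(0)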
|
py | 1a3fe0840303716486c1b81b1a04e85e820ef50f | import os
from Crypto.Cipher import Blowfish
from Crypto.Random import get_random_bytes
import codecs
import kbr.file_utils as file_utils
import re
import sys
import tempfile
import requests
import time
id_cipher = None
def init( id_secret:str) -> None:
global id_cipher
id_cipher = Blowfish.new(id_secret.encode('utf-8'), mode=Blowfish.MODE_ECB)
def decrypt_value(value:str) -> str:
value = str(value)
value_hex = codecs.decode(value, 'hex')
decrypted_value = id_cipher.decrypt( value_hex ).decode("utf-8").lstrip("!")
return decrypted_value
def encrypt_value(value:str) -> str:
value = str(value)
value = value.encode('utf-8')
s = (b"!" * (8 - len(value) % 8)) + value
# Encrypt
return codecs.encode(id_cipher.encrypt(s), 'hex').decode("utf-8")
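# Round-trip sketch (assumes init() was first called with a shared secret of
# 4-56 bytes, as Blowfish requires):
#   init("16-byte-secret!!")
#   token = encrypt_value(42)       # hex string safe to hand out externally
#   decrypt_value(token)            # -> "42" (the "!" padding is stripped)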
def directory_hash_id(id):
s = str(id)
l = len(s)
# Shortcut -- ids 0-999 go under ../000/
if l < 4:
return ["000"]
# Pad with zeros until a multiple of three
padded = ((3 - len(s) % 3) * "0") + s
# Drop the last three digits -- 1000 files per directory
padded = padded[:-3]
# Break into chunks of three
return [padded[i * 3:(i + 1) * 3] for i in range(len(padded) // 3)]
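# Example (illustrative): directory_hash_id(1234567) -> ['001', '234'], so the
# corresponding file is looked up under <base>/001/234/dataset_1234567.dat;
# ids below 1000 all fall into the single bucket ['000'].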
def construct_file_path(obj_id, file_dir=None):
"""
Taken and adjusted from the galaxy code base.
Construct the absolute path for accessing the object identified by `obj_id`.
:type file_dir: string
:param file_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
This option is used for backward compatibility. If
`True` then the composed directory structure does not include a
hash id (e.g., /files/dataset_10.dat (old) vs.
/files/000/dataset_10.dat (new))
"""
# base = os.path.abspath(file_dir, self.file_path))
base = file_dir
# extra_dir should never be constructed from provided data but just
# make sure there are no shenannigans afoot
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
path = os.path.join(base, rel_path)
path = os.path.join(path, "dataset_%s.dat" % obj_id)
print( f"Trying new style path {path} ")
if os.path.isfile(path):
return path
#Try old style dir names:
path = base
path = os.path.join(path, "dataset_%s.dat" % obj_id)
if os.path.isfile( path ):
return path
path = file_utils.find_first("dataset_%s.dat" % obj_id, file_dir)
if path is not None:
return path
raise RuntimeError(f"Cannot find dataset: 'dataset_{obj_id}.dat'")
def create_uuid(length=16):
# Generate a unique, high entropy random number.
# Length 16 --> 128 bit
long_uuid = codecs.encode(get_random_bytes(length), 'hex').decode("utf-8")
return long_uuid[:32]
def encrypt_ids(entry: any) -> []:
if isinstance(entry, list):
return list_encrypt_ids(entry)
if entry == [] or entry == {}:
return entry
if isinstance(entry, dict):
for key in entry.keys():
if key == 'nels_id':
continue
if key == 'id' or key.find('_id') > -1 and isinstance(entry[key], int):
entry[f"{key}"] = encrypt_value(entry[key])
else:
raise RuntimeError(f"Cannot change ids in {entry}")
return entry
def list_encrypt_ids(entries: []) -> []:
for entry in entries:
entry = encrypt_ids(entry)
return entries
def readable_date(timestamp:str) -> str:
if timestamp is None:
return None
timestamp = timestamp.replace('T', ' ')
timestamp = re.sub(r'\.\d+', '', timestamp)
return timestamp
def timedelta_to_epoc(timerange) -> int:
''' 3h, 2d, 1w --> now - delta as epoc secs '''
if timerange == '' or timerange is None:
return 0
ts = time.time()
time_delta = ts - timedelta_to_sec( timerange)
return time_delta
def timedelta_to_sec(timerange) -> int:
''' 1m, 3h, 2d, 1w --> now - delta as epoc secs '''
if timerange == '' or timerange is None:
return 0
time_delta = 0
try:
g = re.match(r'(\d+)([mhdwMY])', timerange)
num, range = g.groups(0)
if range == 'm':
time_delta = 60*int(num)
if range == 'h':
time_delta = 3600*int(num)
elif range == 'd':
time_delta = 24*3600*int(num)
elif range == 'w':
time_delta = 24*3600*7*int(num)
        elif range == 'M':
            time_delta = 30*24*3600*int(num)
        elif range == 'Y':
            time_delta = 365*24*3600*int(num)
except Exception as e:
print( f"timerange {timerange} is invalid valid examples: 5m, 1d, 2h, 1w, 1M, 1Y")
sys.exit(1)
return time_delta
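# Quick sanity examples (illustrative): timedelta_to_sec("30m") == 1800,
# timedelta_to_sec("2d") == 172800, while timedelta_to_epoc("1w") returns
# "now minus one week" as epoch seconds.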
def get_ssh_credential(config, nels_id: int, tmpfile=True):
nels_storage_client_key = config['nels_storage_client_key']
nels_storage_client_secret = config['nels_storage_client_secret']
nels_storage_url = config['nels_storage_url'].rstrip("/")
# make sure the id is a string
# nels_id = str(nels_id)
# api_url = 'https://nels.bioinfo.no/'
# api_url = 'https://test-fe.cbu.uib.no/nels-'
api_url = f"{nels_storage_url}/users/{nels_id}"
# logger.debug(f"API URL: {api_url}")
response = requests.get(api_url, auth=(nels_storage_client_key, nels_storage_client_secret))
if (response.status_code == requests.codes.ok):
json_response = response.json()
if tmpfile:
tmp = tempfile.NamedTemporaryFile(mode='w+t', suffix=".txt", dir=tmp_dir, delete=False)
tmp.write(json_response['key-rsa'])
tmp.close()
json_response['key_file'] = tmp.name
else:
outfile = f"{nels_id}.rsa"
file_utils.write(outfile, json_response['key-rsa'])
os.chmod(outfile, 0o600)
json_response['key_file'] = outfile
return json_response
else:
raise Exception("HTTP response code=%s" % str(response.status_code))
|
py | 1a3fe0a1e121af2c8b1fdcdc16bd16867500e0b0 | import numpy as np
from numpy.core.umath_tests import inner1d
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def image_histogram_equalization(image, number_bins=256):
'''histogram equalization the image
'''
# from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), number_bins, density=True)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
return image_equalized.reshape(image.shape) # , cdf
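# Minimal usage sketch (assumes `img` is a 2D grayscale numpy array):
#   eq = image_histogram_equalization(img)   # float array, same shape, values in 0-255
#   eq = eq.astype(np.uint8)                 # cast back for display or saving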
def elastic_transform(image, alpha=512, sigma=20, spline_order=1, mode='nearest', random_state=np.random):
"""Elastic deformation of image as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
image = image.reshape((256, 512, 1))
assert image.ndim == 3
shape = image.shape[:2]
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]
result = np.empty_like(image)
for i in range(image.shape[2]):
result[:, :, i] = map_coordinates(
image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)
return result
def center_crop(layer, target_size, target_size2):
_, _, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_size) // 2
xy2 = (layer_height - target_size2) // 2
return layer[:, :, xy1:(xy1 + target_size), xy2:(xy2 + target_size2)]
def pixel_list(im):
ret = []
i = 0
for x in im:
j = 0
for y in x:
if y > 0:
ret.append([i, j])
j += 1
i += 1
return np.array(ret)
def HausdorffDist(A, B):
# Hausdorf Distance: Compute the Hausdorff distance between two point
# clouds.
# Let A and B be subsets of metric space (Z,dZ),
# The Hausdorff distance between A and B, denoted by dH(A,B),
# is defined by:
# dH(A,B) = max(h(A,B),h(B,A)),
# where h(A,B) = max(min(d(a,b))
# and d(a,b) is a L2 norm
# dist_H = hausdorff(A,B)
# A: First point sets (MxN, with M observations in N dimension)
# B: Second point sets (MxN, with M observations in N dimension)
# ** A and B may have different number of rows, but must have the same
# number of columns.
#
# Edward DongBo Cui; Stanford University; 06/17/2014
# Find pairwise distance
D_mat = np.sqrt(inner1d(A, A)[np.newaxis].T +
inner1d(B, B)-2*(np.dot(A, B.T)))
# Find DH
dH = np.max(
np.array([np.max(np.min(D_mat, axis=0)), np.max(np.min(D_mat, axis=1))]))
return(dH)
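# Tiny worked example (illustrative): with A = np.array([[0., 0.]]) and
# B = np.array([[3., 4.]]) the only pairwise distance is 5.0, so
# HausdorffDist(A, B) == 5.0.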
def get_n_fold(total, fold, idx):
if len(total) % fold != 0 or idx < 0 or idx >= fold:
raise ValueError
fd = total[idx::fold]
for f in fd:
total.remove(f)
return fd
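# Example (illustrative): with total = list(range(10)), get_n_fold(total, 5, 1)
# returns [1, 6] and removes those two entries from `total` in place.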
if __name__ == "__main__":
from PIL import Image
from matplotlib import pyplot as plt
prev_mask = Image.open('./data/ultrasound/ground truth/G0/01/0000.png')
prev_mask = elastic_transform(
np.array(prev_mask)).reshape(256, 512)
prev_mask = Image.fromarray(prev_mask)
plt.imshow(prev_mask, cmap='gray')
plt.show()
|
py | 1a3fe0d932841ca5f4e571d9c3240224b272d665 | import numpy
from panda3d.core import Point3, TransformState, Vec3
from panda3d.bullet import BulletSphereShape, BulletRigidBodyNode
from panda3d.ode import OdeBody, OdeMass, OdeSphereGeom
from .Ingredient import Ingredient
import cellpack.autopack as autopack
helper = autopack.helper
class SingleSphereIngr(Ingredient):
"""
This Ingredient is represented by a single sphere
and either a single radius, or a list of radii and offset vectors
for each sphere representing the ingredient
"""
def __init__(
self,
molarity=0.0,
radius=None,
position=None,
sphereFile=None,
packingPriority=0,
name=None,
pdb=None,
color=None,
nbJitter=5,
jitterMax=(1, 1, 1),
perturbAxisAmplitude=0.1,
principalVector=(1, 0, 0),
meshFile=None,
packingMode="random",
placeType="jitter",
Type="SingleSphere",
meshObject=None,
nbMol=0,
**kw
):
Ingredient.__init__(
self,
molarity=molarity,
radii=[[radius]],
positions=[[position]], # positions2=None,
sphereFile=sphereFile,
packingPriority=packingPriority,
name=name,
pdb=pdb,
color=color,
nbJitter=nbJitter,
jitterMax=jitterMax,
perturbAxisAmplitude=perturbAxisAmplitude,
principalVector=principalVector,
meshFile=meshFile,
packingMode=packingMode,
placeType=placeType,
meshObject=meshObject,
nbMol=nbMol,
Type=Type,
**kw
)
self.modelType = "Spheres"
if name is None:
name = "%5.2f_%f" % (radius, molarity)
self.name = name
self.singleSphere = True
# min and max radius for a single sphere should be the same
self.minRadius = radius
self.encapsulatingRadius = radius
# make a sphere ?->rapid ?
if self.mesh is None and autopack.helper is not None:
if not autopack.helper.nogui:
# if not autopack.helper.nogui :
# build a cylinder and make it length uLength, radius radii[0]
# this mesh is used bu RAPID for collision
p = autopack.helper.getObject("autopackHider")
if p is None:
p = autopack.helper.newEmpty("autopackHider")
if autopack.helper.host.find("blender") == -1:
autopack.helper.toggleDisplay(p, False)
self.mesh = autopack.helper.Sphere(
self.name + "_basic",
radius=self.radii[0][0],
color=self.color,
parent=p,
res=24,
)[0]
else:
self.mesh = autopack.helper.unitSphere(
self.name + "_basic", 5, radius=self.radii[0][0]
)[0]
self.getData()
# should do that for all ingredient type
if self.representation is None and not hasattr(
self.mesh, "getFaces"
): # this is not working with dejavu
# and should go in the graphics.
if not autopack.helper.nogui:
self.representation = autopack.helper.Sphere(
self.name + "_rep",
radius=self.radii[0][0],
color=self.color,
parent=self.mesh,
res=24,
)[0]
else:
self.representation = autopack.helper.Icosahedron(
self.name + "_rep", radius=self.radii[0][0]
)[0]
def collides_with_compartment(
self,
jtrans,
rotMat,
level,
gridPointsCoords,
histoVol,
):
"""
Check spheres for collision
TODO improve the testwhen grid stepSize is larger that size of the ingredient
"""
centers = self.positions[level]
radii = (self.radii[level],)
centT = self.transformPoints(jtrans, rotMat, centers) # this should be jtrans
for radc, posc in zip(radii, centT):
ptsInSphere = histoVol.grid.getPointsInSphere(posc, radc[0]) # indices
compIdsSphere = numpy.take(histoVol.grid.gridPtId, ptsInSphere, 0)
if self.compNum <= 0:
wrongPt = [cid for cid in compIdsSphere if cid != self.compNum]
if len(wrongPt):
print("OK false compartment", len(wrongPt))
return True
return False
def get_new_distance_values(
self, jtrans, rotMatj, gridPointsCoords, distance, dpad, level=0
):
self.centT = centT = self.transformPoints(
jtrans, rotMatj, self.positions[level]
)
centT = self.centT # self.transformPoints(jtrans, rotMatj, self.positions[-1])
insidePoints = {}
newDistPoints = {}
for radc, posc in zip(self.radii[-1], centT):
rad = radc + dpad
ptsInSphere = self.env.grid.getPointsInSphere(posc, rad)
delta = numpy.take(gridPointsCoords, ptsInSphere, 0) - posc
delta *= delta
distA = numpy.sqrt(delta.sum(1))
for pti in range(len(ptsInSphere)):
pt = ptsInSphere[pti]
dist = distA[pti]
d = dist - radc
if d <= 0: # point is inside dropped sphere
if pt in insidePoints:
if abs(d) < abs(insidePoints[pt]):
insidePoints[pt] = d
else:
insidePoints[pt] = d
elif d < distance[pt]: # point in region of influence
if pt in newDistPoints:
if d < newDistPoints[pt]:
newDistPoints[pt] = d
else:
newDistPoints[pt] = d
return insidePoints, newDistPoints
def add_rb_node(self, worldNP):
shape = BulletSphereShape(self.encapsulatingRadius)
inodenp = worldNP.attachNewNode(BulletRigidBodyNode(self.name))
inodenp.node().setMass(1.0)
# inodenp.node().addShape(shape)
inodenp.node().addShape(
shape, TransformState.makePos(Point3(0, 0, 0))
) # rotation ?
# spherenp.setPos(-2, 0, 4)
return inodenp
def add_rb_node_ode(self, world, jtrans, pMat):
body = OdeBody(world)
M = OdeMass()
M.setSphereTotal(1.0, self.encapsulatingRadius)
body.setMass(M)
body.setPosition(Vec3(jtrans[0], jtrans[1], jtrans[2]))
body.setRotation(pMat)
# the geometry for the collision ?
geom = OdeSphereGeom(self.ode_space, self.encapsulatingRadius)
geom.setBody(body)
return geom
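# Construction sketch (hypothetical values; only keyword arguments from the
# signature above are used):
#   ingr = SingleSphereIngr(molarity=0.25, radius=25.0, position=[0.0, 0.0, 0.0],
#                           name="demo_sphere", color=(1.0, 0.0, 0.0))
#   ingr.minRadius == ingr.encapsulatingRadius == 25.0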
|
py | 1a3fe12a2c5d6d7c7714b966a46afe0f491a0c4d | import argparse
import json
import sys
import time
import uuid
import os
import sh
from sh import docker
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
from configfinder import config_settings
def build_and_commit(package: str, fuzzer_image: str, json_output_path: str = None, qemu=False, timeout=None) -> str:
"""
This builds a package inside a docker container and then commits the container to an image.
:return:
"""
start = time.time()
docker_image_name = package + "_" + str(uuid.uuid4())[:8]
docker_container_name = str(uuid.uuid4())
try:
if not qemu:
build_process = docker.run('--cpus=0.90', "--privileged", "--name", docker_container_name, "--entrypoint",
"python", fuzzer_image, "/inputinferer/configfinder/builder_wrapper.py", "-p",
package, _out=sys.stdout, _ok_code=[config_settings.BUILDER_BUILD_NORMAL,
config_settings.BUILDER_BUILD_FAILED,
config_settings.BUILDER_BUILD_QEMU],
_timeout=timeout) # type: sh.RunningCommand
else:
build_process = docker.run('--cpus=0.90', "--privileged", "--name", docker_container_name, "--entrypoint",
"python", fuzzer_image, "/inputinferer/configfinder/builder_wrapper.py",
"-p", package, "-Q",
_out=sys.stdout,
_ok_code=[config_settings.BUILDER_BUILD_NORMAL,
config_settings.BUILDER_BUILD_FAILED,
config_settings.BUILDER_BUILD_QEMU],
_timeout=timeout) # type: sh.RunningCommand
except sh.TimeoutException as e:
print("Building {0} timed out!".format(package))
return None
exit_code = build_process.exit_code
if exit_code == -1:
print("Failed to build image for package {0}, not commiting".format(package))
return None
docker.commit(docker_container_name, docker_image_name, _out=sys.stdout)
end = time.time()
if json_output_path is not None:
json_dict = {}
json_dict["docker_image_name"] = docker_image_name
if exit_code == config_settings.BUILDER_BUILD_NORMAL:
json_dict["qemu"] = False
elif exit_code == config_settings.BUILDER_BUILD_QEMU:
json_dict["qemu"] = True
json_dict["time"] = end - start
with open(json_output_path, "w") as json_output_fp:
json.dump(json_dict, json_output_fp)
docker.rm(docker_container_name) # Remove the image after we commited
return docker_image_name
def return_current_package_image(package: str, fuzzer_image: str, package_image: str, json_output_path: str = None,
qemu=False, timeout=None) -> str:
"""
Checks if the current package_image still exists and if not creates a new one.
"""
output = str(docker.images(package_image))
print(output.split("\n"))
if len(output.split("\n")) > 2:
return package_image
else:
return build_and_commit(package, fuzzer_image=fuzzer_image, json_output_path=json_output_path, qemu=qemu,
timeout=timeout)
def get_image_or_store_in_buildfile(package: str, fuzzer_image, buildfile_path: str, qemu=False):
if not os.path.exists(buildfile_path):
return build_and_commit(package, fuzzer_image=fuzzer_image, json_output_path=buildfile_path, qemu=qemu)
else:
with open(buildfile_path, "r") as fp:
build_dict = json.load(fp)
return return_current_package_image(package, fuzzer_image, build_dict["docker_image_name"],
json_output_path=buildfile_path, qemu=qemu)
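# Usage sketch (hypothetical package/image/path names): reuse the previously
# committed image recorded in the build file if it still exists, otherwise
# rebuild and commit a fresh one:
#   image = get_image_or_store_in_buildfile("binutils", "fuzzer-base:latest",
#                                           "/tmp/binutils_build.json")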
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Start the building Process')
parser.add_argument("-di", "--base_image", required=True, type=str, help="Fuzzer image.")
parser.add_argument("-p", "--package", required=True, type=str,
help="The package to build")
parser.add_argument("-out", "--output_path", required=False, type=str, default=None,
help="Where to store the json configuration?")
arguments = parser.parse_args()
    build_and_commit(package=arguments.package, fuzzer_image=arguments.base_image,
                     json_output_path=arguments.output_path)
|
py | 1a3fe4e3fbf5f750f4cc98a381d43578efe71abc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 10:08:27 2018
@author: rflamary
"""
import numpy as np
import pylab as pl
import scipy
import scipy.optimize
import stdgrb
import time
t_start=time.clock()
def tic():
global t_start
t_start=time.clock()
def toc():
global t_start
t=time.clock()-t_start
print('Elapsed time: {:1.3f}s'.format(t))
return t
#%%
n=2000
d=200
np.random.seed(0)
c=-np.random.rand(d)
A=np.random.rand(n,d)
b=np.random.rand(n)
lb=np.zeros(d)
ub=np.ones(d)
#%%
print('Scipy simplex solver')
tic()
sol=scipy.optimize.linprog(c,A,b)
x0=sol.x
v0=sol.fun
toc()
print('Scipy interior point solver')
tic()
sol=scipy.optimize.linprog(c,A,b,method='interior-point')
x00=sol.x
v00=sol.fun
toc()
print('Default method')
tic()
x1,v1=stdgrb.lp_solve(c,A,b,lb,ub,logtoconsole=0)
toc()
print('Simplex method')
tic()
x2,v2=stdgrb.lp_solve(c,A,b,lb,ub,1,logtoconsole=0)
toc()
print('Interior point method')
tic()
x3,v3=stdgrb.lp_solve(c,A,b,lb,ub,2,logtoconsole=0,crossover=0)
toc()
#%%
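# Optional summary (an added sketch, not in the original benchmark): all five
# objective values should agree up to solver tolerance.
#   print('{:1.6f} {:1.6f} {:1.6f} {:1.6f} {:1.6f}'.format(v0, v00, v1, v2, v3))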
|
py | 1a3fe6405fe577d0765e5e8ddad6772cb99b75fc | from .pymap import pyMAP
|
py | 1a3fe65ec2a53c7cc81adf95273e2c1b74c14dbc | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-06 14:49
from __future__ import unicode_literals
import cms.models.streamfield
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('cms', '0029_livefeedsblock'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='blogpost',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='indexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='organisationindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='organisationpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='personindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='personpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='richtextpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='workindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
migrations.AlterField(
model_name='workpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock()), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock())])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock()), ('subtitle', wagtail.wagtailcore.blocks.CharBlock()), ('description', wagtail.wagtailcore.blocks.TextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())])))], label='Image list section')), (b'image_grid', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('url', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False))])))], icon='table', label='Image grid section')), (b'featured_pages', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'starred_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), required=False))], icon='doc-full', label='Featured pages section')), (b'live_feeds', wagtail.wagtailcore.blocks.StructBlock([(b'blog_index_page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'twitter', wagtail.wagtailcore.blocks.CharBlock())], icon='wagtail', label='Live feeds section (blog/twitter)')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock()), (b'affiliation', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'link', 
wagtail.wagtailcore.blocks.StructBlock([(b'url', wagtail.wagtailcore.blocks.URLBlock(required=False)), (b'page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), (b'label', wagtail.wagtailcore.blocks.CharBlock()), (b'style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML'))]),
),
]
|
py | 1a3fe6c7ce6958de8760c23eaab983faa8f80f85 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines test inputs and invocations for JAX primitives.
Used to test various implementations of JAX primitives, e.g., against
NumPy (lax_reference) or TensorFlow.
"""
import operator
from typing import Any, Callable, Dict, Iterable, Optional, NamedTuple, Sequence, Tuple, Union
from functools import partial
from absl import testing
import jax
from jax import config
from jax import dtypes
from jax import test_util as jtu
from jax import lax
from jax import lax_linalg
from jax import numpy as jnp
from jaxlib import xla_client
import numpy as np
FLAGS = config.FLAGS
Rng = Any # A random number generator
class RandArg(NamedTuple):
"""Descriptor for a randomly generated argument.
See description of `Harness`.
"""
shape: Tuple[int, ...]
dtype: np.dtype
class StaticArg(NamedTuple):
"""Descriptor for a static argument.
See description of `Harness`.
"""
value: Any
class Harness:
"""Specifies inputs and callable for a primitive.
A harness is conceptually a callable and a list of arguments, that together
exercise a use case. The harness can optionally have additional parameters
that can be used by the test.
The arguments are specified through argument descriptors. An argument
descriptor can be:
* a numeric value or ndarray, or
* an instance of ``RandArg(shape, dtype)`` to be used with a PRNG to generate
a random tensor of the given shape and type, or
* an instance of ``StaticArg(value)``. These are values that specialize the
callable, but are not exposed as external arguments.
For example, a harness for ``lax.take(arr, indices, axis=None)`` may want
to expose the array and the indices as external (dynamic) arguments, and
keep the axis as a static argument (technically specializing `take` to
an axis):
Harness(f"take_axis={axis}",
lax.take,
[RandArg((2, 4), np.float32), np.array([-1, 0, 1]), StaticArg(axis)],
axis=axis)
"""
# Descriptive name of the harness, used as a testcase_name. Unique in a group.
name: str
# The function taking all arguments (static and dynamic).
fun: Callable
arg_descriptors: Sequence[Union[RandArg, StaticArg, Any]]
rng_factory: Callable
params: Dict[str, Any]
def __init__(self, name, fun, arg_descriptors, *,
rng_factory=jtu.rand_default, **params):
self.name = name
self.fun = fun
self.arg_descriptors = arg_descriptors
self.rng_factory = rng_factory
self.params = params
def __str__(self):
return self.name
def _arg_maker(self, arg_descriptor, rng: Rng):
if isinstance(arg_descriptor, StaticArg):
return arg_descriptor.value
if isinstance(arg_descriptor, RandArg):
return self.rng_factory(rng)(arg_descriptor.shape, arg_descriptor.dtype)
return arg_descriptor
def args_maker(self, rng: Rng) -> Sequence:
"""All-argument maker, including the static ones."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors]
def dyn_args_maker(self, rng: Rng) -> Sequence:
"""A dynamic-argument maker, for use with `dyn_fun`."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors
if not isinstance(ad, StaticArg)]
def dyn_fun(self, *dyn_args):
"""Invokes `fun` given just the dynamic arguments."""
all_args = self._args_from_dynargs(dyn_args)
return self.fun(*all_args)
def _args_from_dynargs(self, dyn_args: Sequence) -> Sequence:
"""All arguments, including the static ones."""
next_dynamic_argnum = 0
all_args = []
for ad in self.arg_descriptors:
if isinstance(ad, StaticArg):
all_args.append(ad.value)
else:
all_args.append(dyn_args[next_dynamic_argnum])
next_dynamic_argnum += 1
return all_args
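# Illustrative sketch, not part of the original module: how a Harness is built and
# how its argument descriptors are materialized. The harness name, shapes and the
# use of np.random.RandomState as the Rng are assumptions for demonstration only.
def _example_harness_usage():
  harness = Harness("example_take_axis=0",
                    lambda a, i, axis: jnp.take(a, i, axis=axis),
                    [RandArg((2, 4), np.float32),
                     np.array([1, 0], dtype=np.int32),
                     StaticArg(0)],
                    axis=0)
  rng = np.random.RandomState(0)
  # args_maker materializes every descriptor, including the StaticArg axis.
  all_args = harness.args_maker(rng)
  # dyn_args_maker skips StaticArg entries; dyn_fun re-inserts them before calling.
  dyn_args = harness.dyn_args_maker(rng)
  return harness.fun(*all_args), harness.dyn_fun(*dyn_args)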
def parameterized(harness_group: Iterable[Harness],
one_containing : Optional[str] = None):
"""Decorator for tests.
The tests receive a `harness` argument.
The `one_containing` parameter is useful for debugging. If given, then
picks only one harness whose name contains the string. The whole set of
parameterized tests is reduced to one test, whose name is not decorated
to make it easier to pick for running.
"""
cases = tuple(
dict(testcase_name=harness.name if one_containing is None else "",
harness=harness)
for harness in harness_group
if one_containing is None or one_containing in harness.name)
if one_containing is not None:
if not cases:
      raise ValueError(f"Cannot find test case with name containing {one_containing}."
                       " Names are:\n" +
                       "\n".join([harness.name for harness in harness_group]))
cases = cases[0:1]
return testing.parameterized.named_parameters(*cases)
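# Illustrative sketch, not from the original file, of how `parameterized` is meant
# to be used on a test method. The test class, method name and assertion below are
# hypothetical; only the decorator and the `harness` keyword argument come from
# this module.
#
#   class PrimitiveTest(jtu.JaxTestCase):
#
#     @parameterized(lax_unary_elementwise, one_containing=None)
#     def test_unary_elementwise(self, harness: Harness):
#       args = harness.dyn_args_maker(self.rng())
#       ...  # run harness.dyn_fun(*args) and compare against a reference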
### Harness definitions ###
###
_LAX_UNARY_ELEMENTWISE = (
lax.abs, lax.acosh, lax.asinh, lax.atanh, lax.bessel_i0e, lax.bessel_i1e,
lax.ceil, lax.cos, lax.cosh, lax.digamma, lax.erf, lax.erf_inv, lax.erfc,
lax.exp, lax.expm1, lax.floor, lax.is_finite, lax.lgamma, lax.log,
lax.log1p, lax.neg, lax.round, lax.rsqrt, lax.sign, lax.sin, lax.sinh,
lax.sqrt, lax.tan, lax.tanh)
lax_unary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_UNARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg in [
np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype)
]
)
lax_bitwise_not = tuple(
[Harness(f"{jtu.dtype_str(dtype)}",
lax.bitwise_not,
[arg],
dtype=dtype)
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg in [
np.array([-1, -3, -2, 0, 0, 2, 1, 3], dtype=dtype),
]] +
[Harness("bool",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_not]
for arg in [
np.array([True, False])
]]
)
lax_population_count = tuple(
Harness(f"{jtu.dtype_str(dtype)}",
lax.population_count,
[arg],
dtype=dtype)
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg in [
np.array([-1, -2, 0, 1], dtype=dtype)
]
)
def _get_max_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
lax_add_mul = tuple(
Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
f_jax,
[lhs, rhs],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.add, lax.mul]
for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
for lhs, rhs in [
(np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
]
) + tuple(
Harness(f"fun={f_jax.__name__}_bounds_{jtu.dtype_str(dtype)}",
f_jax,
[StaticArg(lhs), StaticArg(rhs)],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.add, lax.mul]
for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
for lhs, rhs in [
(np.array([3, 3], dtype=dtype),
np.array([_get_max_identity(dtype), _get_min_identity(dtype)], dtype=dtype))
]
)
lax_min_max = tuple(
Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
f_jax,
[lhs, rhs],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.min, lax.max]
for dtype in jtu.dtypes.all
for lhs, rhs in [
(np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
]
) + tuple(
Harness(f"fun={f_jax.__name__}_inf_nan_{jtu.dtype_str(dtype)}_{lhs[0]}_{rhs[0]}",
f_jax,
[StaticArg(lhs), StaticArg(rhs)],
f_jax=f_jax,
dtype=dtype)
for f_jax in [lax.min, lax.max]
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for lhs, rhs in [
(np.array([np.inf], dtype=dtype), np.array([np.nan], dtype=dtype)),
(np.array([-np.inf], dtype=dtype), np.array([np.nan], dtype=dtype))
]
)
_LAX_BINARY_ELEMENTWISE = (
lax.add, lax.atan2, lax.div, lax.igamma, lax.igammac, lax.max, lax.min,
lax.nextafter, lax.rem, lax.sub)
lax_binary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype
)
for f_lax in _LAX_BINARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg1, arg2 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.1, 0.2, 1., 1.4, -1.6], dtype=dtype))
]
)
_LAX_BINARY_ELEMENTWISE_LOGICAL = (
lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor, lax.shift_left,
)
lax_binary_elementwise_logical = tuple(
[Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_BINARY_ELEMENTWISE_LOGICAL
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg1, arg2 in [
(np.array([1, 3, 2, 0, 0, 2, 1, 3], dtype=dtype),
np.array([1, 2, 3, 0, 1, 0, 2, 3], dtype=dtype))
]
] +
[Harness(f"{f_lax.__name__}_bool",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor]
for arg1, arg2 in [
(np.array([True, True, False, False]),
np.array([True, False, True, False])),
]
]
)
lax_betainc = tuple(
Harness(f"_{jtu.dtype_str(dtype)}",
lax.betainc,
[arg1, arg2, arg3],
dtype=dtype)
for dtype in jtu.dtypes.all_floating
for arg1, arg2, arg3 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.3, 1, 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.2, 0.1, 1, 1.4, -1.6], dtype=dtype),
np.array([1.0, -1.0, 2.0, 1.0, 0.3, 0.3, -1.0, 2.4, 1.6], dtype=dtype))
]
)
_gather_input = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
lax_gather = tuple(
# Construct gather harnesses using take
[Harness(f"from_take_indices_shape={indices.shape}_axis={axis}",
lambda a, i, axis: jnp.take(a, i, axis=axis),
[_gather_input,
indices,
StaticArg(axis)])
for indices in [
# Ensure each set of indices has a distinct shape
np.array(2, dtype=np.int32),
np.array([2], dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([[2, 4], [5, 6]], dtype=np.int32),
np.array([0, 1, 10], dtype=np.int32), # Index out of bounds
np.array([0, 1, 2, -1], dtype=np.int32), # Index out of bounds
]
for axis in [0, 1, 2]] +
# Directly from lax.gather in lax_test.py.
[Harness(
f"_shape={shape}_idxs_shape={idxs.shape}_dnums={dnums}_slice_sizes={slice_sizes}",
lambda op, idxs, dnums, slice_sizes: lax.gather(op, idxs, dimension_numbers=dnums, slice_sizes=slice_sizes),
[RandArg(shape, np.float32),
idxs, StaticArg(dnums), StaticArg(slice_sizes)])
for shape, idxs, dnums, slice_sizes in [
((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
]
)
lax_scatter = tuple(
# Directly from lax.scatter in tests/lax_test.py
Harness(
f"fun={f_lax.__name__}_shape={jtu.format_shape_dtype_string(shape, dtype)}_scatterindices={scatter_indices.tolist()}_updateshape={update_shape}_updatewindowdims={dimension_numbers.update_window_dims}_insertedwindowdims={dimension_numbers.inserted_window_dims}_scatterdimstooperanddims={dimension_numbers.scatter_dims_to_operand_dims}_indicesaresorted={indices_are_sorted}_uniqueindices={unique_indices}".replace(' ', ''),
partial(f_lax, indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices),
[RandArg(shape, dtype), StaticArg(scatter_indices),
RandArg(update_shape, dtype), StaticArg(dimension_numbers)],
f_lax=f_lax,
shape=shape,
dtype=dtype,
scatter_indices=scatter_indices,
update_shape=update_shape,
dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
# We explicitly decide against testing lax.scatter, as its reduction function
# is lambda x, y: y, which is not commutative and thus makes results
# non-deterministic when an index into the operand is updated several times.
for f_lax in [lax.scatter_min, lax.scatter_max, lax.scatter_mul,
lax.scatter_add]
for dtype in { lax.scatter_min: jtu.dtypes.all
, lax.scatter_max: jtu.dtypes.all
# lax.scatter_mul and lax.scatter_add are not compatible with
# np.bool_ operands.
, lax.scatter_mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.scatter_add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
}[f_lax]
for shape, scatter_indices, update_shape, dimension_numbers in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for indices_are_sorted in [False, True]
# `unique_indices` does not affect correctness, only performance, and thus
# does not need to be tested here. If/when it will make sense to add a test
# with `unique_indices` = True, particular care will have to be taken with
# regards to the choice of parameters, as the results are only predictable
# when all the indices to be updated are pairwise non-overlapping. Identifying
# such cases is non-trivial.
for unique_indices in [False]
)
lax_pad = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_pads={pads}",
lax.pad,
[RandArg(arg_shape, dtype), np.array(0, dtype), StaticArg(pads)],
rng_factory=jtu.rand_small,
arg_shape=arg_shape, dtype=dtype, pads=pads)
for arg_shape in [(2, 3)]
for dtype in jtu.dtypes.all
for pads in [
[(0, 0, 0), (0, 0, 0)], # no padding
[(1, 1, 0), (2, 2, 0)], # only positive edge padding
[(1, 2, 1), (0, 1, 0)], # edge padding and interior padding
[(0, 0, 0), (-1, -1, 0)], # negative padding
[(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges
[(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension
]
)
lax_top_k = tuple( # random testing
Harness(f"_inshape={jtu.format_shape_dtype_string(shape, dtype)}_k={k}",
lax.top_k,
[RandArg(shape, dtype), StaticArg(k)],
shape=shape,
dtype=dtype,
k=k)
for dtype in jtu.dtypes.all
for shape in [(3,), (5, 3)]
for k in [-1, 1, 3, 4]
for rng_factory in [jtu.rand_default]
) + tuple( # stability test
Harness(f"stability_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([5, 7, 5, 8, 8, 5], dtype=np.int32)
]
for k in [1, 3, 6]
) + tuple( # nan/inf sorting test
Harness(f"nan_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([+np.inf, np.nan, -np.nan, np.nan, -np.inf, 3], dtype=np.float32)
]
for k in [1, 3, 6]
)
lax_sort = tuple( # one array, random data, all axes, all dtypes
Harness(f"one_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
) + tuple( # one array, potential edge cases
Harness(f"one_special_array_shape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[arr, StaticArg(dimension), StaticArg(is_stable)],
shape=arr.shape,
dimension=dimension,
dtype=arr.dtype,
is_stable=is_stable)
for arr, dimension in [
[np.array([+np.inf, np.nan, -np.nan, -np.inf, 2, 4, 189], dtype=np.float32), -1]
]
for is_stable in [False, True]
) + tuple( # 2 arrays, random data, all axes, all dtypes
Harness(f"two_arrays_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2], is_stable=args[-1], num_keys=1),
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
) + tuple( # 3 arrays, random data, all axes, all dtypes
Harness(f"three_arrays_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2], is_stable=args[-1], num_keys=1),
[RandArg(shape, dtype), RandArg(shape, dtype), RandArg(shape, dtype),
StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,)]
for dimension in (0,)
for is_stable in [False, True]
)
lax_linalg_qr = tuple(
Harness(f"multi_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}",
lax_linalg.qr,
[RandArg(shape, dtype), StaticArg(full_matrices)],
shape=shape,
dtype=dtype,
full_matrices=full_matrices)
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
for full_matrices in [False, True]
)
def _fft_harness_gen(nb_axes):
def _fft_rng_factory(dtype):
_all_integers = jtu.dtypes.all_integer + jtu.dtypes.all_unsigned + jtu.dtypes.boolean
# For integer types, use small values to keep the errors small
if dtype in _all_integers:
return jtu.rand_small
else:
return jtu.rand_default
return tuple(
Harness(f"{nb_axes}d_shape={jtu.format_shape_dtype_string(shape, dtype)}_ffttype={fft_type}_fftlengths={fft_lengths}",
lax.lax_fft.fft,
[RandArg(shape, dtype), StaticArg(fft_type), StaticArg(fft_lengths)],
rng_factory=_fft_rng_factory(dtype),
shape=shape,
dtype=dtype,
fft_type=fft_type,
fft_lengths=fft_lengths)
for dtype in jtu.dtypes.all
for shape in filter(lambda x: len(x) >= nb_axes,
[(10,), (12, 13), (14, 15, 16), (14, 15, 16, 17)])
for fft_type, fft_lengths in [(xla_client.FftType.FFT, shape[-nb_axes:]),
(xla_client.FftType.IFFT, shape[-nb_axes:]),
(xla_client.FftType.RFFT, shape[-nb_axes:]),
(xla_client.FftType.IRFFT,
shape[-nb_axes:-1] + ((shape[-1] - 1) * 2,))]
if not (dtype in jtu.dtypes.complex and fft_type == xla_client.FftType.RFFT)
)
lax_fft = tuple(_fft_harness_gen(1) + _fft_harness_gen(2) + _fft_harness_gen(3) +
_fft_harness_gen(4))
lax_linalg_svd = tuple(
Harness(f"shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}_computeuv={compute_uv}",
lambda *args: lax_linalg.svd_p.bind(args[0], full_matrices=args[1],
compute_uv=args[2]),
[RandArg(shape, dtype), StaticArg(full_matrices), StaticArg(compute_uv)],
shape=shape,
dtype=dtype,
full_matrices=full_matrices,
compute_uv=compute_uv)
for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
for shape in [(2, 2), (2, 7), (29, 29), (2, 3, 53), (2, 3, 29, 7)]
for full_matrices in [False, True]
for compute_uv in [False, True]
)
lax_slice = tuple(
Harness(f"_shape={shape}_start_indices={start_indices}_limit_indices={limit_indices}_strides={strides}", # type: ignore
lax.slice,
[RandArg(shape, dtype), # type: ignore
StaticArg(start_indices), # type: ignore
StaticArg(limit_indices), # type: ignore
StaticArg(strides)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
limit_indices=limit_indices) # type: ignore
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
# out-of-bounds cases
[(5,), (-1,), (0,), None],
[(5,), (-1,), (1,), None],
[(5,), (-4,), (-2,), None],
[(5,), (-5,), (-2,), None],
[(5,), (-6,), (-5,), None],
[(5,), (-10,), (-9,), None],
[(5,), (-100,), (-99,), None],
[(5,), (5,), (6,), None],
[(5,), (10,), (11,), None],
[(5,), (0,), (100,), None],
[(5,), (3,), (6,), None]
]
for dtype in [np.float32]
)
# Use lax_slice, but (a) make the start_indices dynamic arg, and (b) no strides.
lax_dynamic_slice = [
Harness(harness.name,
lax.dynamic_slice,
[harness.arg_descriptors[0],
np.array(list(start_indices)),
StaticArg(tuple(map(operator.sub, limit_indices, start_indices)))],
**harness.params)
for harness in lax_slice
for start_indices in [harness.params["start_indices"]]
for limit_indices in [harness.params["limit_indices"]]
]
lax_dynamic_update_slice = tuple(
Harness((f"_operand={jtu.format_shape_dtype_string(shape, dtype)}" # type: ignore
f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
f"_start_indices={start_indices}"),
lax.dynamic_update_slice,
[RandArg(shape, dtype), # type: ignore
RandArg(update_shape, update_dtype), # type: ignore
np.array(start_indices)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
update_shape=update_shape) # type: ignore
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(3,), (-1,), (1,)], # out-of-bounds
[(3,), (10,), (1,)], # out-of-bounds
[(3,), (10,), (4,)], # out-of-bounds shape too big
[(3,), (10,), (2,)], # out-of-bounds
]
for dtype, update_dtype in [
(np.float32, np.float32),
(np.float64, np.float64)
])
lax_squeeze = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_dimensions={dimensions}", # type: ignore
lax.squeeze,
[RandArg(arg_shape, dtype), StaticArg(dimensions)], # type: ignore[has-type]
arg_shape=arg_shape, dtype=dtype, dimensions=dimensions) # type: ignore[has-type]
for arg_shape, dimensions in [
[(1,), (0,)],
[(1,), (-1,)],
[(2, 1, 4), (1,)],
[(2, 1, 4), (-2,)],
[(2, 1, 3, 1), (1,)],
[(2, 1, 3, 1), (1, 3)],
[(2, 1, 3, 1), (3,)],
[(2, 1, 3, 1), (1, -1)],
]
for dtype in [np.float32]
)
shift_inputs = [
(arg, dtype, shift_amount)
for dtype in jtu.dtypes.all_unsigned + jtu.dtypes.all_integer
for arg in [
np.array([-250, -1, 0, 1, 250], dtype=dtype),
]
for shift_amount in [0, 1, 2, 3, 7]
]
lax_shift_left = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_left,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))])
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_logical = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_logical,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))],
dtype=dtype)
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_arithmetic = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_arithmetic,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))],
dtype=dtype)
for arg, dtype, shift_amount in shift_inputs
)
lax_select_and_gather_add = tuple(
# Tests with 2d shapes (see tests.lax_autodiff_test.testReduceWindowGrad)
Harness(f"2d_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}",
lax._select_and_gather_add,
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(select_prim),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation),
StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for dtype in jtu.dtypes.all_floating
for shape in [(4, 6)]
for select_prim in [lax.le_p, lax.ge_p]
for window_dimensions in [(2, 1), (1, 2)]
for window_strides in [(1, 1), (2, 1), (1, 2)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 3), (1, 2))]))
for base_dilation in [(1, 1)]
for window_dilation in [(1, 1)]
) + tuple(
# Tests with 4d shapes (see tests.lax_autodiff_test.testReduceWindowGrad)
Harness(f"4d_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}",
lax._select_and_gather_add,
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(select_prim),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation),
StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for dtype in jtu.dtypes.all_floating
for shape in [(3, 2, 4, 6)]
for select_prim in [lax.le_p, lax.ge_p]
for window_dimensions in [(1, 1, 2, 1), (2, 1, 2, 1)]
for window_strides in [(1, 2, 2, 1), (1, 1, 1, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 1), (1, 0), (2, 3), (0, 2))]))
for base_dilation in [(1, 1, 1, 1)]
for window_dilation in [(1, 1, 1, 1)]
)
lax_reduce_window = tuple(
# Tests with 2d shapes (see tests.lax_test.testReduceWindow)
Harness(f"2d_shape={jtu.format_shape_dtype_string(shape, dtype)}_initvalue={init_value}_computation={computation.__name__}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}".replace(' ', ''),
lax.reduce_window,
[RandArg(shape, dtype), StaticArg(init_value), StaticArg(computation),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation), StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
init_value=init_value,
computation=computation,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for computation in [lax.add, lax.max, lax.min, lax.mul]
for dtype in { lax.add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.max: jtu.dtypes.all
, lax.min: jtu.dtypes.all
}[computation]
for init_value in map(
dtype,
(lambda ts: ts[0] if not dtype in jtu.dtypes.all_floating else ts[1])(
{ lax.add: ([0, 1], [0, 1])
, lax.mul: ([1], [1])
, lax.max: ([1], [-np.inf, 1])
, lax.min: ([0], [np.inf, 0])
}[computation]
)
)
for shape in [(4, 6)]
for window_dimensions in [(1, 2)]
for window_strides in [(2, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 3), (1, 2))]))
for base_dilation in [(2, 3)]
for window_dilation in [(1, 2)]
) + tuple(
# Tests with 4d shapes (see tests.lax_test.testReduceWindow)
Harness(f"4d_shape={jtu.format_shape_dtype_string(shape, dtype)}_initvalue={init_value}_computation={computation.__name__}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}".replace(' ', ''),
lax.reduce_window,
[RandArg(shape, dtype), StaticArg(init_value), StaticArg(computation),
StaticArg(window_dimensions), StaticArg(window_strides),
StaticArg(padding), StaticArg(base_dilation), StaticArg(window_dilation)],
shape=shape,
dtype=dtype,
init_value=init_value,
computation=computation,
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation)
for computation in [lax.add, lax.max, lax.min, lax.mul]
for dtype in { lax.add: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.mul: filter(lambda t: t != np.bool_, jtu.dtypes.all)
, lax.max: jtu.dtypes.all
, lax.min: jtu.dtypes.all
}[computation]
for init_value in map(
dtype,
(lambda ts: ts[0] if not dtype in jtu.dtypes.all_floating else ts[1])(
{ lax.add: ([0, 1], [0, 1])
, lax.mul: ([1], [1])
, lax.max: ([1], [-np.inf, 1])
, lax.min: ([0], [np.inf, 0])
}[computation]
)
)
for shape in [(3, 2, 4, 6)]
for window_dimensions in [(1, 1, 2, 1)]
for window_strides in [(1, 2, 2, 1)]
for padding in tuple(set([tuple(lax.padtype_to_pads(shape, window_dimensions,
window_strides, p))
for p in ['VALID', 'SAME']] +
[((0, 1), (1, 0), (2, 3), (0, 2))]))
for base_dilation in [(2, 1, 3, 2)]
for window_dilation in [(1, 2, 2, 1)]
)
random_gamma = tuple(
Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}",
jax.jit(jax.random.gamma),
[np.array([42, 43], dtype=np.uint32), RandArg(shape, dtype)])
for shape in ((), (3,))
for dtype in (np.float32, np.float64)
)
random_split = tuple(
Harness(f"_i={key_i}",
jax.jit(lambda key: jax.random.split(key, 2)),
[key])
for key_i, key in enumerate([np.array([0, 0], dtype=np.uint32),
np.array([42, 43], dtype=np.uint32),
np.array([0xFFFFFFFF, 0], dtype=np.uint32),
np.array([0, 0xFFFFFFFF], dtype=np.uint32),
np.array([0xFFFFFFFF, 0xFFFFFFFF], dtype=np.uint32)])
)
def _make_conv_harness(name, *, lhs_shape=(2, 3, 9, 10), rhs_shape=(3, 3, 4, 5),
dtype=np.float32, window_strides=(1, 1), precision=None,
padding=((0, 0), (0, 0)), lhs_dilation=(1, 1),
rhs_dilation=(1, 1), feature_group_count=1,
dimension_numbers=("NCHW", "OIHW", "NCHW"),
batch_group_count=1):
return Harness(f"_{name}_lhs={jtu.format_shape_dtype_string(lhs_shape, dtype)}_rhs={jtu.format_shape_dtype_string(rhs_shape, dtype)}_windowstrides={window_strides}_padding={padding}_lhsdilation={lhs_dilation}_rhsdilation={rhs_dilation}_dimensionnumbers={dimension_numbers}_featuregroupcount={feature_group_count}_batchgroupcount={batch_group_count}_precision={precision}".replace(' ', ''),
lax.conv_general_dilated,
[RandArg(lhs_shape, dtype), RandArg(rhs_shape, dtype),
StaticArg(window_strides), StaticArg(padding),
StaticArg(lhs_dilation), StaticArg(rhs_dilation),
StaticArg(dimension_numbers), StaticArg(feature_group_count),
StaticArg(batch_group_count), StaticArg(precision)],
lhs_shape=lhs_shape,
rhs_shape=rhs_shape,
dtype=dtype,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
lax_conv_general_dilated = tuple( # Validate dtypes and precision
# This first harness runs the tests for all dtypes and precisions using
# default values for all the other parameters. Variations of other parameters
# can thus safely skip testing their corresponding default value.
_make_conv_harness("dtype_precision", dtype=dtype, precision=precision)
for dtype in jtu.dtypes.all_inexact
for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,
lax.Precision.HIGHEST]
) + tuple( # Validate variations of feature_group_count and batch_group_count
_make_conv_harness("group_counts", lhs_shape=lhs_shape, rhs_shape=rhs_shape,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count)
for batch_group_count, feature_group_count in [
(1, 2), # feature_group_count != 1
(2, 1), # batch_group_count != 1
]
for lhs_shape, rhs_shape in [
((2 * batch_group_count, 3 * feature_group_count, 9, 10),
(3 * feature_group_count * batch_group_count, 3, 4, 5))
]
) + tuple( # Validate variations of window_strides
_make_conv_harness("window_strides", window_strides=window_strides)
for window_strides in [
(2, 3) # custom window
]
) + tuple( # Validate variations of padding
_make_conv_harness("padding", padding=padding)
for padding in [
((1, 2), (0, 0)), # padding only one spatial axis
((1, 2), (2, 1)) # padding on both spatial axes
]
) + tuple( # Validate variations of dilations
_make_conv_harness("dilations", lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation)
for lhs_dilation, rhs_dilation in [
((2, 2), (1, 1)), # dilation only on LHS (transposed)
((1, 1), (2, 3)), # dilation only on RHS (atrous)
((2, 3), (3, 2)) # dilation on both LHS and RHS (transposed & atrous)
]
) + tuple(
_make_conv_harness("dimension_numbers", lhs_shape=lhs_shape,
rhs_shape=rhs_shape, dimension_numbers=dimension_numbers)
# Dimension numbers and corresponding permutation
for dimension_numbers, lhs_shape, rhs_shape in [
(("NHWC", "HWIO", "NHWC"), (2, 9, 10, 3), (4, 5, 3, 3)), # TF default
(("NCHW", "HWIO", "NHWC"), (2, 3, 9, 10), (4, 5, 3, 3)), # custom
]
)
|
py | 1a3fe6d68550a60f414db234fb4131805106d6ee | # coding: utf-8
import toml
import logging
import argparse
from laputa.watch import Watcher
from laputa.record import Recorder
from laputa.notify import IFTTTNotifier
def read_config(file_name):
with open(file_name) as config_file:
config = toml.loads(config_file.read())
return config
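# Illustrative sketch of the expected config layout, inferred from the keys read in
# main() below; all values shown are placeholders, not real credentials.
#
#   [run]
#   log_file = "laputa.log"
#   record_file = "laputa.record"
#
#   [laputa]
#   weibo_uid = "1234567890"
#   ifttt_key = "your-ifttt-key"
#   ifttt_event = "weibo_update"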
def parse():
parser = argparse.ArgumentParser(description='Laputa, flying in the sky')
parser.add_argument('-c', metavar='CONFIG_FILE',
required=True, help='config file')
return parser.parse_args()
def main():
args = parse()
config = read_config(args.c)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
handler = logging.FileHandler(config['run']['log_file'])
handler.setLevel(logging.INFO)
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(logging_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
watcher = Watcher(config['laputa']['weibo_uid'],
Recorder(config['run']['record_file']),
IFTTTNotifier(config['laputa']['ifttt_key'],
config['laputa']['ifttt_event']))
watcher.watch()
|
py | 1a3fe77c4d8f7ccfb7d47f892644a5f8b4585d62 | import re
from pyson.JsonValue import JsonValue
class PartyId:
'''
Unique ID of a party in a negotiation. The name should start with a short
string indicating the party (eg, the party name plus some machine
identifier), optionally followed by an "_", and more characters to make the
partyid unique. We require the name and "-" so that the string up to the
first "-" can be used to determine between sessions which opponents are
instances of the same class.
<h2>Note</h2> Normally, negotiation parties should not create new Party IDs
as all needed IDs should be provided by the protocol.
'''
    def __init__(self, name: str):
'''
@param name a simple name, starting with letter, followed by zero or more
letters, digits or _.
'''
        if name is None or not re.fullmatch("[a-zA-Z]\\w*", name):
            raise ValueError("name " + repr(name)
                + " is not a letter followed by zero or more word characters (letter, digit or _)")
        self._name = name
@JsonValue()
def getName(self) -> str:
return self._name
def __repr__(self):
return self._name
def __eq__(self, other):
return super().__eq__(other) and isinstance(other, self.__class__) and \
self._name==other._name
def __hash__(self):
'''
support for using this in dict etc
'''
return hash(self._name)
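# Illustrative sketch, not part of the original module: constructing and comparing
# PartyId values. The names used here are arbitrary examples.
def _example_party_id_usage():
    alice1 = PartyId("alice_1")
    alice2 = PartyId("alice_2")
    # The text before the first "_" identifies the party class, so these two ids
    # can be recognized as instances of the same opponent across sessions.
    same_class = str(alice1).split("_")[0] == str(alice2).split("_")[0]
    # PartyId is hashable and comparable, so it can be used as a dictionary key.
    utilities = {alice1: 0.7, alice2: 0.3}
    return same_class, utilities[alice1]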
|
py | 1a3fe865d2a7a8b2422c235bd64417fa1d0b8ee3 | import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer
from pandas.core.window.indexers import ExpandingIndexer
def test_bad_get_window_bounds_signature():
class BadIndexer(BaseIndexer):
def get_window_bounds(self):
return None
indexer = BadIndexer()
with pytest.raises(ValueError, match="BadIndexer does not implement"):
Series(range(5)).rolling(indexer)
def test_expanding_indexer():
s = Series(range(10))
indexer = ExpandingIndexer()
result = s.rolling(indexer).mean()
expected = s.expanding().mean()
tm.assert_series_equal(result, expected)
def test_indexer_constructor_arg():
# Example found in computation.rst
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = df.rolling(indexer).sum()
expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
tm.assert_frame_equal(result, expected)
def test_indexer_accepts_rolling_args():
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if center and min_periods == 1 and closed == "both" and i == 2:
start[i] = 0
end[i] = num_values
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1)
result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
tm.assert_frame_equal(result, expected)
def test_win_type_not_implemented():
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
return np.array([0, 1]), np.array([1, 2])
df = DataFrame({"values": range(2)})
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):
df.rolling(indexer, win_type="boxcar")
@pytest.mark.parametrize("func", ["skew", "cov", "corr"])
def test_notimplemented_functions(func):
# GH 32865
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
return np.array([0, 1]), np.array([1, 2])
df = DataFrame({"values": range(2)})
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match=f"{func} is not supported"):
getattr(df.rolling(indexer), func)()
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},),
(
"max",
np.max,
[2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
{},
),
(
"std",
np.std,
[
1.0,
1.0,
1.0,
55.71654452,
54.85739087,
53.9845657,
1.0,
1.0,
0.70710678,
np.nan,
],
{"ddof": 1},
),
(
"var",
np.var,
[
1.0,
1.0,
1.0,
3104.333333,
3009.333333,
2914.333333,
1.0,
1.0,
0.500000,
np.nan,
],
{"ddof": 1},
),
],
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
values = np.arange(10)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, center=True)
result = getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, closed="right")
result = getattr(rolling, func)()
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
expected = constructor(expected)
tm.assert_equal(result, expected)
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
|
py | 1a3fe93bb53789b23ba889d2f9c3549aaaaa4d84 | #@+leo-ver=5-thin
#@+node:ekr.20101110092851.5742: * @file leoOPML.py
#@+<< docstring >>
#@+node:ekr.20060904103412.1: ** << docstring >>
#@@language rest
r'''A plugin to read and write Leo outlines in .opml
(http://en.wikipedia.org/wiki/OPML) format.
The OPML plugin creates two new commands that read and write Leo outlines in
OPML format. The read-opml-file command creates a Leo outline from an .opml
file. The write-opml-file command writes the present Leo outline to an .opml
file.
Various settings control what gets written to .opml files, and in what format.
As usual, you specify settings for the OPML plugin using leoSettings.leo. The
settings for the OPML are found in the node: @settings-->Plugins-->opml plugin.
Here are the settings that control the format of .opml files. The default values
are shown.
- @string opml_namespace = leo:com:leo-opml-version-1
The namespace urn for the xmlns attribute of <opml> elements. This value
typically is not used, but it should refer to Leo in some way.
- @bool opml_use_outline_elements = True
If True, Leo writes body text to <leo:body> elements nested in <outline>
elements. Otherwise, Leo writes body text to leo:body attributes of <outline>
elements.
- @string opml_version = 2.0
The opml version string written to the <OPML> element. Use 2.0 unless there is a
specific reason to use 1.0.
- @bool opml_write_body_text = True
Leo writes body text to the OPML file only if this is True.
- @bool opml_write_leo_details = True
If True, Leo writes the native attributes of Leo's <v> elements as attributes of
the opml <outline> elements.
The native attributes of <v> elements are a, t, vtag (new), tnodeList,
marks, expanded and descendentTnodeUnknownAttributes.
- @bool opml_write_leo_globals_attributes = True
If True, Leo writes body_outline_ratio and global_window_position attributes to
the <head> element of the .opml file.
- @bool opml_write_ua_attributes
If True, write unknownAttributes. **NOTE**: ua_attributes are not currently read
from opml.
- @bool opml_expand_ua_dictionary
If True, expand an unknownAttribute 'x' of type dict to 'ua_x_key0', 'ua_x_key1'
etc. **WARNING**: using this feature may prevent reading these ua_attributes from
opml, if that feature is implemented in the future.
- @bool opml_skip_ua_dictionary_blanks
If True, when expanding as above, skip blank dict entries.
'''
#@-<< docstring >>
# 2014/10/21: support Android outliner by treating _note attributes as body text.
# To do: read/write uA's.
printElements = [] # ['all','outline','head','body',]
# For traces.
#@+<< imports >>
#@+node:ekr.20060904103412.3: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
import leo.core.leoNodes as leoNodes
import xml.sax
import xml.sax.saxutils
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
#@-<< imports >>
#@+others
#@+node:ekr.20060904132527.9: ** Module level
#@+node:ekr.20060904103412.4: *3* init
def init():
'''Return True if the plugin has loaded successfully.'''
leoPlugins.registerHandler(('open2', 'new'), onCreate)
g.plugin_signon(__name__)
return True
#@+node:ekr.20060904103412.5: *3* onCreate
def onCreate(tag, keys):
c = keys.get('c')
if c:
c.opmlController = OpmlController(c)
#@+node:ekr.20060904141220: ** class NodeClass
class NodeClass:
'''
A class representing one outline element.
Use getters to access the attributes, properties and rules of this node.
'''
#@+others
#@+node:ekr.20060904141220.1: *3* node.__init__
def __init__(self):
self.attributes = {}
self.bodyString = ''
self.headString = ''
self.children = []
self.gnx = None
#@+node:ekr.20060904141220.2: *3* node.__str__ & __repr__
def __str__(self):
return '<node: %s>' % self.headString
__repr__ = __str__
#@+node:ekr.20060913220507: *3* dump
def dump(self):
print('\nnode: %s: %s' % (self.gnx, self.headString))
if self.children:
print('children:[')
for child in self.children:
print(' node: %s: %s' % (child.gnx, child.headString))
print(']')
else:
print('children:[]')
print('attrs: %s' % self.attributes.values())
#@-others
#@+node:ekr.20060904103412.6: ** class OpmlController
class OpmlController:
'''The controller class for this plugin.'''
#@+others
#@+node:ekr.20060904103412.7: *3* oc.__init__& reloadSettings
def __init__(self, c):
'''Ctor for OpmlController class.'''
self.c = c
c.opmlCommands = self
c.k.registerCommand('read-opml-file', self.readOpmlCommand)
c.k.registerCommand('write-opml-file', self.writeOpmlCommand)
self.currentVnode = None
self.topVnode = None
self.generated_gnxs = {} # Keys are gnx's (strings). Values are vnodes.
self.reloadSettings()
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
self.opml_read_derived_files = c.config.getBool('opml-read-derived-files')
self.opml_write_derived_files = c.config.getBool('opml-write-derived-files')
#@+node:ekr.20060914163456: *3* oc.createVnodes & helpers
def createVnodes(self, c, dummyRoot):
'''**Important**: this method and its helpers are low-level code
corresponding to link/unlink methods in leoNodes.py.
Modify this with extreme care.'''
self.generated_gnxs = {}
parent_v = c.hiddenRootNode
parent_v.children = []
children = self.createChildren(c, dummyRoot, parent_v)
assert c.hiddenRootNode.children == children
return children
#@+node:ekr.20060914171659.2: *4* oc.createChildren
# node is a NodeClass object, parent_v is a VNode.
def createChildren(self, c, node, parent_v):
children = []
for child in node.children:
gnx = child.gnx
v = gnx and self.generated_gnxs.get(gnx)
if not v:
v = self.createVnode(c, child, v)
self.createChildren(c, child, v)
children.append(v)
parent_v.children = children
for child in children:
child.parents.append(parent_v)
return children
#@+node:ekr.20060914171659.1: *4* oc.createVnode & helpers
def createVnode(self, c, node, v=None):
if not v:
v = leoNodes.VNode(context=c)
v.b, v.h = node.bodyString, node.headString
if node.gnx:
ni = g.app.nodeIndices
v.fileIndex = ni.tupleToString(ni.scanGnx(node.gnx))
self.generated_gnxs[node.gnx] = v
self.handleVnodeAttributes(node, v)
return v
#@+node:ekr.20060917213611: *5* oc.handleVnodeAttributes
def handleVnodeAttributes(self, node, v):
a = node.attributes.get('leo:a')
if a:
# 'C' (clone) and 'D' bits are not used.
if 'M' in a: v.setMarked()
if 'E' in a: v.expand()
# if 'O' in a: v.setOrphan()
if 'T' in a: self.topVnode = v
if 'V' in a: self.currentVnode = v
if 0: # Leo no longer uses the tnodeList.
s = node.attributes.get('leo:tnodeList')
tnodeList = s and s.split(',')
if tnodeList:
# This tnode list will be resolved later.
v.tempTnodeList = tnodeList
#@+node:ekr.20060913220707: *3* oc.dumpTree
def dumpTree(self, root, dummy=True):
if not dummy:
root.dump()
for child in root.children:
self.dumpTree(child, dummy=False)
#@+node:ekr.20111003220434.15488: *3* oc.parse_opml_file & helper
def parse_opml_file(self, fn):
c = self.c
if not fn or not fn.endswith('.opml'):
return g.trace('bad file name: %s' % repr(fn))
c = self.c
path = g.os_path_normpath(g.os_path_join(g.app.loadDir, fn))
try:
f = open(path, 'rb')
s = f.read() # type(s) is bytes for Python 3.x.
s = self.cleanSaxInputString(s)
except IOError:
return g.trace('can not open %s' % path)
# pylint:disable=catching-non-exception
try:
theFile = BytesIO(s)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_external_ges, 1)
# Do not include external general entities.
# The actual feature name is "http://xml.org/sax/features/external-general-entities"
parser.setFeature(xml.sax.handler.feature_external_pes, 0)
handler = SaxContentHandler(c, fn)
parser.setContentHandler(handler)
parser.parse(theFile) # expat does not support parseString
sax_node = handler.getNode()
except xml.sax.SAXParseException:
g.error('error parsing', fn)
g.es_exception()
sax_node = None
except Exception:
g.error('unexpected exception parsing', fn)
g.es_exception()
sax_node = None
return sax_node
#@+node:ekr.20111003220434.15490: *4* oc.cleanSaxInputString
def cleanSaxInputString(self, s):
'''Clean control characters from s.
s may be a bytes or a (unicode) string.'''
# Note: form-feed ('\f') is 12 decimal.
badchars = [chr(ch) for ch in range(32)]
badchars.remove('\t')
badchars.remove('\r')
badchars.remove('\n')
flatten = ''.join(badchars)
pad = ' ' * len(flatten)
flatten = bytes(flatten, 'utf-8')
pad = bytes(pad, 'utf-8')
transtable = bytes.maketrans(flatten, pad)
return s.translate(transtable)
#@+node:ekr.20141020112451.18342: *3* oc.putToOPML
def putToOPML(self, owner):
'''
Write the c.p as OPML, using the owner's put method.'''
PutToOPML(owner)
#@+node:ekr.20060904103721: *3* oc.readFile & helpers
def readFile(self, fileName):
'''Read the opml file.'''
dumpTree = False
if not fileName:
g.trace('no fileName')
return None
c = self.c.new()
# Create the new commander *now*
# so that created vnodes will have the proper context.
# Pass one: create the intermediate nodes.
dummyRoot = self.parse_opml_file(fileName)
if not dummyRoot:
return None
if dumpTree:
self.dumpTree(dummyRoot)
# Pass two: create the outline from the sax nodes.
children = self.createVnodes(c, dummyRoot)
p = leoNodes.Position(v=children[0], childIndex=0, stack=None)
# Check the outline.
errors = c.checkOutline()
if errors:
c.dumpOutline()
g.trace('%s errors!' % errors)
return None
# if self.opml_read_derived_files:
# at = c.atFileCommands
# c.fileCommands.tnodesDict = self.createTnodesDict()
# self.resolveTnodeLists(c)
# if self.opml_read_derived_files:
# c.atFileCommands.readAll(c.rootPosition())
c.selectPosition(p)
c.redraw()
return c # for testing.
#@+node:ekr.20060921153603: *4* oc.createTnodesDict
def createTnodesDict(self):
'''
        Create c.tnodesDict from self.generated_gnxs
by converting VNode entries to tnodes.
'''
d = {}
for key in list(self.generated_gnxs.keys()):
v = self.generated_gnxs.get(key)
d[key] = v
return d
#@+node:ekr.20060917214140: *4* oc.setCurrentPosition
def setCurrentPosition(self, c):
v = self.currentVnode
if not v:
return
for p in c.allNodes_iter():
if p.v == v:
c.selectPosition(p)
break
#@+node:ekr.20060918132045: *4* oc.resolveTnodeLists
def resolveTnodeLists(self, c):
for p in c.allNodes_iter():
if hasattr(p.v, 'tempTnodeList'):
result = []
for gnx in p.v.tempTnodeList:
v = self.generated_gnxs.get(gnx)
if v:
result.append(v)
else:
g.trace('No tnode for %s' % gnx)
p.v.tnodeList = result
delattr(p.v, 'tempTnodeList')
#@+node:ekr.20060919201810: *3* oc.readOpmlCommand
def readOpmlCommand(self, event=None):
'''Open a Leo window containing the contents of an .opml file.'''
c = self.c
fileName = g.app.gui.runOpenFileDialog(c,
title="Read OPML",
filetypes=[("OPML files", "*.opml"), ("All files", "*")],
defaultextension=".opml")
c.bringToFront()
if fileName:
self.readFile(fileName)
else:
c.bodyWantsFocus()
#@+node:ekr.20060904103721.1: *3* oc.writeFile
def writeFile(self, fileName):
'''Write fileName as an OPML file.'''
if not fileName:
return
ok = self.c.fileCommands.write_Leo_file(
fileName,
outlineOnlyFlag=not self.opml_write_derived_files,
toString=False, toOPML=True)
if ok:
g.es_print('wrote %s' % fileName)
else:
g.es_print('did not write %s' % fileName)
#@+node:ekr.20060919201330: *3* oc.writeOpmlCommand
def writeOpmlCommand(self, event=None):
        '''Save a Leo outline to an OPML file.'''
c = self.c
if g.app.disableSave:
g.es("Save commands disabled", color="purple")
return
# Make sure we never pass None to the ctor.
if not c.mFileName:
c.frame.title = ""
initialfile = g.ensure_extension(c.mFileName, ".opml")
# set local fileName, _not_ c.mFileName
fileName = g.app.gui.runSaveFileDialog(c,
initialfile=initialfile,
title="Write OPML",
filetypes=[("OPML files", "*.opml")],
defaultextension=".opml")
c.bringToFront()
if fileName:
fileName = g.ensure_extension(fileName, ".opml")
c.opmlCommands.writeFile(fileName)
#@-others
#@+node:ekr.20060919172012.2: ** class PutToOPML
class PutToOPML:
'''Write c.p's tree as OPML, using the owner's put method.'''
def __init__(self, owner):
self.c = owner.c
self.leo_file_encoding = owner.leo_file_encoding
self.owner = owner # a leoFileCommands.FileCommand instance.
self.initConfig()
self.putAll()
def put(self, s):
return self.owner.put(s)
#@+others
#@+node:ekr.20141020112451.18340: *3* initConfig
def initConfig(self):
'''Init all configuration settings.'''
c = self.c
# These prevent pylint warnings
self.opml_use_outline_elements = True
self.opml_write_derived_files = True
self.opml_write_leo_details = True
self.opml_write_leo_globals_attributes = True
self.opml_write_body_text = True
self.opml_write_ua_attributes = True
self.opml_expand_ua_dictionary = True
self.opml_skip_ua_dictionary_blanks = True
for ivar in (
'opml_use_outline_elements',
'opml_write_derived_files',
'opml_write_leo_details',
'opml_write_leo_globals_attributes',
'opml_write_body_text',
'opml_write_ua_attributes',
'opml_expand_ua_dictionary',
'opml_skip_ua_dictionary_blanks',
):
setattr(self, ivar, c.config.getBool(ivar))
#@+node:ekr.20141020112451.18337: *3* putAll
def putAll(self):
'''
Put the selected outline as OPML.
All elements and attributes prefixed by 'leo:' are leo-specific.
All other elements and attributes are specified by the OPML 1 spec.
'''
self.putXMLLine()
self.putOPMLProlog()
self.putOPMLHeader()
self.putOPMLNodes()
self.putOPMLPostlog()
#@+node:ekr.20060919172012.3: *3* putOPMLProlog
def putOPMLProlog(self):
s = self.c.config.getString('opml-namespace') or 'leo:com:leo-opml'
ver = self.c.config.getString('opml-version') or '2.0'
self.put('<opml version="%s" xmlns:leo="%s">' % (ver, s))
#@+node:ekr.20060919172012.4: *3* putOPMLHeader
def putOPMLHeader(self):
'''Put the OPML header, including attributes for globals, prefs and find settings.'''
c = self.c; indent = ' ' * 4
if self.opml_write_leo_globals_attributes:
self.put('\n<head leo:body_outline_ratio="%s">' % str(c.frame.ratio))
width, height, left, top = c.frame.get_window_info()
self.put('\n%s<leo:global_window_position' % indent)
self.put(' top="%s" left="%s" height="%s" width="%s"/>' % (
str(top), str(left), str(height), str(width)))
self.put('\n</head>')
else:
self.put('\n<head/>')
#@+node:ekr.20060919172012.5: *3* putOPMLNodes
def putOPMLNodes(self):
c = self.c; root = c.rootPosition()
self.put('\n<body>')
for p in root.self_and_siblings_iter():
self.putOPMLNode(p)
self.put('\n</body>')
#@+node:ekr.20060919172012.6: *3* putOPMLNode
def putOPMLNode(self, p):
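        # Emit one <outline> element for p. Body text is written either as a nested
        # <leo:body> element or as a leo:body attribute, depending on opml_use_outline_elements.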
indent = ' ' * (4 * p.level()) # Always use 4-space indents.
body = p.bodyString() or ''; head = p.headString() or ''
attrFormat = ' %s="%s"'
self.put('\n%s<outline' % indent)
if self.opml_write_leo_details: # Put leo-specific attributes.
for name, val in (
('leo:v', p.v.fileIndex),
('leo:a', self.aAttributes(p)),
# ('leo:tnodeList',self.tnodeListAttributes(p)),
):
if val: self.put(attrFormat % (name, val))
data = self.uAAttributes(p)
if data:
# for name,val in data.iteritems():
for name in list(data.keys()):
val = data.get(name)
self.put(attrFormat % (name, val))
self.put(attrFormat % ('text', self.attributeEscape(head)))
closed = False
if body and self.opml_write_body_text:
if self.opml_use_outline_elements:
self.put('>'); closed = True
self.put('<leo:body>%s</leo:body>' % xml.sax.saxutils.escape(body))
else:
self.put(attrFormat % ('leo:body', self.attributeEscape(body)))
if p.hasChildren():
if not closed:
self.put('>'); closed = True
for p2 in p.children_iter():
self.putOPMLNode(p2)
if closed:
self.put('\n%s</outline>' % indent)
# self.put('</outline>\n')
else:
self.put('/>')
#@+node:ekr.20060919172012.7: *4* attributeEscape
def attributeEscape(self, s):
        # Unlike xml.sax.saxutils.escape, replace " by &quot; and replace newlines by a character reference.
        s = s or ''
        return (
            s.replace('&', '&amp;')
            .replace('<', '&lt;')
            .replace('>', '&gt;')
            .replace('"', '&quot;')
            .replace('\n', '&#10;\n')
)
#@+node:ekr.20060919172012.8: *4* aAttributes
def aAttributes(self, p):
c = self.c
attr = []
if p.isExpanded(): attr.append('E')
if p.isMarked(): attr.append('M')
if c.isCurrentPosition(p): attr.append('V')
return ''.join(attr)
#@+node:ekr.20060919172012.9: *4* tnodeListAttributes (Not used)
# Based on fileCommands.putTnodeList.
def tnodeListAttributes(self, p):
'''Put the tnodeList attribute of p.v'''
# Remember: entries in the tnodeList correspond to @+node sentinels, _not_ to tnodes!
if not hasattr(p.v, 'tnodeList') or not p.v.tnodeList:
return None
# Assign fileIndices.
for v in p.v.tnodeList:
try: # Will fail for None or any pre 4.1 file index.
theId, time, n = p.v.fileIndex
except Exception:
g.trace("assigning gnx for ", p.v)
gnx = g.app.nodeIndices.getNewIndex()
p.v.setFileIndex(gnx) # Don't convert to string until the actual write.
s = ','.join([g.app.nodeIndices.toString(v.fileIndex) for v in p.v.tnodeList])
return s
#@+node:tbrown.20061004094757: *4* uAAttributes
def uAAttributes(self, p):
"""write unknownAttributes with various levels of expansion"""
data = {}
if self.opml_write_ua_attributes and hasattr(p.v, 'unknownAttributes'):
# for uak, uav in p.v.unknownAttributes.iteritems():
d = p.u
for uak in list(d.keys()):
uav = d.get(uak)
if self.opml_expand_ua_dictionary and isinstance(uav, dict):
# for uakc, uavc in uav.iteritems():
for uakc in list(uav.keys()):
uavc = uav.get(uakc)
if str(uavc) != '' or not self.opml_skip_ua_dictionary_blanks:
data['leo:ua_' + uak + '_' + uakc] = self.attributeEscape(str(uavc))
else:
data['leo:ua_' + uak] = self.attributeEscape(str(uav))
return data
#@+node:ekr.20060919172012.11: *3* putOPMLPostlog
def putOPMLPostlog(self):
self.put('\n</opml>\n')
#@+node:ekr.20141020112451.18339: *3* putXMLLine
def putXMLLine(self):
'''Put the **properly encoded** <?xml> element.'''
self.put('%s"%s"%s\n' % (
g.app.prolog_prefix_string,
self.leo_file_encoding,
g.app.prolog_postfix_string))
#@-others
#@+node:ekr.20060904134958.164: ** class SaxContentHandler (XMLGenerator)
class SaxContentHandler(xml.sax.saxutils.XMLGenerator):
'''A sax content handler class that reads OPML files.'''
#@+others
#@+node:ekr.20060904134958.165: *3* __init__ & helper
def __init__(self, c, inputFileName):
        '''Ctor for SaxContentHandler class (OPML plugin).'''
self.c = c
self.inputFileName = inputFileName
super().__init__()
self.dispatchDict = self.define_dispatch_dict()
# Semantics.
self.content = []
self.elementStack = []
self.errors = 0
self.level = 0
self.node = None
self.nodeStack = []
self.ratio = 0.5 # body-outline ratio.
self.rootNode = None
    #@+node:ekr.20060917185525: *4* define_dispatch_dict
def define_dispatch_dict(self):
# There is no need for an 'end' method if all info is carried in attributes.
# Keys are **elements**.
d = {
'body': (None, None),
'head': (self.startHead, None),
'opml': (None, None),
'outline': (self.startOutline, self.endOutline),
'leo:body': (self.startBodyText, self.endBodyText),
'leo:global_window_position': (self.startWinPos, None),
}
return d
#@+node:ekr.20060904134958.166: *3* helpers
#@+node:ekr.20060904134958.167: *4* attrsToList
def attrsToList(self, attrs):
'''
Convert the attributes to a list of g.Bunches.
attrs: an Attributes item passed to startElement.
'''
return [g.Bunch(name=name, val=attrs.getValue(name))
for name in attrs.getNames()]
#@+node:ekr.20060904134958.170: *4* error
def error(self, message):
print('\n\nXML error: %s\n' % (message))
self.errors += 1
#@+node:ekr.20060917185525.1: *4* inElement
def inElement(self, name):
return self.elementStack and name in self.elementStack
#@+node:ekr.20060904134958.171: *4* printStartElement & helpers
def printStartElement(self, name, attrs):
indent = '\t' * self.level or ''
if attrs.getLength() > 0:
print('%s<%s %s>' % (
indent,
self.clean(name).strip(),
self.attrsToString(attrs, sep=' ')))
else:
print('%s<%s>' % (
indent,
self.clean(name).strip()))
if name.lower() in ['outline', 'head', 'body',]:
print('')
#@+node:ekr.20060904134958.168: *5* attrsToString
def attrsToString(self, attrs, sep='\n'):
'''Convert the attributes to a string.
attrs: an Attributes item passed to startElement.
        sep: the separator character between attributes.'''
result = [
'%s="%s"' % (bunch.name, bunch.val)
for bunch in self.attrsToList(attrs)
]
return sep.join(result)
#@+node:ekr.20060904134958.169: *5* clean
def clean(self, s):
return g.toEncodedString(s, "ascii")
#@+node:ekr.20060904134958.174: *3* Do nothing...
#@+node:ekr.20060904134958.175: *4* other methods
def ignorableWhitespace(self, content):
g.trace()
def processingInstruction(self, target, data):
g.trace()
def skippedEntity(self, name):
g.trace(name)
def startElementNS(self, name, qname, attrs):
g.trace(name)
def endElementNS(self, name, qname):
g.trace(name)
#@+node:ekr.20060904134958.176: *4* endDocument
def endDocument(self):
pass
#@+node:ekr.20060904134958.177: *4* startDocument
def startDocument(self):
pass
#@+node:ekr.20060904134958.178: *3* characters
def characters(self, content):
name = self.elementStack[-1].lower() if self.elementStack else '<no element name>'
# Opml elements should not have content: everything is carried in attributes.
if name == 'leo:body':
if self.node:
self.content.append(content)
else:
self.error('No node for %s content' % (name))
else:
if content.strip():
print('content:', name, repr(content))
#@+node:ekr.20060904134958.179: *3* endElement & helpers
def endElement(self, name):
name = name.lower()
if name in printElements or 'all' in printElements:
indent = '\t' * (self.level - 1) or ''
print('%s</%s>' % (indent, self.clean(name).strip()))
data = self.dispatchDict.get(name)
if data is None:
g.trace('unknown element', name)
else:
junk, func = data
if func:
func()
name2 = self.elementStack.pop()
assert name == name2
#@+node:ekr.20060919193501: *4* endBodyText
def endBodyText(self):
'''End a <leo:body> element.'''
if self.content:
self.node.bodyString = ''.join(self.content)
self.content = []
#@+node:ekr.20060917185948: *4* endOutline
def endOutline(self):
self.level -= 1
self.node = self.nodeStack.pop()
#@+node:ekr.20060904134958.180: *3* startElement & helpers
def startElement(self, name, attrs):
name = name.lower()
if name in printElements or 'all' in printElements:
self.printStartElement(name, attrs)
self.elementStack.append(name)
data = self.dispatchDict.get(name)
if data is None:
g.trace('unknown element', name)
else:
func, junk = data
if func:
func(attrs)
#@+node:ekr.20060919193501.1: *4* startBodyText
def startBodyText(self, attrs):
'''Start a <leo:body> element.'''
self.content = []
#@+node:ekr.20060922072852: *4* startHead
def startHead(self, attrs):
if not self.inElement('opml'):
self.error('<head> outside <opml>')
self.doHeadAttributes(attrs)
#@+node:ekr.20060922072852.1: *5* doHeadAttributes
def doHeadAttributes(self, attrs):
ratio = 0.5
for bunch in self.attrsToList(attrs):
name = bunch.name; val = bunch.val
if name == 'leo:body_outline_ratio':
try:
ratio = float(val)
except ValueError:
pass
self.ratio = ratio
#@+node:ekr.20060917190349: *4* startOutline
def startOutline(self, attrs):
if self.inElement('head'):
self.error('<outline> inside <head>')
if not self.inElement('body'):
self.error('<outline> outside <body>')
self.level += 1
if self.rootNode:
parent = self.node
else:
self.rootNode = parent = NodeClass() # The dummy parent node.
parent.headString = 'dummyNode'
self.node = NodeClass()
parent.children.append(self.node)
self.doOutlineAttributes(attrs)
self.nodeStack.append(parent)
#@+node:ekr.20060904141220.34: *5* doOutlineAttributes
def doOutlineAttributes(self, attrs):
node = self.node
for bunch in self.attrsToList(attrs):
name, val = bunch.name, bunch.val
if name == 'text': # Text is the 'official' opml attribute for headlines.
node.headString = val
elif name in ('_note', 'leo:body'):
# Android outliner uses _note.
node.bodyString = val
elif name == 'leo:v':
node.gnx = val
else:
node.attributes[name] = val
#@+node:ekr.20060922071010: *4* startWinPos
def startWinPos(self, attrs):
if not self.inElement('head'):
            self.error('<leo:global_window_position> outside <head>')
self.doGlobalWindowAttributes(attrs)
#@+node:ekr.20060922071010.1: *5* doGlobalWindowAttributes
def doGlobalWindowAttributes(self, attrs):
c = self.c
top = 50; left = 50; height = 500; width = 700 # Reasonable defaults.
try:
for bunch in self.attrsToList(attrs):
name = bunch.name; val = bunch.val
if name == 'top': top = int(val)
elif name == 'left': left = int(val)
elif name == 'height': height = int(val)
elif name == 'width': width = int(val)
except ValueError:
pass
c.frame.setTopGeometry(width, height, left, top)
c.frame.deiconify()
c.frame.lift()
c.frame.update()
#@+node:ekr.20060904134958.183: *3* getNode
def getNode(self):
return self.rootNode
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 80
#@-leo
|
bzl | 1a3feb2a4b493bc7b6824789a6a11dbb620a367b | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repo():
http_archive(
name = "com_github_fmtlib_fmt",
urls = ["https://github.com/fmtlib/fmt/releases/download/7.1.2/fmt-7.1.2.zip"],
build_file = "@bazel_ext_repo//fmtlib/7.1.2:fmtlib.BUILD",
strip_prefix = "fmt-7.1.2",
) |
py | 1a3feb5d8fed7718bcdb6a95f5c835dda2f089f6 | """Class implementation for the scale_y_from_point interfaces.
"""
from typing import Any
from typing import Dict
from apysc._animation.animation_scale_y_from_point_interface import \
AnimationScaleYFromPointInterface
from apysc._type.dictionary import Dictionary
from apysc._type.expression_string import ExpressionString
from apysc._type.int import Int
from apysc._type.number import Number
from apysc._type.revert_interface import RevertInterface
class ScaleYFromPointInterface(
AnimationScaleYFromPointInterface, RevertInterface):
_scale_y_from_point: Dictionary[str, Number]
def _initialize_scale_y_from_point_if_not_initialized(self) -> None:
"""
Initialize the `_scale_y_from_point` attribute if it hasn't been
initialized yet.
"""
if hasattr(self, '_scale_y_from_point'):
return
self._scale_y_from_point = Dictionary({})
def get_scale_y_from_point(self, y: Int) -> Number:
"""
Get a scale-y value from the given y-coordinate.
Parameters
----------
y : Int
Y-coordinate.
Returns
-------
scale_y : ap.Number
Scale-y value from the given y-coordinate.
References
----------
- GraphicsBase scale_from_point interfaces document
- https://bit.ly/3xRBhlw
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.get_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
from apysc._validation import number_validation
number_validation.validate_integer(integer=y)
self._initialize_scale_y_from_point_if_not_initialized()
default_val: ap.Number = ap.Number(1.0)
key_exp_str: ExpressionString = scale_interface_helper.\
get_coordinate_key_for_expression(coordinate=int(y._value))
scale_y: ap.Number = self._scale_y_from_point.get(
key=key_exp_str, default=default_val)
return scale_y
def set_scale_y_from_point(self, scale_y: Number, y: Int) -> None:
"""
Update a scale-y value from the given y-coordinate.
Parameters
----------
scale_y : Number
Scale-y value to set.
y : Int
Y-coordinate.
References
----------
- GraphicsBase scale_from_point interfaces document
- https://bit.ly/3xRBhlw
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.set_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
from apysc._validation import number_validation
number_validation.validate_num(num=scale_y)
number_validation.validate_integer(integer=y)
self._initialize_scale_y_from_point_if_not_initialized()
key_exp_str: ExpressionString = scale_interface_helper.\
get_coordinate_key_for_expression(coordinate=int(y._value))
self._scale_y_from_point._value[key_exp_str.value] = scale_y
self._append_scale_y_from_point_update_expression(y=y)
def _append_scale_y_from_point_update_expression(
self, *, y: Int) -> None:
"""
Append the scale-y from the specified y-coordinate updating
expression.
Parameters
----------
y : Int
Y-coordinate.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.set_scale_y_from_point, locals_=locals(),
module_name=__name__, class_=ScaleYFromPointInterface):
from apysc._display import scale_interface_helper
expression: str
expression = scale_interface_helper.get_scale_updating_expression(
coordinate=y,
scale_dict=self._scale_y_from_point,
interface_variable_name=self.variable_name,
coordinate_type=scale_interface_helper.CoordinateType.Y)
ap.append_js_expression(expression=expression)
_scale_y_from_point_snapshots: Dict[str, Dict[str, Any]]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make a value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._initialize_scale_y_from_point_if_not_initialized()
self._set_single_snapshot_val_to_dict(
dict_name='_scale_y_from_point_snapshots',
value={**self._scale_y_from_point._value},
snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert a value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._scale_y_from_point._value = self._scale_y_from_point_snapshots[
snapshot_name]
|
py | 1a3feb8510428d1355ac40228baa971b100d8999 | from __future__ import unicode_literals
from django.db.models import Manager
class HostingServiceAccountManager(Manager):
"""A manager for HostingServiceAccount models."""
def accessible(self, visible_only=True, local_site=None, filter_local_site=True):
"""Return hosting service accounts that are accessible.
These will include all visible accounts that are compatible with the
specified :term:`Local Site`.
Args:
visible_only (bool, optional):
Whether to only include visible accounts in the results.
local_site (reviewboard.site.models.LocalSite, optional):
A :term:`Local Site` that the accounts must be associated with.
If not specified, returned accounts won't be bound to a
Local Site.
filter_local_site (bool, optional):
Whether to factor in the ``local_site`` argument. If ``False``,
the :term:`Local Site` will be ignored.
Returns:
django.db.models.query.QuerySet:
The resulting queryset.
"""
qs = self.all()
if visible_only:
qs = qs.filter(visible=True)
qs = qs.distinct()
if filter_local_site:
qs = qs.filter(local_site=local_site)
return qs
def can_create(self, user, local_site=None):
return user.has_perm("hostingsvcs.create_hostingserviceaccount", local_site) |
py | 1a3febc5c6e248a413bb9526fdb77b6e20fdb4fc | import datetime
def get_pages(posts):
""" Groups blog posts into 'pages' of five posts """
pages = []
for i in range(4, len(posts), 5):
pages.append(posts[i-4: i+1])
r = len(posts) % 5
if r > 0:
pages.append(posts[len(posts) - r:])
return pages
def gen_tags(posts):
""" Returns a list of dictionaries indicating tag name and tag count
sorted by tag count. """
tag_list = {}
for post in posts:
for tag in post['tags']:
if tag in tag_list:
tag_list[tag] += 1
else:
tag_list[tag] = 1
tags = [{'tag': x, 'count': tag_list[x]} for x in tag_list]
tags.sort(key = lambda x: x['count'], reverse = True)
return tags
class Blog():
def __init__(self, flatpages, post_dir, draft_dir):
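        # Split the flatpages into published posts (sorted newest first) and drafts,
        # then pre-compute pagination, tag counts and per-post slugs.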
self.flatpages = flatpages
self.posts = [page for page in self.flatpages
if page.path.startswith(post_dir)]
self.posts.sort(key = lambda i:
datetime.datetime.strptime(i['date'], '%d %B %Y'),
reverse = True)
self.drafts = [page for page in self.flatpages
if page.path.startswith(draft_dir)]
self.pages = get_pages(self.posts)
self.tags = gen_tags(self.posts)
for post in self.posts:
post.slug = post.path.split('/')[1]
|
py | 1a3fec527ef3e69130b60239943ac351f6089e45 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: consensus_create_topic.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import basic_types_pb2 as basic__types__pb2
import duration_pb2 as duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='consensus_create_topic.proto',
package='proto',
syntax='proto3',
serialized_options=b'\n\"com.hederahashgraph.api.proto.javaP\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1c\x63onsensus_create_topic.proto\x12\x05proto\x1a\x11\x62\x61sic_types.proto\x1a\x0e\x64uration.proto\"\xc6\x01\n#ConsensusCreateTopicTransactionBody\x12\x0c\n\x04memo\x18\x01 \x01(\t\x12\x1c\n\x08\x61\x64minKey\x18\x02 \x01(\x0b\x32\n.proto.Key\x12\x1d\n\tsubmitKey\x18\x03 \x01(\x0b\x32\n.proto.Key\x12(\n\x0f\x61utoRenewPeriod\x18\x06 \x01(\x0b\x32\x0f.proto.Duration\x12*\n\x10\x61utoRenewAccount\x18\x07 \x01(\x0b\x32\x10.proto.AccountIDB&\n\"com.hederahashgraph.api.proto.javaP\x01\x62\x06proto3'
,
dependencies=[basic__types__pb2.DESCRIPTOR,duration__pb2.DESCRIPTOR,])
_CONSENSUSCREATETOPICTRANSACTIONBODY = _descriptor.Descriptor(
name='ConsensusCreateTopicTransactionBody',
full_name='proto.ConsensusCreateTopicTransactionBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='memo', full_name='proto.ConsensusCreateTopicTransactionBody.memo', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='adminKey', full_name='proto.ConsensusCreateTopicTransactionBody.adminKey', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='submitKey', full_name='proto.ConsensusCreateTopicTransactionBody.submitKey', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autoRenewPeriod', full_name='proto.ConsensusCreateTopicTransactionBody.autoRenewPeriod', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='autoRenewAccount', full_name='proto.ConsensusCreateTopicTransactionBody.autoRenewAccount', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=273,
)
_CONSENSUSCREATETOPICTRANSACTIONBODY.fields_by_name['adminKey'].message_type = basic__types__pb2._KEY
_CONSENSUSCREATETOPICTRANSACTIONBODY.fields_by_name['submitKey'].message_type = basic__types__pb2._KEY
_CONSENSUSCREATETOPICTRANSACTIONBODY.fields_by_name['autoRenewPeriod'].message_type = duration__pb2._DURATION
_CONSENSUSCREATETOPICTRANSACTIONBODY.fields_by_name['autoRenewAccount'].message_type = basic__types__pb2._ACCOUNTID
DESCRIPTOR.message_types_by_name['ConsensusCreateTopicTransactionBody'] = _CONSENSUSCREATETOPICTRANSACTIONBODY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConsensusCreateTopicTransactionBody = _reflection.GeneratedProtocolMessageType('ConsensusCreateTopicTransactionBody', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSCREATETOPICTRANSACTIONBODY,
'__module__' : 'consensus_create_topic_pb2'
# @@protoc_insertion_point(class_scope:proto.ConsensusCreateTopicTransactionBody)
})
_sym_db.RegisterMessage(ConsensusCreateTopicTransactionBody)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a3fed3ef7cc80a8fe57bbc92277b3ec568ad67b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.contrib import auth as django_auth
from django.contrib.auth.hashers import make_password
from .accounts_settings import COOKIE_AGE, COOKIE_NAME
from common_utils.cookies import set_cookie
def create_token_string(user, token=None):
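    # Only a hash of the random token is stored; the plain value is returned in the
    # "<user id>:<token value>" string that ends up in the remember-me cookie.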
from .models import RememberToken
token_value = uuid.uuid4().hex
token_hash = make_password(token_value)
token = RememberToken(
token_hash=token_hash,
user=user
)
token.save()
return '%d:%s' % (user.id, token_value)
def preset_cookie(request, token_string):
if token_string:
setattr(request, '_auth_remember_token', token_string)
else:
setattr(request, '_auth_remember_token', '')
def delete_cookie(response):
response.delete_cookie(COOKIE_NAME)
def remember_user(response, user):
token_string = create_token_string(user, None)
set_cookie(response, COOKIE_NAME, token_string, COOKIE_AGE)
return response
def authenticate_user(request):
token = request.COOKIES.get(COOKIE_NAME, None)
if token is None:
return False
user = django_auth.authenticate(token_string=token, request=request)
if user:
django_auth.login(request, user)
return user
|
py | 1a3fed993042e27f1b1082af2a70b9b9e26548ad | import pytest
from seedwork.domain.exceptions import BusinessRuleValidationException
from seedwork.domain.value_objects import Money
from modules.catalog.domain.entities import Seller, Listing
from modules.catalog.domain.value_objects import ListingStatus
def test_seller_publishes_listing_happy_path():
seller = Seller(id=Seller.next_id())
listing = Listing(
id=Listing.next_id(),
title="Tiny dragon",
description="Tiny dragon for sale",
price=Money(1),
seller_id=seller.id,
)
seller.publish_listing(listing)
assert listing.status == ListingStatus.PUBLISHED
def test_seller_fails_to_publish_listing_with_zero_price():
seller = Seller(id=Seller.next_id())
listing = Listing(
id=Listing.next_id(),
title="Tiny dragon",
description="Tiny dragon for sale",
price=Money(0),
seller_id=seller.id,
)
with pytest.raises(BusinessRuleValidationException):
seller.publish_listing(listing)
|
py | 1a3fedf25e8b2d0193a3a7729147ec2911d2c623 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for classifier models"""
import copy
from core.domain import classifier_registry
from core.platform import models
import feconf
import utils
(classifier_models,) = models.Registry.import_models([models.NAMES.classifier])
class Classifier(object):
"""Domain object for a classifier.
A classifier is a machine learning model created using a particular
classification algorithm which is used for answer classification
task.
Attributes:
id: str. The unique id of the classifier.
exp_id: str. The exploration id to which this classifier belongs.
        exp_version_when_created: int. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the data used by the
classifier. This depends on the algorithm ID.
"""
def __init__(self, classifier_id, exp_id, exp_version_when_created,
state_name, algorithm_id, cached_classifier_data,
data_schema_version):
"""Constructs an Classifier domain object.
Args:
classifier_id: str. The unique id of the classifier.
exp_id: str. The exploration id to which the classifier belongs.
exp_version_when_created: int. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier
belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the
data used by the classifier.
"""
self._id = classifier_id
self._exp_id = exp_id
self._exp_version_when_created = exp_version_when_created
self._state_name = state_name
self._algorithm_id = algorithm_id
self._cached_classifier_data = copy.deepcopy(cached_classifier_data)
self._data_schema_version = data_schema_version
@property
def id(self):
return self._id
@property
def exp_id(self):
return self._exp_id
@property
def exp_version_when_created(self):
return self._exp_version_when_created
@property
def state_name(self):
return self._state_name
@property
def algorithm_id(self):
return self._algorithm_id
@property
def cached_classifier_data(self):
return self._cached_classifier_data
@property
def data_schema_version(self):
return self._data_schema_version
def update_state_name(self, state_name):
"""Updates the state_name attribute of the Classifier domain object.
Args:
state_name: str. The name of the updated state to which the
classifier belongs.
"""
self._state_name = state_name
def to_dict(self):
"""Constructs a dict representation of Classifier domain object.
Returns:
A dict representation of Classifier domain object.
"""
return {
'classifier_id': self._id,
'exp_id': self._exp_id,
'exp_version_when_created': self._exp_version_when_created,
'state_name': self._state_name,
'algorithm_id': self._algorithm_id,
'cached_classifier_data': self._cached_classifier_data,
'data_schema_version': self._data_schema_version
}
def validate(self):
"""Validates the classifier before it is saved to storage."""
if not isinstance(self.id, basestring):
raise utils.ValidationError(
'Expected id to be a string, received %s' % self.id)
if not isinstance(self.exp_id, basestring):
raise utils.ValidationError(
'Expected exp_id to be a string, received %s' % self.exp_id)
if not isinstance(self.exp_version_when_created, int):
raise utils.ValidationError(
                'Expected exp_version_when_created to be an int, received %s' %
self.exp_version_when_created)
if not isinstance(self.state_name, basestring):
raise utils.ValidationError(
                'Expected state_name to be a string, received %s' % self.state_name)
utils.require_valid_name(self.state_name, 'the state name')
if not isinstance(self.algorithm_id, basestring):
raise utils.ValidationError(
'Expected algorithm_id to be a string, received %s' %
self.algorithm_id)
utils.require_valid_name(
self.algorithm_id, 'the algorithm id')
if self.algorithm_id not in (
feconf.INTERACTION_CLASSIFIER_MAPPING.values()):
raise utils.ValidationError(
'Invalid algorithm id: %s' % self.algorithm_id)
if not isinstance(self.cached_classifier_data, dict):
raise utils.ValidationError(
'Expected cached_classifier_data to be a dict, received %s' %(
self.cached_classifier_data))
classifier_class = (
classifier_registry.Registry.get_classifier_by_algorithm_id(
self.algorithm_id))
classifier_class.validate(self.cached_classifier_data)
|
py | 1a3fee5c9ae88df8f6b8e876f69888221848bb20 | from mock import Mock, call, patch
from pip._internal.commands.install import build_wheels
class TestWheelCache:
def check_build_wheels(
self,
pep517_requirements,
legacy_requirements,
):
"""
Return: (mock_calls, return_value).
"""
def build(reqs, **kwargs):
# Fail the first requirement.
return [reqs[0]]
builder = Mock()
builder.build.side_effect = build
build_failures = build_wheels(
builder=builder,
pep517_requirements=pep517_requirements,
legacy_requirements=legacy_requirements,
)
return (builder.build.mock_calls, build_failures)
@patch('pip._internal.commands.install.is_wheel_installed')
def test_build_wheels__wheel_installed(self, is_wheel_installed):
is_wheel_installed.return_value = True
mock_calls, build_failures = self.check_build_wheels(
pep517_requirements=['a', 'b'],
legacy_requirements=['c', 'd'],
)
# Legacy requirements were built.
assert mock_calls == [
call(['a', 'b'], should_unpack=True),
call(['c', 'd'], should_unpack=True),
]
# Legacy build failures are not included in the return value.
assert build_failures == ['a']
@patch('pip._internal.commands.install.is_wheel_installed')
def test_build_wheels__wheel_not_installed(self, is_wheel_installed):
is_wheel_installed.return_value = False
mock_calls, build_failures = self.check_build_wheels(
pep517_requirements=['a', 'b'],
legacy_requirements=['c', 'd'],
)
# Legacy requirements were not built.
assert mock_calls == [
call(['a', 'b'], should_unpack=True),
]
assert build_failures == ['a']
|
py | 1a3ff0f03190af3a45c3b45611ff8da6346807ac | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.scheduler_v1 import types
from google.cloud.scheduler_v1.gapic import cloud_scheduler_client
from google.cloud.scheduler_v1.gapic import enums
class CloudSchedulerClient(cloud_scheduler_client.CloudSchedulerClient):
__doc__ = cloud_scheduler_client.CloudSchedulerClient.__doc__
enums = enums
__all__ = ("enums", "types", "CloudSchedulerClient")
|
py | 1a3ff15ee74b2104a1fc074870406e05829cc731 | """Add manual_triggered to Event model
Revision ID: 2f5a2f4385a0
Revises: 2acc88805404
Create Date: 2017-11-01 14:03:02.555397
"""
# revision identifiers, used by Alembic.
revision = '2f5a2f4385a0'
down_revision = '2acc88805404'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('events', sa.Column('manual_triggered', sa.Boolean()))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('events', 'manual_triggered')
### end Alembic commands ###
|
py | 1a3ff16a70147dbb246c4c9aca6d04175680e6ae | from services.module.moduleService import ModuleService
from repositories.demoddata.demoddataRepo import DemoddataRepo
from repositories.payload.payloadRepo import PayloadRepo
from repositories.waterfall.waterfallRepo import WaterfallRepo
from repositories.observation.observationsRepo import ObservationRepo
class ObservationsService:
def __init__(self, cmd):
self.__cmd = cmd
self.__module_service = ModuleService(self.__cmd)
repos = self.filter_repositories()
self.__observations_repo = ObservationRepo(self.__cmd, repos)
def filter_repositories(self):
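        # When no artifact type is requested explicitly, include all three
        # repositories (payloads, waterfalls and demoddata).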
downloadable_data_repos = []
all = not self.__cmd.payloads and not self.__cmd.waterfalls and not self.__cmd.demoddata
if all or self.__cmd.payloads:
downloadable_data_repos.append(PayloadRepo(self.__cmd.working_dir, self.__module_service.loadPayloadModules()))
if all == True or self.__cmd.waterfalls:
downloadable_data_repos.append(WaterfallRepo(self.__cmd.working_dir, self.__module_service.loadWaterfallModules()))
if all == True or self.__cmd.demoddata:
downloadable_data_repos.append(DemoddataRepo(self.__cmd.working_dir, self.__module_service.loadDemoddataModules()))
return downloadable_data_repos
def extract(self):
self.__observations_repo.extract()
|
py | 1a3ff1715c7dc6b33dbaa5fa65d354e5dda58b1b | """evaluate.py
This script is used to evalute trained ImageNet models.
"""
import sys
import argparse
import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
from config import config
from utils.utils import config_keras_backend, clear_keras_session
from utils.dataset import get_dataset
from models.adamw import AdamW
from keras.utils import to_categorical
from methods import run_attack
#from tensorflow.keras.applications import InceptionV3
#from tensorflow.keras.applications import VGG19
#from tensorflow.keras.applications import ResNet152V2
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input as resnet_preprocess_input
#from keras.applications.resnet101 import ResNet101
from keras.applications.vgg19 import VGG19, decode_predictions
from keras.applications.vgg19 import preprocess_input as vgg_preprocess_input
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input as inception_preprocess_input
from methods import get_accuracy, run_attack
#from tf.keras.preprocessing.image import ImageDataGenerator
import cv2
import copy
DESCRIPTION = """For example:
$ python3 evaluate.py --dataset_dir ${HOME}/data/ILSVRC2012/tfrecords \
--batch_size 64 \
saves/mobilenet_v2-model-final.h5
python3 evaluate_resnet_all.py --dataset_dir /l/IMAGENET_ORIGINAL/train/imagenet_tfrecord --inv_model_file /l/keras_imagenet-master/saves/inception_v3-ckpt-030_orig.h5
"""
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--dataset_dir', type=str,
default=config.DEFAULT_DATASET_DIR)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--inv_model_file', type=str,
help='a saved model (.h5) file')
args = parser.parse_args()
config_keras_backend()
if not args.inv_model_file.endswith('.h5'):
sys.exit('model_file is not a .h5')
inv_model = tf.keras.models.load_model(
args.inv_model_file,
compile=False,
custom_objects={'AdamW': AdamW})
inv_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
ds_validation = get_dataset(
args.dataset_dir, 'validation', args.batch_size)
## VGG
vgg_model = VGG19(include_top=True, weights='imagenet', classes=1000)
vgg_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# InceptionV3
inception_model = InceptionV3(include_top=True, weights='imagenet', classes=1000)
inception_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
## ResNet
resnet_model = ResNet50(include_top=True, weights='imagenet', classes=1000)
resnet_model.compile(
optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Process batches
iteration = 0
sum1 = 0
sum2 = 0
for images, labels in tfds.as_numpy(ds_validation):
if iteration < 199:
print('continuing')
iteration += 1
continue
if iteration == 500:
exit()
labels = np.argmax(labels, axis=1)
#adv_imgs = run_attack(True, 'CarliniL2Method', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
#adv_imgs = run_attack(False, 'DeepFool', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
adv_imgs = run_attack(False, 'FastGradientMethod', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
#adv_imgs = run_attack(False, 'ProjectedGradientDescent', inception_model, images, labels, batch_size=10, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
## VGG ################################################
#img *= (2.0/255) # normalize to: 0.0~2.0
#img -= 1.0 # subtract mean to make it: -1.0~1.0
#img = np.expand_dims(img, axis=0)
vgg_imgs = []
resnet_imgs = []
inc_imgs = []
flip_imgs = []
inv_imgs = []
adv_vgg_imgs = []
adv_resnet_imgs = []
adv_inc_imgs = []
adv_flip_imgs = []
adv_inv_imgs = []
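        # Undo the dataset's [-1, 1] normalization back to 0..255, then build one copy
        # per backbone (plus flipped and inverted variants) with each model's own
        # preprocess_input; the same is done for the adversarial images below.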
for ii in range(images.shape[0]):
img = copy.deepcopy(images[ii,:,:,:])
img += 1.0
#img /= (2.0/255)
img *= (255.0/2.0)
## VGG
vgg_img = copy.deepcopy(img)
vgg_img = cv2.resize(vgg_img, (224, 224))
vgg_img = vgg_preprocess_input(vgg_img)
vgg_imgs.append(vgg_img)
## Resnet
resnet_img = copy.deepcopy(img)
resnet_img = cv2.resize(resnet_img, (224, 224))
resnet_img = resnet_preprocess_input(resnet_img)
resnet_imgs.append(resnet_img)
## InceptionV3
inc_img = copy.deepcopy(img)
inc_img = cv2.resize(inc_img, (299, 299))
inc_img = inception_preprocess_input(inc_img)
inc_imgs.append(inc_img)
## Flipped
#flip_img = copy.deepcopy(img)
#flip_img = cv2.resize(flip_img, (299, 299))
#flip_img = cv2.flip(flip_img, 1)
#flip_img = inception_preprocess_input(flip_img)
#flip_imgs.append(flip_img)
flip_img = copy.deepcopy(images[ii,:,:,:])
flip_img = cv2.flip(flip_img, 1)
flip_imgs.append(flip_img)
## Inverse
inv_img = copy.deepcopy(images[ii,:,:,:])#########
inv_img += 1.0
inv_img /= 2.0
inv_img = 1 - inv_img
inv_img *= 255.0
inv_img = cv2.resize(inv_img, (299, 299))
inv_img = inception_preprocess_input(inv_img)
inv_imgs.append(inv_img)
#==========================================
# ADVERSARIAL ---------------
adv_img = copy.deepcopy(adv_imgs[ii,:,:,:])
adv_img += 1.0
#adv_img /= (2.0/255)
adv_img *= (255.0/2.0)
# VGG
adv_vgg_img = copy.deepcopy(adv_img)
adv_vgg_img = cv2.resize(adv_vgg_img, (224, 224))
adv_vgg_img = vgg_preprocess_input(adv_vgg_img)
adv_vgg_imgs.append(adv_vgg_img)
# Resnet
adv_resnet_img = copy.deepcopy(adv_img)
adv_resnet_img = cv2.resize(adv_resnet_img, (224, 224))
adv_resnet_img = resnet_preprocess_input(adv_resnet_img)
adv_resnet_imgs.append(adv_resnet_img)
# InceptionV3
adv_inc_img = copy.deepcopy(adv_img)
adv_inc_img = cv2.resize(adv_inc_img, (299, 299))
adv_inc_img = inception_preprocess_input(adv_inc_img)
adv_inc_imgs.append(adv_inc_img)
## Flipped
#adv_flip_img = copy.deepcopy(img)
#adv_flip_img = cv2.resize(adv_flip_img, (299, 299))
#adv_flip_img = cv2.flip(adv_flip_img, 1)
#adv_flip_img = inception_preprocess_input(adv_flip_img)
#adv_flip_imgs.append(adv_flip_img)
adv_flip_img = copy.deepcopy(adv_imgs[ii,:,:,:])
adv_flip_img = cv2.flip(adv_flip_img, 1)
adv_flip_imgs.append(adv_flip_img)
## Inverse
##test on inverse Inceptionv3
adv_inv_img = copy.deepcopy(adv_imgs[ii,:,:,:])#########
adv_inv_img += 1.0
adv_inv_img /= 2.0
adv_inv_img = 1 - adv_inv_img
adv_inv_img *= 255.0
adv_inv_img = cv2.resize(adv_inv_img, (299, 299))
adv_inv_img = inception_preprocess_input(adv_inv_img)
adv_inv_imgs.append(adv_inv_img)
# Horizontal Flipping
# test on Resnet
vgg_imgs = np.asarray(vgg_imgs)
resnet_imgs = np.asarray(resnet_imgs)
inc_imgs = np.asarray(inc_imgs)
flip_imgs = np.asarray(flip_imgs)
inv_imgs = np.asarray(inv_imgs)
adv_vgg_imgs = np.asarray(adv_vgg_imgs)
adv_resnet_imgs = np.asarray(adv_resnet_imgs)
adv_inc_imgs = np.asarray(adv_inc_imgs)
adv_flip_imgs = np.asarray(adv_flip_imgs)
adv_inv_imgs = np.asarray(adv_inv_imgs)
# Default ResNet accuracy
_, results1 = resnet_model.evaluate(x=resnet_imgs, y=labels, verbose=0)
_, results2 = vgg_model.evaluate(x=vgg_imgs, y=labels, verbose=0)
_, results3 = inception_model.evaluate(x=inc_imgs, y=labels, verbose=0)
_, results4 = inception_model.evaluate(x=flip_imgs, y=labels, verbose=0)
_, results5 = inv_model.evaluate(x=inv_imgs, y=labels, verbose=0)
# print('-----------------------------------------------------')
_, results6 = resnet_model.evaluate(x=adv_resnet_imgs, y=labels, verbose=0)
_, results7 = vgg_model.evaluate(x=adv_vgg_imgs, y=labels, verbose=0)
_, results8 = inception_model.evaluate(x=adv_inc_imgs, y=labels, verbose=0)
_, results9 = inception_model.evaluate(x=adv_flip_imgs, y=labels, verbose=0)
_, results10 = inv_model.evaluate(x=adv_inv_imgs, y=labels, verbose=0)
print(iteration)
print(results1, results6)
print(results2, results7)
print(results3, results8)
print(results4, results9)
print(results5, results10)
with open("kot_fgsm_untarg.txt", "a") as myfile:
myfile.write(str(results1) + ' ' + str(results2) + ' ' + str(results3) + ' ' + str(results4) + ' ' + str(results5) + ' ' + str(results6) + ' ' + str(results7) + ' ' + str(results8) + ' ' + str(results9) + ' ' + str(results10) + '\n' )
iteration += 1
#exit()
#results = resnet_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
#print('RESNET test loss, test acc:', results)
#results = vgg_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
#print('VGG test loss, test acc:', results)
# labels = np.argmax(labels, axis=1)
#
# #results = model.evaluate(
# # x=images, y=to_categorical(labels, 1000))
# #print('test loss, test acc:', results)
# total = total + images.shape[0]
# print(total)
exit()
results = resnet_model.evaluate(
x=ds_validation,
steps=50000 // args.batch_size)
print('test loss, test acc:', results)
clear_keras_session()
if __name__ == '__main__':
main()
|
py | 1a3ff1db6ac8a57312c52896ad3f13de8c53486e | # def fib(n): # write Fibonacci series up to n
# a, b = 0, 1
# while a < n:
# print(a, end=' ')
# a, b = b, a+b
# print()
# def fib2(n): # return Fibonacci series up to n
# result = []
# a, b = 0, 1
# while a < n:
# result.append(a)
# a, b = b, a+b
# return result
# a = __name__
# print("The name of the module is: ", a)
import sys
old = sys.getrecursionlimit()
print("Initial recursion depth", old)
sys.setrecursionlimit(1000000)
new_limit = sys.getrecursionlimit()
print("new recursion limit", new_limit)
py | 1a3ff1f212e204d083443f522f131d3af94abc77 | import numpy as np
import torch
import trajnetplusplustools
def pre_process_test(sc_, obs_len=8):
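    # Drop pedestrians whose track starts after the primary pedestrian's
    # observation window ends.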
obs_frames = [primary_row.frame for primary_row in sc_[0]][:obs_len]
last_frame = obs_frames[-1]
sc_ = [[row for row in ped] for ped in sc_ if ped[0].frame <= last_frame]
return sc_
def trajnet_loader(data_loader, args):
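    # Build (x, y, vx, vy) features for the primary pedestrian of each scene and yield
    # batches with the observed part as 'src' and the last `preds` steps as 'trg'.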
batch = {'src': [], 'trg': []}
num_batches = 0
for batch_idx, (filename, scene_id, paths) in enumerate(data_loader):
## make new scene
pos_scene = trajnetplusplustools.Reader.paths_to_xy(paths)[:, 0] # primary ped
vel_scene = np.zeros_like(pos_scene)
vel_scene[1:] = pos_scene[1:] - pos_scene[:-1]
attr_scene = np.concatenate((pos_scene, vel_scene), axis=1)
batch['src'].append(attr_scene[:args.obs])
batch['trg'].append(attr_scene[-args.preds:])
num_batches += 1
if (num_batches % args.batch_size != 0) and (batch_idx + 1 != len(data_loader)):
continue
batch['src'] = torch.Tensor(np.stack(batch['src']))
batch['trg'] = torch.Tensor(np.stack(batch['trg']))
yield batch
batch = {'src': [], 'trg': []}
def trajnet_test_loader(data_loader, args):
batch = {'src': [], 'trg': []}
seq_start_end = []
num_batches = 0
for batch_idx, (filename, scene_id, paths) in enumerate(data_loader):
## make new scene
paths = pre_process_test(paths, args.obs)
pos_scene = trajnetplusplustools.Reader.paths_to_xy(paths)
vel_scene = np.zeros_like(pos_scene)
vel_scene[1:] = pos_scene[1:] - pos_scene[:-1]
attr_scene = np.concatenate((pos_scene, vel_scene), axis=2)
seq_start_end.append(pos_scene.shape[1])
batch['src'].append(attr_scene[:args.obs])
batch['trg'].append(attr_scene[-args.preds:])
num_batches += 1
if (num_batches % args.batch_size != 0) and (batch_idx + 1 != len(data_loader)):
continue
batch['src'] = torch.Tensor(np.concatenate(batch['src'], axis=1)).permute(1, 0, 2)
batch['trg'] = torch.Tensor(np.concatenate(batch['trg'], axis=1)).permute(1, 0, 2)
seq_start_end = [0] + seq_start_end
seq_start_end = torch.LongTensor(np.array(seq_start_end).cumsum())
seq_start_end = torch.stack((seq_start_end[:-1], seq_start_end[1:]), dim=1)
yield batch, seq_start_end
batch = {'src': [], 'trg': []}
seq_start_end = []
|
py | 1a3ff2438a036d88303012ebecd3b4a218f05ca0 | # Generated by Django 2.2.4 on 2019-11-01 14:20
from django.db import migrations, models
import hipo_django_core.models
import hipo_django_core.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.BigIntegerField(default=hipo_django_core.utils.generate_unique_id, editable=False, primary_key=True, serialize=False)),
('creation_datetime', models.DateTimeField(auto_now_add=True)),
('update_datetime', models.DateTimeField(auto_now=True)),
('event_type', models.CharField(choices=[('hipotalks', 'Hipotalks'), ('townhall', 'Townhall')], max_length=255)),
('date', models.DateField()),
],
options={
'verbose_name': 'Event',
'verbose_name_plural': 'Events',
'ordering': ('-date',),
},
bases=(hipo_django_core.models.LogEntryMixin, models.Model),
),
]
|
py | 1a3ff30560b886956254580fdc5cf86417753939 | # Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from osc_lib.command import command
from osc_lib import utils as oscutils
from manilaclient.common._i18n import _
class ShareLimitsShow(command.Lister):
"""Show a list of share limits for a user."""
_description = _("Show a list of share limits for a user.")
def get_parser(self, prog_name):
parser = super(ShareLimitsShow, self).get_parser(prog_name)
limit_type_group = parser.add_mutually_exclusive_group(required=True)
limit_type_group.add_argument(
'--absolute',
action='store_true',
default=False,
help=_('Get the absolute limits for the user')
)
limit_type_group.add_argument(
'--rate',
action='store_true',
default=False,
help=_('Get the API rate limits for the user')
)
return parser
def take_action(self, parsed_args):
share_client = self.app.client_manager.share
# limit_type = 'absolute'
if parsed_args.rate:
# limit_type = 'rate'
columns = [
"Verb",
"Regex",
"URI",
"Value",
"Remaining",
"Unit",
"Next Available",
]
data = list(share_client.limits.get().rate)
else:
columns = [
'Name',
'Value',
]
data = list(share_client.limits.get().absolute)
return (columns, (oscutils.get_item_properties(s, columns)
for s in data))
|
py | 1a3ff3e3099a16cb8853bae97a0f1be9e671e21b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='urepr',
version='0.0.1',
description='Uncertainty representation',
author='João Faria',
author_email='[email protected]',
license='MIT',
url='https://github.com/j-faria/urepr',
packages=['urepr'],
)
|
py | 1a3ff4a469764dacf71e142baf8567c92f818ba3 | import numpy as np
from lazy import lazy
from .cec2013lsgo import CEC2013LSGO
class F13(CEC2013LSGO):
"""
    Shifted Schwefel's Function with Conforming Overlapping Subcomponents
"""
def __init__(
self,
*,
rng_seed: int = 42,
use_shuffle: bool = False,
verbose: int = 0
):
super(F13, self).__init__(
rng_seed=rng_seed,
use_shuffle=use_shuffle,
verbose=verbose,
)
self.c = np.cumsum(self.s)
self.m = 5
@property
def genome_size(self) -> np.ndarray:
return 905
@lazy
def lower_bound(self) -> np.ndarray:
lower_bound = [-100] * self.genome_size
return np.array(lower_bound)
@lazy
def upper_bound(self) -> np.ndarray:
upper_bound = [100] * self.genome_size
return np.array(upper_bound)
def _evaluate(self, x: np.ndarray) -> np.ndarray:
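        # Each overlapping subcomponent (indices shifted by the overlap size m) is
        # rotated by the matching R25/R50/R100 matrix, scored with the Schwefel base
        # function and accumulated with its weight; out-of-bounds rows are invalidated.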
out_of_bounds = self.check_bounds(x)
out_of_bounds = np.any(out_of_bounds, axis=1)
x = x - self.xopt
fitness = 0
ldim = 0
for i in range(len(self.s)):
if i > 0:
ldim = self.c[i-1] - i * self.m
udim = self.c[i] - i * self.m
f: np.ndarray
z = x[:, self.p[ldim:udim] - 1].T
if self.s[i] == 25:
f = self.R25
elif self.s[i] == 50:
f = self.R50
elif self.s[i] == 100:
f = self.R100
f = f @ z
f = self._schwefel(f.T)
fitness += self.w[i] * f
fitness[out_of_bounds] = None
return fitness
|
py | 1a3ff4dbfaa8fe37b6f38ca7e8b938bc9a0081c5 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from types import ModuleType
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import requests
import gitlab
from gitlab import base, cli
from gitlab import exceptions as exc
from gitlab import types as g_types
from gitlab import utils
__all__ = [
"GetMixin",
"GetWithoutIdMixin",
"RefreshMixin",
"ListMixin",
"RetrieveMixin",
"CreateMixin",
"UpdateMixin",
"SetMixin",
"DeleteMixin",
"CRUDMixin",
"NoUpdateMixin",
"SaveMixin",
"ObjectDeleteMixin",
"UserAgentDetailMixin",
"AccessRequestMixin",
"DownloadMixin",
"SubscribableMixin",
"TodoMixin",
"TimeTrackingMixin",
"ParticipantsMixin",
"BadgeRenderMixin",
]
if TYPE_CHECKING:
# When running mypy we use these as the base classes
_RestManagerBase = base.RESTManager
_RestObjectBase = base.RESTObject
else:
_RestManagerBase = object
_RestObjectBase = object
class GetMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_optional_get_attrs: Tuple[str, ...] = ()
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabGetError)
def get(
self, id: Union[str, int], lazy: bool = False, **kwargs: Any
) -> base.RESTObject:
"""Retrieve a single object.
Args:
id: ID of the object to retrieve
lazy: If True, don't request the server, but create a
shallow object giving access to the managers. This is
useful if you want to avoid useless calls to the API.
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The generated RESTObject.
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if isinstance(id, str):
id = utils.EncodedId(id)
path = f"{self.path}/{id}"
if TYPE_CHECKING:
assert self._obj_cls is not None
if lazy is True:
if TYPE_CHECKING:
assert self._obj_cls._id_attr is not None
return self._obj_cls(self, {self._obj_cls._id_attr: id})
server_data = self.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
return self._obj_cls(self, server_data)
class GetWithoutIdMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_optional_get_attrs: Tuple[str, ...] = ()
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabGetError)
def get(
self, id: Optional[Union[int, str]] = None, **kwargs: Any
) -> Optional[base.RESTObject]:
"""Retrieve a single object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The generated RESTObject
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if TYPE_CHECKING:
assert self.path is not None
server_data = self.gitlab.http_get(self.path, **kwargs)
if server_data is None:
return None
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
class RefreshMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@exc.on_http_error(exc.GitlabGetError)
def refresh(self, **kwargs: Any) -> None:
"""Refresh a single object from server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns None (updates the object)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if self._id_attr:
path = f"{self.manager.path}/{self.encoded_id}"
else:
if TYPE_CHECKING:
assert self.manager.path is not None
path = self.manager.path
server_data = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
class ListMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_list_filters: Tuple[str, ...] = ()
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabListError)
def list(self, **kwargs: Any) -> Union[base.RESTObjectList, List[base.RESTObject]]:
"""Retrieve a list of objects.
Args:
all: If True, return all the items, without pagination
per_page: Number of items to retrieve per request
page: ID of the page to return (starts with page 1)
as_list: If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
"""
# Duplicate data to avoid messing with what the user sent us
data = kwargs.copy()
if self.gitlab.per_page:
data.setdefault("per_page", self.gitlab.per_page)
# global keyset pagination
if self.gitlab.pagination:
data.setdefault("pagination", self.gitlab.pagination)
if self.gitlab.order_by:
data.setdefault("order_by", self.gitlab.order_by)
# We get the attributes that need some special transformation
if self._types:
for attr_name, type_cls in self._types.items():
if attr_name in data.keys():
type_obj = type_cls(data[attr_name])
data[attr_name] = type_obj.get_for_api()
# Allow to overwrite the path, handy for custom listings
path = data.pop("path", self.path)
if TYPE_CHECKING:
assert self._obj_cls is not None
obj = self.gitlab.http_list(path, **data)
if isinstance(obj, list):
return [self._obj_cls(self, item, created_from_list=True) for item in obj]
else:
return base.RESTObjectList(self, self._obj_cls, obj)
class RetrieveMixin(ListMixin, GetMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class CreateMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
def _check_missing_create_attrs(self, data: Dict[str, Any]) -> None:
missing = []
for attr in self._create_attrs.required:
if attr not in data:
missing.append(attr)
continue
if missing:
raise AttributeError(f"Missing attributes: {', '.join(missing)}")
@exc.on_http_error(exc.GitlabCreateError)
def create(
self, data: Optional[Dict[str, Any]] = None, **kwargs: Any
) -> base.RESTObject:
"""Create a new object.
Args:
data: parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
A new instance of the managed object class built with
the data sent by the server
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
"""
if data is None:
data = {}
self._check_missing_create_attrs(data)
files = {}
# We get the attributes that need some special transformation
if self._types:
# Duplicate data to avoid messing with what the user sent us
data = data.copy()
for attr_name, type_cls in self._types.items():
if attr_name in data.keys():
type_obj = type_cls(data[attr_name])
                    # if the type is FileAttribute we need to pass the data as
                    # a file
if isinstance(type_obj, g_types.FileAttribute):
k = type_obj.get_file_name(attr_name)
files[attr_name] = (k, data.pop(attr_name))
else:
data[attr_name] = type_obj.get_for_api()
# Handle specific URL for creation
path = kwargs.pop("path", self.path)
server_data = self.gitlab.http_post(path, post_data=data, files=files, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
class UpdateMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
_update_uses_post: bool = False
gitlab: gitlab.Gitlab
def _check_missing_update_attrs(self, data: Dict[str, Any]) -> None:
if TYPE_CHECKING:
assert self._obj_cls is not None
# Remove the id field from the required list as it was previously moved
# to the http path.
required = tuple(
[k for k in self._update_attrs.required if k != self._obj_cls._id_attr]
)
missing = []
for attr in required:
if attr not in data:
missing.append(attr)
continue
if missing:
raise AttributeError(f"Missing attributes: {', '.join(missing)}")
def _get_update_method(
self,
) -> Callable[..., Union[Dict[str, Any], requests.Response]]:
"""Return the HTTP method to use.
Returns:
http_put (default) or http_post
"""
if self._update_uses_post:
http_method = self.gitlab.http_post
else:
http_method = self.gitlab.http_put
return http_method
@exc.on_http_error(exc.GitlabUpdateError)
def update(
self,
id: Optional[Union[str, int]] = None,
new_data: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Update an object on the server.
Args:
id: ID of the object to update (can be None if not required)
new_data: the update data for the object
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The new object data (*not* a RESTObject)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request
"""
new_data = new_data or {}
if id is None:
path = self.path
else:
path = f"{self.path}/{utils.EncodedId(id)}"
self._check_missing_update_attrs(new_data)
files = {}
# We get the attributes that need some special transformation
if self._types:
# Duplicate data to avoid messing with what the user sent us
new_data = new_data.copy()
for attr_name, type_cls in self._types.items():
if attr_name in new_data.keys():
type_obj = type_cls(new_data[attr_name])
                    # if the type is FileAttribute we need to pass the data as
                    # a file
if isinstance(type_obj, g_types.FileAttribute):
k = type_obj.get_file_name(attr_name)
files[attr_name] = (k, new_data.pop(attr_name))
else:
new_data[attr_name] = type_obj.get_for_api()
http_method = self._get_update_method()
result = http_method(path, post_data=new_data, files=files, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class SetMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabSetError)
def set(self, key: str, value: str, **kwargs: Any) -> base.RESTObject:
"""Create or update the object.
Args:
key: The key of the object to create/update
value: The value to set for the object
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSetError: If an error occurred
Returns:
The created/updated attribute
"""
path = f"{self.path}/{utils.EncodedId(key)}"
data = {"value": value}
server_data = self.gitlab.http_put(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
class DeleteMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabDeleteError)
def delete(self, id: Optional[Union[str, int]] = None, **kwargs: Any) -> None:
"""Delete an object on the server.
Args:
id: ID of the object to delete
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
if id is None:
path = self.path
else:
path = f"{self.path}/{utils.EncodedId(id)}"
if TYPE_CHECKING:
assert path is not None
self.gitlab.http_delete(path, **kwargs)
class CRUDMixin(GetMixin, ListMixin, CreateMixin, UpdateMixin, DeleteMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class NoUpdateMixin(GetMixin, ListMixin, CreateMixin, DeleteMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class SaveMixin(_RestObjectBase):
"""Mixin for RESTObject's that can be updated."""
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
def _get_updated_data(self) -> Dict[str, Any]:
updated_data = {}
for attr in self.manager._update_attrs.required:
# Get everything required, no matter if it's been updated
updated_data[attr] = getattr(self, attr)
# Add the updated attributes
updated_data.update(self._updated_attrs)
return updated_data
def save(self, **kwargs: Any) -> Optional[Dict[str, Any]]:
"""Save the changes made to the object to the server.
The object is updated to match what the server returns.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The new object data (*not* a RESTObject)
Raise:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request
"""
updated_data = self._get_updated_data()
# Nothing to update. Server fails if sent an empty dict.
if not updated_data:
return None
# call the manager
obj_id = self.encoded_id
if TYPE_CHECKING:
assert isinstance(self.manager, UpdateMixin)
server_data = self.manager.update(obj_id, updated_data, **kwargs)
self._update_attrs(server_data)
return server_data
class ObjectDeleteMixin(_RestObjectBase):
"""Mixin for RESTObject's that can be deleted."""
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
def delete(self, **kwargs: Any) -> None:
"""Delete the object from the server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
if TYPE_CHECKING:
assert isinstance(self.manager, DeleteMixin)
assert self.encoded_id is not None
self.manager.delete(self.encoded_id, **kwargs)
class UserAgentDetailMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("Snippet", "ProjectSnippet", "ProjectIssue"))
@exc.on_http_error(exc.GitlabGetError)
def user_agent_detail(self, **kwargs: Any) -> Dict[str, Any]:
"""Get the user agent detail.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
path = f"{self.manager.path}/{self.encoded_id}/user_agent_detail"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class AccessRequestMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(
("ProjectAccessRequest", "GroupAccessRequest"), (), ("access_level",)
)
@exc.on_http_error(exc.GitlabUpdateError)
def approve(
self, access_level: int = gitlab.const.DEVELOPER_ACCESS, **kwargs: Any
) -> None:
"""Approve an access request.
Args:
access_level: The access level for the user
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server fails to perform the request
"""
path = f"{self.manager.path}/{self.encoded_id}/approve"
data = {"access_level": access_level}
server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
class DownloadMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("GroupExport", "ProjectExport"))
@exc.on_http_error(exc.GitlabGetError)
def download(
self,
streamed: bool = False,
action: Optional[Callable] = None,
chunk_size: int = 1024,
**kwargs: Any,
) -> Optional[bytes]:
"""Download the archive of a resource export.
Args:
streamed: If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action: Callable responsible of dealing with chunk of
data
chunk_size: Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
The blob content if streamed is False, None otherwise
"""
path = f"{self.manager.path}/download"
result = self.manager.gitlab.http_get(
path, streamed=streamed, raw=True, **kwargs
)
if TYPE_CHECKING:
assert isinstance(result, requests.Response)
return utils.response_content(result, streamed, action, chunk_size)
class SubscribableMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(
("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
)
@exc.on_http_error(exc.GitlabSubscribeError)
def subscribe(self, **kwargs: Any) -> None:
"""Subscribe to the object notifications.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSubscribeError: If the subscription cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/subscribe"
server_data = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
@cli.register_custom_action(
("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
)
@exc.on_http_error(exc.GitlabUnsubscribeError)
def unsubscribe(self, **kwargs: Any) -> None:
"""Unsubscribe from the object notifications.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUnsubscribeError: If the unsubscription cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/unsubscribe"
server_data = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
class TodoMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTodoError)
def todo(self, **kwargs: Any) -> None:
"""Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set
"""
path = f"{self.manager.path}/{self.encoded_id}/todo"
self.manager.gitlab.http_post(path, **kwargs)
class TimeTrackingMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def time_stats(self, **kwargs: Any) -> Dict[str, Any]:
"""Get time stats for the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
        # Use the existing time_stats attribute if it exists, otherwise make an
        # API call
if "time_stats" in self.attributes:
return self.attributes["time_stats"]
path = f"{self.manager.path}/{self.encoded_id}/time_stats"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def time_estimate(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
"""Set an estimated time of work for the object.
Args:
duration: Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/time_estimate"
data = {"duration": duration}
result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def reset_time_estimate(self, **kwargs: Any) -> Dict[str, Any]:
"""Resets estimated time for the object to 0 seconds.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/reset_time_estimate"
result = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def add_spent_time(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
"""Add time spent working on the object.
Args:
duration: Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/add_spent_time"
data = {"duration": duration}
result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def reset_spent_time(self, **kwargs: Any) -> Dict[str, Any]:
"""Resets the time spent working on the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/reset_spent_time"
result = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class ParticipantsMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectMergeRequest", "ProjectIssue"))
@exc.on_http_error(exc.GitlabListError)
def participants(self, **kwargs: Any) -> Dict[str, Any]:
"""List the participants.
Args:
all: If True, return all the items, without pagination
per_page: Number of items to retrieve per request
page: ID of the page to return (starts with page 1)
as_list: If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
The list of participants
"""
path = f"{self.manager.path}/{self.encoded_id}/participants"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class BadgeRenderMixin(_RestManagerBase):
@cli.register_custom_action(
("GroupBadgeManager", "ProjectBadgeManager"), ("link_url", "image_url")
)
@exc.on_http_error(exc.GitlabRenderError)
def render(self, link_url: str, image_url: str, **kwargs: Any) -> Dict[str, Any]:
"""Preview link_url and image_url after interpolation.
Args:
link_url: URL of the badge link
image_url: URL of the badge image
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabRenderError: If the rendering failed
Returns:
The rendering properties
"""
path = f"{self.path}/render"
data = {"link_url": link_url, "image_url": image_url}
result = self.gitlab.http_get(path, data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class PromoteMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
_update_uses_post: bool = False
manager: base.RESTManager
def _get_update_method(
self,
) -> Callable[..., Union[Dict[str, Any], requests.Response]]:
"""Return the HTTP method to use.
Returns:
http_put (default) or http_post
"""
if self._update_uses_post:
http_method = self.manager.gitlab.http_post
else:
http_method = self.manager.gitlab.http_put
return http_method
@exc.on_http_error(exc.GitlabPromoteError)
def promote(self, **kwargs: Any) -> Dict[str, Any]:
"""Promote the item.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabPromoteError: If the item could not be promoted
GitlabParsingError: If the json data could not be parsed
Returns:
The updated object data (*not* a RESTObject)
"""
path = f"{self.manager.path}/{self.encoded_id}/promote"
http_method = self._get_update_method()
result = http_method(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
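# Illustrative composition sketch (not part of python-gitlab itself; the resource
# names below are hypothetical). The mixins above are meant to be combined with
# RESTManager/RESTObject subclasses, roughly like this:
#
#     class Thing(SaveMixin, ObjectDeleteMixin, base.RESTObject):
#         pass
#
#     class ThingManager(CRUDMixin, base.RESTManager):
#         _path = "/things"
#         _obj_cls = Thing
#
#     gl = gitlab.Gitlab("https://gitlab.example.com", private_token="...")
#     manager = ThingManager(gl)
#     thing = manager.create({"name": "demo"})   # CreateMixin
#     thing.name = "renamed"
#     thing.save()                               # SaveMixin -> UpdateMixin.update()
#     thing.delete()                             # ObjectDeleteMixin -> DeleteMixin.delete()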
|
py | 1a3ff6f6d1155b0c98366670c4b77dc23a91e333 | # Generated by Django 2.0.7 on 2018-08-02 18:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sberbank', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='payment',
name='client_id',
field=models.TextField(blank=True, null=True, verbose_name='client ID'),
),
migrations.AlterField(
model_name='logentry',
name='request_type',
field=models.CharField(choices=[(0, 'CREATE'), (1, 'CALLBACK'), (2, 'CHECK_STATUS'), (3, 'REDIRECT'), (4, 'GET_BINDINGS')], db_index=True, max_length=1, verbose_name='request type'),
),
]
|
py | 1a3ff74a3319da887cf4c5d0152f05e970a4ee10 | import json
import numpy as np
import pdb
import copy
import torch
from scipy.special import binom
MISSING_VALUE = -1
HASNT_HAPPENED_VALUE = -5
RACE_CODE_TO_NAME = {
1: 'White',
2: 'African American',
3: 'American Indian, Eskimo, Aleut',
4: 'Asian or Pacific Islander',
5: 'Other Race',
6: 'Caribbean/West Indian',
7: 'Unknown',
8: 'Hispanic',
9: 'Chinese',
10: 'Japanese',
11: 'Filipino',
12: 'Hawaiian',
13: 'Other Asian'
}
TREAT_MISSING_AS_NEGATIVE = False
NEGATIVE_99 = -99
class RiskFactorVectorizer():
def __init__(self, args):
self.risk_factor_metadata = parse_risk_factors(args)
self.risk_factor_transformers = \
{'binary_family_history': self.transform_binary_family_history,
'binary_biopsy_benign': self.get_binary_occurence_transformer(
'biopsy_hyperplasia', 'biopsy_hyperplasia_age'),
'binary_biopsy_LCIS': self.get_binary_occurence_transformer(
'biopsy_LCIS', 'biopsy_LCIS_age'),
'binary_biopsy_atypical_hyperplasia': self.get_binary_occurence_transformer(
'biopsy_atypical_hyperplasia', 'biopsy_atypical_hyperplasia_age'),
'age': self.get_exam_one_hot_risk_factor_transformer('age', [40, 50, 60, 70, 80]),
'menarche_age': self.get_age_based_risk_factor_transformer('menarche_age', [10, 12, 14, 16]),
'menopause_age': self.get_age_based_risk_factor_transformer('menopause_age', [45, 50, 55, 60]),
'first_pregnancy_age': self.get_age_based_risk_factor_transformer( 'first_pregnancy_age', [20, 25, 30, 35, 40]),
'density': self.get_image_biomarker_transformer('density'),
'bpe': self.get_image_biomarker_transformer('bpe'),
'5yearcancer': self.get_binary_transformer('5yearcancer'),
'prior_hist': self.get_binary_transformer('prior_hist'),
'years_to_cancer': self.get_exam_one_hot_risk_factor_transformer('years_to_cancer', [0, 1, 2, 3, 4, 10]),
'race': self.transform_race,
'parous': self.transform_parous,
'menopausal_status': self.transform_menopausal_status,
'weight': self.get_exam_one_hot_risk_factor_transformer('weight', [100, 130, 160, 190, 220, 250]),
'height': self.get_exam_one_hot_risk_factor_transformer('height', [50, 55, 60, 65, 70, 75]),
'ovarian_cancer': self.get_binary_occurence_transformer('ovarian_cancer',
'ovarian_cancer_age'),
'ovarian_cancer_age': self.get_age_based_risk_factor_transformer('ovarian_cancer_age',[30, 40, 50, 60, 70]),
'ashkenazi': self.get_binary_transformer('ashkenazi', use_patient_factors=True),
'brca': self.transform_brca,
'mom_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('M'),
'm_aunt_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('MA'),
'p_aunt_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('PA'),
'm_grandmother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('MG'),
'p_grantmother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('PG'),
'brother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('B'),
'father_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('F'),
'daughter_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('D'),
'sister_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('S'),
'mom_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('M', cancer='ovarian_cancer'),
'm_aunt_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('MA', cancer='ovarian_cancer'),
'p_aunt_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('PA', cancer='ovarian_cancer'),
'm_grandmother_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('MG', cancer='ovarian_cancer'),
'p_grantmother_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('PG', cancer='ovarian_cancer'),
'sister_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('S', cancer='ovarian_cancer'),
'daughter_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('D', cancer='ovarian_cancer'),
'hrt_type': self.get_hrt_information_transformer('type'),
'hrt_duration': self.get_hrt_information_transformer('duration'),
'hrt_years_ago_stopped': self.get_hrt_information_transformer('years_ago_stopped')
}
self.risk_factor_keys = args.risk_factor_keys
self.feature_names = []
self.risk_factor_key_to_num_class = {}
for k in self.risk_factor_keys:
if k not in self.risk_factor_transformers.keys():
raise Exception("Risk factor key '{}' not supported.".format(k))
names = self.risk_factor_transformers[k](None, None, just_return_feature_names=True)
self.risk_factor_key_to_num_class[k] = len(names)
self.feature_names.extend(names)
args.risk_factor_key_to_num_class = self.risk_factor_key_to_num_class
@property
def vector_length(self):
return len(self.feature_names)
def get_feature_names(self):
return copy.deepcopy(self.feature_names)
def one_hot_vectorizor(self, value, cutoffs):
one_hot_vector = torch.zeros(len(cutoffs) + 1)
if value == MISSING_VALUE:
return one_hot_vector
for i, cutoff in enumerate(cutoffs):
if value <= cutoff:
one_hot_vector[i] = 1
return one_hot_vector
one_hot_vector[-1] = 1
return one_hot_vector
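    # Illustrative example (added comment, not part of the original code): with cutoffs
    # [40, 50, 60, 70, 80], a value of 45 falls into the second bucket and yields
    # tensor([0., 1., 0., 0., 0., 0.]), while MISSING_VALUE yields an all-zero vector.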
def one_hot_feature_names(self, risk_factor_name, cutoffs):
feature_names = [""] * (len(cutoffs) + 1)
feature_names[0] = "{}_lt_{}".format(risk_factor_name, cutoffs[0])
feature_names[-1] = "{}_gt_{}".format(risk_factor_name, cutoffs[-1])
for i in range(1, len(cutoffs)):
feature_names[i] = "{}_{}_{}".format(risk_factor_name, cutoffs[i - 1], cutoffs[i])
return feature_names
def get_age_based_risk_factor_transformer(self, risk_factor_key, age_cutoffs):
def transform_age_based_risk_factor(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return self.one_hot_feature_names(risk_factor_key, age_cutoffs)
# if age-based risk factor, like menopause_age or first_pregnancy_age, is after the age at the exam, then treat it like it has not happened yet.
exam_age = int(exam_factors['age'])
age_based_risk_factor = int(patient_factors[risk_factor_key])
if exam_age != MISSING_VALUE and exam_age < age_based_risk_factor:
age_based_risk_factor = MISSING_VALUE # effectively same as missing
return self.one_hot_vectorizor(age_based_risk_factor, age_cutoffs)
return transform_age_based_risk_factor
def get_exam_one_hot_risk_factor_transformer(self, risk_factor_key, cutoffs):
def transform_exam_one_hot_risk_factor(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return self.one_hot_feature_names(risk_factor_key, cutoffs)
risk_factor = int(exam_factors[risk_factor_key])
return self.one_hot_vectorizor(risk_factor, cutoffs)
return transform_exam_one_hot_risk_factor
def get_binary_occurence_transformer(self, occurence_key, occurence_age_key):
def transform_binary_occurence(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['binary_{}'.format(occurence_key)]
binary_occurence = torch.zeros(1)
occurence = int(patient_factors[occurence_key])
occurence_age = int(patient_factors[occurence_age_key])
exam_age = int(exam_factors['age'])
if occurence and (occurence_age == MISSING_VALUE or exam_age >= occurence_age):
binary_occurence[0] = 1
return binary_occurence
return transform_binary_occurence
def get_binary_transformer(self, risk_factor_key, use_patient_factors=False):
def transform_binary(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['binary_{}'.format(risk_factor_key)]
binary_risk_factor = torch.zeros(1)
risk_factor = int(patient_factors[risk_factor_key]) if use_patient_factors else int(
exam_factors[risk_factor_key])
# If a binary risk factor is -1, we also want to treat it as negative (0)
binary_risk_factor[0] = 1 if risk_factor == 1 else 0
return binary_risk_factor
return transform_binary
def get_binary_relative_cancer_history_transformer(self, relative_code, cancer='breast_cancer'):
def transform_binary_relative_cancer_history(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['{}_{}_hist'.format(relative_code, cancer)]
binary_relative_cancer_history = torch.zeros(1)
relative_list = patient_factors['relatives'][relative_code]
for rel in relative_list:
if rel[cancer] == 1:
binary_relative_cancer_history[0] = 1
return binary_relative_cancer_history
return transform_binary_relative_cancer_history
def get_image_biomarker_transformer(self, name):
def image_biomarker_transformer(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (["{}_{}".format(name, i) for i in range(1,5)])
image_biomarker_vector = torch.zeros(4)
image_biomarker = int(exam_factors[name])
if image_biomarker != MISSING_VALUE:
image_biomarker_vector[image_biomarker - 1] = 1
return image_biomarker_vector
return image_biomarker_transformer
def transform_binary_family_history(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['binary_family_history'])
relatives_dict = patient_factors['relatives']
binary_family_history = torch.zeros(1)
for relative, relative_list in relatives_dict.items():
if len(relative_list) > 0:
binary_family_history[0] = 1
return binary_family_history
def transform_parous(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['parous'])
binary_parous = torch.zeros(1)
exam_age = int(exam_factors['age'])
binary_parous[0] = 1 if patient_factors['num_births'] != MISSING_VALUE else 0
if patient_factors['first_pregnancy_age'] != MISSING_VALUE:
binary_parous[0] = 1 if patient_factors['first_pregnancy_age'] < exam_age else 0
return binary_parous
def transform_race(self, patient_factors, exam_factors, just_return_feature_names=False):
values = range(1, 14)
race_vector = torch.zeros(len(values))
if just_return_feature_names:
return [RACE_CODE_TO_NAME[i] for i in values]
race = int(patient_factors['race'])
race_vector[race - 1] = 1
return race_vector
def transform_menopausal_status(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['pre', 'peri', 'post', 'unknown']
exam_age = int(exam_factors['age'])
menopausal_status = 3 # unknown
age_at_menopause = patient_factors['menopause_age'] \
if patient_factors['menopause_age'] != MISSING_VALUE else NEGATIVE_99
if age_at_menopause != NEGATIVE_99:
if age_at_menopause < exam_age:
menopausal_status = 2
elif age_at_menopause == exam_age:
menopausal_status = 1
elif age_at_menopause > exam_age:
menopausal_status = 0
else:
if TREAT_MISSING_AS_NEGATIVE:
menopausal_status = 0
menopausal_status_vector = torch.zeros(4)
menopausal_status_vector[menopausal_status] = 1
return menopausal_status_vector
def transform_brca(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['never or unknown', 'negative result', 'brca1', 'brca2']
genetic_testing_patient = 0
brca1 = patient_factors['brca1']
brca2 = patient_factors['brca2']
if brca2 == 1:
genetic_testing_patient = 3
elif brca1 == 1:
genetic_testing_patient = 2
elif brca1 == 0:
genetic_testing_patient = 1
genetic_testing_vector = torch.zeros(4)
genetic_testing_vector[genetic_testing_patient] = 1
return genetic_testing_vector
def get_hrt_information_transformer(self, piece):
def transform_hrt_information(patient_factors, exam_factors, just_return_feature_names=False):
year_cutoffs = [1,3,5,7]
piece_to_feature_names = {'type': ['hrt_combined', 'hrt_estrogen', 'hrt_unknown'],
'duration': self.one_hot_feature_names('hrt_duration', year_cutoffs),
'years_ago_stopped': self.one_hot_feature_names('hrt_years_ago_stopped', year_cutoffs)}
assert piece in piece_to_feature_names.keys()
if just_return_feature_names:
return piece_to_feature_names[piece]
hrt_vector = torch.zeros(3)
duration = MISSING_VALUE
hrt_type = MISSING_VALUE
hrt_years_ago_stopped = MISSING_VALUE
first_age_key = None
last_age_key = None
duration_key = None
current_age = int(exam_factors['age'])
if patient_factors['combined_hrt']:
hrt_type = 0
first_age_key = 'combined_hrt_first_age'
last_age_key = 'combined_hrt_last_age'
duration_key = 'combined_hrt_duration'
elif patient_factors['estrogen_hrt']:
hrt_type = 1
first_age_key = 'estrogen_hrt_first_age'
last_age_key = 'estrogen_hrt_last_age'
duration_key = 'estrogen_hrt_duration'
elif patient_factors['unknown_hrt']:
hrt_type = 2
first_age_key = 'unknown_hrt_first_age'
last_age_key = 'unknown_hrt_last_age'
duration_key = 'unknown_hrt_duration'
if first_age_key:
first_age = patient_factors[first_age_key]
last_age = patient_factors[last_age_key]
extracted_duration = patient_factors[duration_key]
if last_age >= current_age and current_age != MISSING_VALUE:
if first_age != MISSING_VALUE and first_age > current_age:
# future_user
hrt_type = MISSING_VALUE
elif extracted_duration != MISSING_VALUE and last_age - extracted_duration > current_age:
# future_user
hrt_type = MISSING_VALUE
else:
duration = current_age - first_age if current_age != MISSING_VALUE and first_age != MISSING_VALUE else extracted_duration
elif last_age != MISSING_VALUE:
hrt_years_ago_stopped = current_age - last_age
if extracted_duration != MISSING_VALUE:
duration = extracted_duration
elif first_age != MISSING_VALUE and last_age != MISSING_VALUE:
duration = last_age - first_age
assert duration >= 0
else:
duration = extracted_duration if extracted_duration != MISSING_VALUE else MISSING_VALUE
if hrt_type > MISSING_VALUE:
hrt_vector[hrt_type] = 1
piece_to_feature_names = {'type': hrt_vector,
'duration': self.one_hot_vectorizor(duration, year_cutoffs),
'years_ago_stopped': self.one_hot_vectorizor(hrt_years_ago_stopped, year_cutoffs)}
return piece_to_feature_names[piece]
return transform_hrt_information
def transform_5yearcancer(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['5yearcancer'])
binary_5yearcancer = torch.zeros(1)
binary_5yearcancer[0] = int(exam_factors['5yearcancer'])
return binary_5yearcancer
def transform(self, patient_factors, exam_factors):
risk_factor_vecs = [self.risk_factor_transformers[key](patient_factors, exam_factors) for key in
self.risk_factor_keys]
return risk_factor_vecs
def get_risk_factors_for_sample(self, sample):
sample_patient_factors = self.risk_factor_metadata[sample['ssn']]
sample_exam_factors = self.risk_factor_metadata[sample['ssn']]['accessions'][sample['exam']]
risk_factor_vector = self.transform(sample_patient_factors, sample_exam_factors)
return risk_factor_vector
def get_buckets_for_sample(self, sample):
sample_patient_factors = self.risk_factor_metadata[sample['ssn']]
sample_exam_factors = self.risk_factor_metadata[sample['ssn']]['accessions'][sample['exam']]
buckets = {}
for key in self.risk_factor_keys:
names = self.risk_factor_transformers[key](None, None, just_return_feature_names=True)
vectorized = self.risk_factor_transformers[key](sample_patient_factors, sample_exam_factors)
if sum(vectorized) == 0:
buckets[key] = 'missing_or_negative'
else:
name_index = int(vectorized.dot(torch.arange(len(vectorized))))
buckets[key] = names[name_index]
return buckets
def parse_risk_factors(args):
'''
Parse the risk factors json file and return a dict mapping ssns to patient dictionaries. Each patient dictionary
contains patient-level risk factors (e.g. race), as well as an 'accessions' key, that maps to a dictionary
mapping accesion#s to dictionaries containing exam-level risk factors (e.g. age).
'''
try:
metadata_json = json.load(open(args.metadata_path, 'r'))
except Exception as e:
raise Exception("Not found {} {}".format(args.metadata_path, e))
try:
risk_factor_metadata = json.load(open(args.risk_factor_metadata_path, 'r'))
except Exception as e:
raise Exception(
"Metadata file {} could not be parsed! Exception: {}!".format(args.risk_factor_metadata_path, e))
if '5yearcancer' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
label = 1 if exam['label'] == 'POS' else 0
risk_factor_metadata[ssn]['accessions'][acc]['5yearcancer'] = label
if 'prior_hist' in args.risk_factor_keys:
for patient in metadata_json:
if 'nwh' in args.dataset:
ssn = patient['mrn']
risk_factor_metadata[ssn]['accessions'][ssn]['prior_hist'] = 0
else:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['prior_hist'] = exam['prior_hist']
if 'years_to_cancer' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['years_to_cancer'] = exam['years_to_cancer']
if 'bpe' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['bpe'] = exam['bpe'] if 'bpe' in exam else MISSING_VALUE
return risk_factor_metadata
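# Illustrative shape of the parsed metadata (hypothetical values, not part of the
# original module): risk_factor_metadata maps each ssn to a patient dict holding
# patient-level factors plus an 'accessions' dict keyed by accession number with
# exam-level factors, e.g.
#
#     {
#         "12345": {
#             "race": 1,
#             "menopause_age": 51,
#             "relatives": {"M": [], "S": [{"breast_cancer": 1, "ovarian_cancer": 0}]},
#             "accessions": {
#                 "ACC-1": {"age": 62, "density": 2, "prior_hist": 0},
#             },
#         },
#     }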
|
py | 1a3ff7e322479c306c63b05a7b0654c93ae82438 | from typing import Tuple
import torch
from kornia.geometry.bbox import infer_bbox_shape3d, validate_bbox3d
from .projwarp import get_perspective_transform3d, warp_affine3d
__all__ = [
"crop_and_resize3d",
"crop_by_boxes3d",
"crop_by_transform_mat3d",
"center_crop3d",
]
def crop_and_resize3d(
tensor: torch.Tensor,
boxes: torch.Tensor,
size: Tuple[int, int, int],
interpolation: str = 'bilinear',
align_corners: bool = False,
) -> torch.Tensor:
r"""Extract crops from 3D volumes (5D tensor) and resize them.
Args:
tensor: the 3D volume tensor with shape (B, C, D, H, W).
boxes: a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be extracted. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
size: a tuple with the height and width that will be
used to resize the extracted patches.
interpolation: Interpolation flag.
align_corners: mode for grid_generation.
Returns:
tensor containing the patches with shape (Bx)CxN1xN2xN3.
Example:
>>> input = torch.arange(64, dtype=torch.float32).view(1, 1, 4, 4, 4)
>>> input
tensor([[[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]],
<BLANKLINE>
[[16., 17., 18., 19.],
[20., 21., 22., 23.],
[24., 25., 26., 27.],
[28., 29., 30., 31.]],
<BLANKLINE>
[[32., 33., 34., 35.],
[36., 37., 38., 39.],
[40., 41., 42., 43.],
[44., 45., 46., 47.]],
<BLANKLINE>
[[48., 49., 50., 51.],
[52., 53., 54., 55.],
[56., 57., 58., 59.],
[60., 61., 62., 63.]]]]])
>>> boxes = torch.tensor([[
... [1., 1., 1.],
... [3., 1., 1.],
... [3., 3., 1.],
... [1., 3., 1.],
... [1., 1., 2.],
... [3., 1., 2.],
... [3., 3., 2.],
... [1., 3., 2.],
... ]]) # 1x8x3
>>> crop_and_resize3d(input, boxes, (2, 2, 2), align_corners=True)
tensor([[[[[21.0000, 23.0000],
[29.0000, 31.0000]],
<BLANKLINE>
[[37.0000, 39.0000],
[45.0000, 47.0000]]]]])
"""
if not isinstance(tensor, (torch.Tensor)):
raise TypeError(f"Input tensor type is not a torch.Tensor. Got {type(tensor)}")
if not isinstance(boxes, (torch.Tensor)):
raise TypeError(f"Input boxes type is not a torch.Tensor. Got {type(boxes)}")
    if not isinstance(size, (tuple, list)) or len(size) != 3:
raise ValueError(f"Input size must be a tuple/list of length 3. Got {size}")
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
# unpack input data
dst_d, dst_h, dst_w = size[0], size[1], size[2]
# [x, y, z] origin
# from front to back
# top-left, top-right, bottom-right, bottom-left
points_src: torch.Tensor = boxes
# [x, y, z] destination
# from front to back
# top-left, top-right, bottom-right, bottom-left
points_dst: torch.Tensor = torch.tensor(
[
[
[0, 0, 0],
[dst_w - 1, 0, 0],
[dst_w - 1, dst_h - 1, 0],
[0, dst_h - 1, 0],
[0, 0, dst_d - 1],
[dst_w - 1, 0, dst_d - 1],
[dst_w - 1, dst_h - 1, dst_d - 1],
[0, dst_h - 1, dst_d - 1],
]
],
dtype=tensor.dtype,
device=tensor.device,
).expand(points_src.shape[0], -1, -1)
return crop_by_boxes3d(tensor, points_src, points_dst, interpolation, align_corners)
def center_crop3d(
tensor: torch.Tensor, size: Tuple[int, int, int], interpolation: str = 'bilinear', align_corners: bool = True
) -> torch.Tensor:
r"""Crop the 3D volumes (5D tensor) at the center.
Args:
tensor: the 3D volume tensor with shape (B, C, D, H, W).
size: a tuple with the expected depth, height and width
of the output patch.
interpolation: Interpolation flag.
align_corners : mode for grid_generation.
Returns:
the output tensor with patches.
Examples:
>>> input = torch.arange(64, dtype=torch.float32).view(1, 1, 4, 4, 4)
>>> input
tensor([[[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]],
<BLANKLINE>
[[16., 17., 18., 19.],
[20., 21., 22., 23.],
[24., 25., 26., 27.],
[28., 29., 30., 31.]],
<BLANKLINE>
[[32., 33., 34., 35.],
[36., 37., 38., 39.],
[40., 41., 42., 43.],
[44., 45., 46., 47.]],
<BLANKLINE>
[[48., 49., 50., 51.],
[52., 53., 54., 55.],
[56., 57., 58., 59.],
[60., 61., 62., 63.]]]]])
>>> center_crop3d(input, (2, 2, 2), align_corners=True)
tensor([[[[[21.0000, 22.0000],
[25.0000, 26.0000]],
<BLANKLINE>
[[37.0000, 38.0000],
[41.0000, 42.0000]]]]])
"""
if not isinstance(tensor, (torch.Tensor)):
raise TypeError(f"Input tensor type is not a torch.Tensor. Got {type(tensor)}")
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
    if not isinstance(size, (tuple, list)) or len(size) != 3:
raise ValueError(f"Input size must be a tuple/list of length 3. Got {size}")
# unpack input sizes
dst_d, dst_h, dst_w = size
src_d, src_h, src_w = tensor.shape[-3:]
# compute start/end offsets
dst_d_half = dst_d / 2
dst_h_half = dst_h / 2
dst_w_half = dst_w / 2
src_d_half = src_d / 2
src_h_half = src_h / 2
src_w_half = src_w / 2
start_x = src_w_half - dst_w_half
start_y = src_h_half - dst_h_half
start_z = src_d_half - dst_d_half
end_x = start_x + dst_w - 1
end_y = start_y + dst_h - 1
end_z = start_z + dst_d - 1
# [x, y, z] origin
# top-left-front, top-right-front, bottom-right-front, bottom-left-front
# top-left-back, top-right-back, bottom-right-back, bottom-left-back
points_src: torch.Tensor = torch.tensor(
[
[
[start_x, start_y, start_z],
[end_x, start_y, start_z],
[end_x, end_y, start_z],
[start_x, end_y, start_z],
[start_x, start_y, end_z],
[end_x, start_y, end_z],
[end_x, end_y, end_z],
[start_x, end_y, end_z],
]
],
device=tensor.device,
)
# [x, y, z] destination
# top-left-front, top-right-front, bottom-right-front, bottom-left-front
# top-left-back, top-right-back, bottom-right-back, bottom-left-back
points_dst: torch.Tensor = torch.tensor(
[
[
[0, 0, 0],
[dst_w - 1, 0, 0],
[dst_w - 1, dst_h - 1, 0],
[0, dst_h - 1, 0],
[0, 0, dst_d - 1],
[dst_w - 1, 0, dst_d - 1],
[dst_w - 1, dst_h - 1, dst_d - 1],
[0, dst_h - 1, dst_d - 1],
]
],
device=tensor.device,
).expand(points_src.shape[0], -1, -1)
return crop_by_boxes3d(
tensor, points_src.to(tensor.dtype), points_dst.to(tensor.dtype), interpolation, align_corners
)
def crop_by_boxes3d(
tensor: torch.Tensor,
src_box: torch.Tensor,
dst_box: torch.Tensor,
interpolation: str = 'bilinear',
align_corners: bool = False,
) -> torch.Tensor:
"""Perform crop transform on 3D volumes (5D tensor) by bounding boxes.
Given an input tensor, this function selected the interested areas by the provided bounding boxes (src_box).
Then the selected areas would be fitted into the targeted bounding boxes (dst_box) by a perspective transformation.
So far, the ragged tensor is not supported by PyTorch right now. This function hereby requires the bounding boxes
in a batch must be rectangles with same width, height and depth.
Args:
tensor : the 3D volume tensor with shape (B, C, D, H, W).
src_box : a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be extracted. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
dst_box: a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be placed. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
interpolation: Interpolation flag.
align_corners: mode for grid_generation.
Returns:
the output tensor with patches.
Examples:
>>> input = torch.tensor([[[
... [[ 0., 1., 2., 3.],
... [ 4., 5., 6., 7.],
... [ 8., 9., 10., 11.],
... [12., 13., 14., 15.]],
... [[16., 17., 18., 19.],
... [20., 21., 22., 23.],
... [24., 25., 26., 27.],
... [28., 29., 30., 31.]],
... [[32., 33., 34., 35.],
... [36., 37., 38., 39.],
... [40., 41., 42., 43.],
... [44., 45., 46., 47.]]]]])
>>> src_box = torch.tensor([[
... [1., 1., 1.],
... [3., 1., 1.],
... [3., 3., 1.],
... [1., 3., 1.],
... [1., 1., 2.],
... [3., 1., 2.],
... [3., 3., 2.],
... [1., 3., 2.],
... ]]) # 1x8x3
>>> dst_box = torch.tensor([[
... [0., 0., 0.],
... [2., 0., 0.],
... [2., 2., 0.],
... [0., 2., 0.],
... [0., 0., 1.],
... [2., 0., 1.],
... [2., 2., 1.],
... [0., 2., 1.],
... ]]) # 1x8x3
>>> crop_by_boxes3d(input, src_box, dst_box, interpolation='nearest', align_corners=True)
tensor([[[[[21., 22., 23.],
[25., 26., 27.],
[29., 30., 31.]],
<BLANKLINE>
[[37., 38., 39.],
[41., 42., 43.],
[45., 46., 47.]]]]])
"""
validate_bbox3d(src_box)
validate_bbox3d(dst_box)
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
# compute transformation between points and warp
# Note: Tensor.dtype must be float. "solve_cpu" not implemented for 'Long'
dst_trans_src: torch.Tensor = get_perspective_transform3d(src_box.to(tensor.dtype), dst_box.to(tensor.dtype))
# simulate broadcasting
dst_trans_src = dst_trans_src.expand(tensor.shape[0], -1, -1).type_as(tensor)
bbox = infer_bbox_shape3d(dst_box)
if not ((bbox[0] == bbox[0][0]).all() and (bbox[1] == bbox[1][0]).all() and (bbox[2] == bbox[2][0]).all()):
raise AssertionError(
"Cropping height, width and depth must be exact same in a batch."
f"Got height {bbox[0]}, width {bbox[1]} and depth {bbox[2]}."
)
patches: torch.Tensor = crop_by_transform_mat3d(
tensor,
dst_trans_src,
(int(bbox[0][0].item()), int(bbox[1][0].item()), int(bbox[2][0].item())),
mode=interpolation,
align_corners=align_corners,
)
return patches
def crop_by_transform_mat3d(
tensor: torch.Tensor,
transform: torch.Tensor,
out_size: Tuple[int, int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = True,
) -> torch.Tensor:
"""Perform crop transform on 3D volumes (5D tensor) given a perspective transformation matrix.
Args:
tensor: the 2D image tensor with shape (B, C, H, W).
transform: a perspective transformation matrix with shape (B, 4, 4).
out_size: size of the output image (depth, height, width).
mode: interpolation mode to calculate output values
``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values
``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: mode for grid_generation.
Returns:
the output tensor with patches.
"""
# simulate broadcasting
dst_trans_src = transform.expand(tensor.shape[0], -1, -1)
patches: torch.Tensor = warp_affine3d(
tensor, dst_trans_src[:, :3, :], out_size, flags=mode, padding_mode=padding_mode, align_corners=align_corners
)
return patches
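# Illustrative only (not a doctest from the library): with an identity transform the
# volume is resampled onto a grid of the requested output size, so the result has
# shape (B, C, *out_size), e.g.
#
#     vol = torch.arange(64, dtype=torch.float32).view(1, 1, 4, 4, 4)
#     eye = torch.eye(4)[None]                     # Bx4x4 identity transform
#     crop_by_transform_mat3d(vol, eye, (4, 4, 4)).shape  # -> torch.Size([1, 1, 4, 4, 4])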
|
py | 1a3ff8951f0a80ea3794b866352553c47713e0cd | # -*- coding: UTF-8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""
Ojuba Virtual CD
Copyright © 2011, Ojuba Team <[email protected]>
PyGtk+ front-end for fuseiso
Released under terms of Waqf Public License.
This program is free software; you can redistribute it and/or modify
it under the terms of the latest version Waqf Public License as
published by Ojuba.org.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
The Latest version of the license can be found on
"http://www.ojuba.org/wiki/doku.php/رخصة_وقف_العامة"
"""
import sys,os,os.path
import time
from gi.repository import Gtk, GObject
from subprocess import Popen,PIPE
import gettext
import re
from glob import glob
label_re=re.compile(r"""'([^']+)'""")
mount_prefix=os.path.expanduser('~/.virtuals')
_ps=[]
gettext.install('ojuba-virtual-cd', "/usr/share/locale", unicode=0)
def run_in_bg(cmd):
global _ps
setsid = getattr(os, 'setsid', None)
if not setsid: setsid = getattr(os, 'setpgrp', None)
    _ps = [x for x in _ps if x.poll() is None]  # drop terminated processes, keep only running ones
_ps.append(Popen(cmd,0,'/bin/sh',shell=True, preexec_fn=setsid))
def get_pids(l):
pids=[]
for i in l:
p=Popen(['/sbin/pidof',i], 0, stdout=PIPE)
l=p.communicate()[0].strip().split()
r=p.returncode
if r==0: pids.extend(l)
pids.sort()
return pids
def get_desktop():
"""return 1 for kde, 0 for gnome, -1 none of them"""
l=get_pids(('kwin','ksmserver',))
if l: kde=l[0]
else: kde=None
l=get_pids(('gnome-session',))
if l: gnome=l[0]
else: gnome=None
if kde:
if not gnome or kde<gnome: return 1
else: return 0
if gnome: return 0
else: return -1
def run_file_man(mp):
# TODO: add Dolphin here
if get_desktop()==0: run_in_bg("nautilus --no-desktop '%s'" % mp)
elif get_desktop()==1: run_in_bg("konqueror '%s'" % mp)
elif os.path.exists('/usr/bin/thunar'): run_in_bg("thunar '%s'" % mp)
elif os.path.exists('/usr/bin/pcmanfm'): run_in_bg("pcmanfm '%s'" % mp)
elif os.path.exists('/usr/bin/nautilus'): run_in_bg("nautilus --no-desktop '%s'" % mp)
elif os.path.exists('/usr/bin/konqueror'): run_in_bg("konqueror '%s'" % mp)
def bad(msg):
dlg = Gtk.MessageDialog (None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE,
msg)
dlg.run()
dlg.destroy()
def check_mount_prefix():
if not os.path.exists(mount_prefix):
try: os.makedirs(mount_prefix)
except OSError:
bad( _("Mount prefix [%s] is not found, please create it.") % mount_prefix )
sys.exit(1)
class VCDAbout(Gtk.AboutDialog):
def __init__(self, parent=None):
Gtk.AboutDialog.__init__(self, parent=parent)
self.set_default_response(Gtk.ResponseType.CLOSE)
self.connect('delete-event', lambda w, *a: w.hide() or True)
self.connect('response', lambda w, *a: w.hide() or True)
try: self.set_program_name("ojuba-virtual-cd")
except: pass
self.set_name(_("Ojuba Virtual CD"))
#about_dlg.set_version(version)
self.set_copyright("Copyright (c) 2008-2009 Muayyad Saleh Alsadi <[email protected]>")
self.set_comments(_("Mount CD/DVD images (iso, nrg, bin, mdf, img, ..etc.)"))
self.set_license("""
Released under terms on Waqf Public License.
This program is free software; you can redistribute it and/or modify
it under the terms of the latest version Waqf Public License as
published by Ojuba.org.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
The Latest version of the license can be found on
"http://www.ojuba.org/wiki/doku.php/waqf/license"
""")
self.set_website("http://virtualcd.ojuba.org/")
self.set_website_label("http://virtualcd.ojuba.org")
self.set_authors(["Muayyad Saleh Alsadi <[email protected]>", "a.atalla <[email protected]>"])
self.run()
self.destroy()
class VCD_mount_dlg(Gtk.FileChooserDialog):
def __init__(self):
Gtk.FileChooserDialog.__init__(self,_("Select CD/DVD image file"),buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT, Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
ff=Gtk.FileFilter()
ff.add_mime_type('application/x-cd-image')
for i in ('iso','nrg', 'bin','mdf','img'):
l=list(i)
ff.add_pattern('*.[%s%s][%s%s][%s%s]' % ( l[0],l[0].upper(), l[1],l[1].upper(), l[2],l[2].upper()))
self.set_filter(ff)
self.connect('delete-event', lambda w, *a: w.hide() or True)
self.connect('response', lambda w, *a: w.hide() or True)
class VCDStatusIcon(Gtk.StatusIcon):
def __init__(self):
Gtk.StatusIcon.__init__(self)
self.connect ('popup-menu', self.right_click_event)
self.set_title(_("OjubaVirtualCD"))
self.set_from_stock(Gtk.STOCK_CDROM)
self.mount_dlg = VCD_mount_dlg()
#self.about_dlg = VCDAbout()
self.setup_popup_menu()
self.startUP()
self.refresh_cb()
self.set_visible(True)
GObject.timeout_add(15000, self.refresh_timer)
def startUP(self):
if len(sys.argv)>1:
if (sys.argv[1]!='--hidden'):
for i in sys.argv[1:]: self.mount_f(i)
else: self.mount_cb()
def setup_popup_menu(self):
self.popup_menu = Gtk.Menu()
self.mounted_menu = Gtk.Menu()
self.open_menu = Gtk.Menu()
i = Gtk.MenuItem(_("Mount image"))
i.connect('activate', self.mount_cb)
self.popup_menu.add(i)
# self.mounted_menu.add(Gtk.SeparatorMenuItem())
i = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_REFRESH, None)
i.connect('activate', self.refresh_cb)
i.set_always_show_image(True)
self.mounted_menu.add(i)
self.open_menu.add(Gtk.SeparatorMenuItem.new())
i = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_REFRESH, None)
i.connect('activate', self.refresh_cb)
i.set_always_show_image(True)
self.open_menu.add(i)
self.popup_menu.add(Gtk.SeparatorMenuItem.new())
self.open_menu_item=i= Gtk.MenuItem(_("Open mounted image"))
i.set_submenu(self.open_menu)
self.popup_menu.add(i)
self.umount_menu_item=i= Gtk.MenuItem(_("Unmount"))
i.set_submenu(self.mounted_menu)
self.popup_menu.add(i)
self.popup_menu.add(Gtk.SeparatorMenuItem.new())
i = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_ABOUT, None)
i.connect('activate', self.about_cb)
i.set_always_show_image(True)
self.popup_menu.add(i)
i = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_QUIT, None)
i.connect('activate', Gtk.main_quit)
i.set_always_show_image(True)
self.popup_menu.add(i)
def right_click_event(self, icon, button, time):
self.popup_menu.show_all()
self.popup_menu.popup(None, None, Gtk.StatusIcon.position_menu, icon, button, time)
def refresh_timer(self):
self.refresh_cb(); return True;
def refresh_cb(self, *args):
self.popup_menu.popdown()
mm = Gtk.Menu()
oo = Gtk.Menu()
for i in os.listdir(mount_prefix):
mp = os.path.join(mount_prefix,i)
if (os.path.ismount(mp)):
j = Gtk.MenuItem(i.decode(sys.getfilesystemencoding()))
o = Gtk.MenuItem(i.decode(sys.getfilesystemencoding()))
j.connect('activate', self.umount_cb, i)
o.connect('activate', lambda a: run_file_man(mp))
mm.add(j)
oo.add(o)
mm.add(Gtk.SeparatorMenuItem())
oo.add(Gtk.SeparatorMenuItem())
i = Gtk.ImageMenuItem(Gtk.STOCK_REFRESH)
i.connect('activate', self.refresh_cb)
mm.add(i)
i = Gtk.ImageMenuItem(Gtk.STOCK_REFRESH)
i.connect('activate', self.refresh_cb)
oo.add(i)
mounted_menu = mm
open_menu = oo
g = self.open_menu_item.get_submenu()
s = self.umount_menu_item.get_submenu()
self.umount_menu_item.set_submenu(mm)
self.open_menu_item.set_submenu(oo)
del s, g
def mount_f(self, fn):
if not os.path.exists(fn): bad(_("File does not exist")); return -1
l=self.get_label(fn)
if not l: l=os.path.basename(fn)
mp=os.path.join( mount_prefix, l )
if os.path.exists(mp):
if os.path.ismount(os.path.join(mp)): bad(_("Already mounted")); return -2
try: os.rmdir(mp)
except OSError: bad(_("Mount point [%s] already exists, remove it please!") % mp); return -1
try: os.mkdir(mp)
except: bad(_('Could not create folder [%s]') % mp.decode(sys.getfilesystemencoding()) ); return -1
r=os.system('fuseiso -c UTF8 "%s" "%s"' % (fn, mp))
if r: bad(_("Could not mount [%s]") % mp); return -1
else: run_file_man(mp)
self.refresh_cb()
return 0
def mount_cb(self, *args):
if (self.mount_dlg.run()==Gtk.ResponseType.ACCEPT):
self.mount_f(self.mount_dlg.get_filename())
self.mount_dlg.hide()
def get_label_from_blkid(self, fn):
try:
p=Popen(['blkid','-o','value','-s','LABEL',fn], 0, stdout=PIPE)
l=p.communicate()[0].strip()
except: return None
r=p.returncode
if r==0 and l and len(l)>0: return l
else: return None
def get_label_from_file(self, fn):
try:
p=Popen(['file',fn], 0, stdout=PIPE)
o=p.communicate()[0].split(':',1)[1].strip()
l=label_re.findall(o)[0].strip()
except: return None
r=p.returncode
if r==0 and l and len(l)>0: return l
else: return None
def get_label(self, fn):
return self.get_label_from_blkid(fn) or self.get_label_from_file(fn)
def umount_cb(self, i, mp):
mpp=os.path.join(mount_prefix,mp.encode(sys.getfilesystemencoding()))
r=os.system("fusermount -u '%s'" % mpp)
if r: bad(_("Could not unmount [%s]") % mp)
else: os.rmdir(mpp)
self.refresh_cb()
def about_cb(self, *args):
#self.about_dlg.run()
return VCDAbout()
bus, bus_name, bus_object=None,None,None
try:
import dbus
import dbus.service
#import GObject # for GObject.MainLoop() if no Gtk is to be used
from dbus.mainloop.glib import DBusGMainLoop
dbus_loop = DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
except ImportError: pass
def init_dbus():
global bus_name, bus_object, app
if not bus: return
class Manager(dbus.service.Object):
def __init__(self, bus, path):
dbus.service.Object.__init__(self,bus,path)
@dbus.service.method("org.ojuba.VirtualCD", in_signature='as', out_signature='i')
def Mount(self,a):
r=0
for fn in a: r|=app.mount_f(fn)
return r
@dbus.service.method("org.ojuba.VirtualCD", in_signature='', out_signature='s')
def Version(self):
return "0.3.0"
# values from /usr/include/dbus-1.0/dbus/dbus-shared.h
r=bus.request_name('org.ojuba.VirtualCD', flags=0x4)
if r!=1:
print "Another process own OjubaVirtualCD Service, pass request to it: "
trials=0; appletbus=False
while(appletbus==False and trials<20):
print ".",
try:
appletbus=bus.get_object("org.ojuba.VirtualCD","/Manager"); break
except:
appletbus=False
time.sleep(1); trials+=1
print "*"
if len(sys.argv)==1: print "already running and no arguments passed"; exit(-1)
if appletbus: exit(appletbus.Mount(sys.argv[1:],dbus_interface='org.ojuba.VirtualCD'))
else: print "unable to connect"
exit(-1)
bus_name = dbus.service.BusName("org.ojuba.VirtualCD", bus)
bus_object = Manager(bus, '/Manager')
def main():
global app
check_mount_prefix()
for i in glob(os.path.join(mount_prefix,'*')):
if os.path.isdir(i):
try: os.rmdir(i)
except: pass
init_dbus()
app = VCDStatusIcon()
try: Gtk.main()
except KeyboardInterrupt: print "Exiting..."
if __name__ == '__main__':
main()
|
py | 1a3ffb0a8c1f793d5d3f19c341680b72a88899ea | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.common import constants as lib_constants
from wsme import types as wtypes
from octavia.api.common import types
from octavia.api.v2.types import health_monitor
from octavia.api.v2.types import member
from octavia.common import constants
class SessionPersistenceResponse(types.BaseType):
"""Defines which attributes are to be shown on any response."""
type = wtypes.wsattr(wtypes.text)
cookie_name = wtypes.wsattr(wtypes.text)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType())
persistence_granularity = wtypes.wsattr(types.IPAddressType())
class SessionPersistencePOST(types.BaseType):
"""Defines mandatory and optional attributes of a POST request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES),
mandatory=True)
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class SessionPersistencePUT(types.BaseType):
"""Defines attributes that are acceptable of a PUT request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES))
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class BasePoolType(types.BaseType):
_type_to_model_map = {'admin_state_up': 'enabled',
'healthmonitor': 'health_monitor',
'healthmonitor_id': 'health_monitor.id',
'tls_container_ref': 'tls_certificate_id',
'ca_tls_container_ref': 'ca_tls_certificate_id',
'crl_container_ref': 'crl_container_id'}
_child_map = {'health_monitor': {'id': 'healthmonitor_id'}}
class PoolResponse(BasePoolType):
"""Defines which attributes are to be shown on any response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
description = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
admin_state_up = wtypes.wsattr(bool)
protocol = wtypes.wsattr(wtypes.text)
lb_algorithm = wtypes.wsattr(wtypes.text)
session_persistence = wtypes.wsattr(SessionPersistenceResponse)
project_id = wtypes.wsattr(wtypes.StringType())
loadbalancers = wtypes.wsattr([types.IdOnlyType])
listeners = wtypes.wsattr([types.IdOnlyType])
created_at = wtypes.wsattr(wtypes.datetime.datetime)
updated_at = wtypes.wsattr(wtypes.datetime.datetime)
healthmonitor_id = wtypes.wsattr(wtypes.UuidType())
members = wtypes.wsattr([types.IdOnlyType])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
tls_container_ref = wtypes.wsattr(wtypes.StringType())
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType())
crl_container_ref = wtypes.wsattr(wtypes.StringType())
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType())
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolResponse, cls).from_data_model(
data_model, children=children)
if data_model.session_persistence:
pool.session_persistence = (
SessionPersistenceResponse.from_data_model(
data_model.session_persistence))
if cls._full_response():
del pool.loadbalancers
member_model = member.MemberFullResponse
if pool.healthmonitor:
pool.healthmonitor = (
health_monitor.HealthMonitorFullResponse
.from_data_model(data_model.health_monitor))
else:
if data_model.load_balancer:
pool.loadbalancers = [
types.IdOnlyType.from_data_model(data_model.load_balancer)]
else:
pool.loadbalancers = []
member_model = types.IdOnlyType
if data_model.health_monitor:
pool.healthmonitor_id = data_model.health_monitor.id
pool.listeners = [
types.IdOnlyType.from_data_model(i) for i in data_model.listeners]
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
pool.tls_versions = data_model.tls_versions
return pool
class PoolFullResponse(PoolResponse):
@classmethod
def _full_response(cls):
return True
members = wtypes.wsattr([member.MemberFullResponse])
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse)
class PoolRootResponse(types.BaseType):
pool = wtypes.wsattr(PoolResponse)
class PoolsRootResponse(types.BaseType):
pools = wtypes.wsattr([PoolResponse])
pools_links = wtypes.wsattr([types.PageType])
class PoolPOST(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
listener_id = wtypes.wsattr(wtypes.UuidType())
loadbalancer_id = wtypes.wsattr(wtypes.UuidType())
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS),
mandatory=True)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS),
mandatory=True)
session_persistence = wtypes.wsattr(SessionPersistencePOST)
# TODO(johnsom) Remove after deprecation (R series)
project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(
wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolRootPOST(types.BaseType):
pool = wtypes.wsattr(PoolPOST)
class PoolPUT(BasePoolType):
"""Defines attributes that are acceptable of a PUT request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePUT)
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolRootPut(types.BaseType):
pool = wtypes.wsattr(PoolPUT)
class PoolSingleCreate(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS))
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePOST)
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolStatusResponse(BasePoolType):
"""Defines which attributes are to be shown on status response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
health_monitor = wtypes.wsattr(
health_monitor.HealthMonitorStatusResponse)
members = wtypes.wsattr([member.MemberStatusResponse])
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolStatusResponse, cls).from_data_model(
data_model, children=children)
member_model = member.MemberStatusResponse
if data_model.health_monitor:
pool.health_monitor = (
health_monitor.HealthMonitorStatusResponse.from_data_model(
data_model.health_monitor))
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
return pool
|
py | 1a3ffbfa56c6c5fca9854a0972aba449015608bb | class GetoptError(Exception):
pass
def w_getopt(args, options):
"""A getopt for Windows.
Options may start with either '-' or '/', the option names may
have more than one letter (/tlb or -RegServer), and option names
are case insensitive.
Returns two elements, just as getopt.getopt. The first is a list
of (option, value) pairs in the same way getopt.getopt does, but
there is no '-' or '/' prefix to the option name, and the option
name is always lower case. The second is the list of arguments
which do not belong to an option.
Different from getopt.getopt, a single argument not belonging to an option
does not terminate parsing.
"""
opts = []
arguments = []
while args:
if args[0][:1] in "/-":
arg = args[0][1:] # strip the '-' or '/'
arg = arg.lower()
if arg + ':' in options:
try:
opts.append((arg, args[1]))
except IndexError:
raise GetoptError("option '%s' requires an argument" % args[0])
args = args[1:]
elif arg in options:
opts.append((arg, ''))
else:
raise GetoptError("invalid option '%s'" % args[0])
args = args[1:]
else:
arguments.append(args[0])
args = args[1:]
return opts, arguments
if __debug__:
if __name__ == "__main__":
import unittest
class TestCase(unittest.TestCase):
def test_1(self):
args = "-embedding spam /RegServer foo /UnregSERVER blabla".split()
opts, args = w_getopt(args,
"regserver unregserver embedding".split())
self.assertEqual(opts,
[('embedding', ''),
('regserver', ''),
('unregserver', '')])
self.assertEqual(args, ["spam", "foo", "blabla"])
def test_2(self):
args = "/TLB Hello.Tlb HELLO.idl".split()
opts, args = w_getopt(args, ["tlb:"])
self.assertEqual(opts, [('tlb', 'Hello.Tlb')])
self.assertEqual(args, ['HELLO.idl'])
def test_3(self):
# Invalid option
self.assertRaises(GetoptError, w_getopt,
"/TLIB hello.tlb hello.idl".split(), ["tlb:"])
def test_4(self):
# Missing argument
self.assertRaises(GetoptError, w_getopt,
"/TLB".split(), ["tlb:"])
unittest.main()
|
py | 1a3ffc11136f9722bf80461a13b812c1a0cacf23 | import pytest
import logging
import io
from qcodes.instrument_drivers.stahl import Stahl
import qcodes.instrument.sims as sims
@pytest.fixture(scope="function")
def stahl_instrument():
visa_lib = sims.__file__.replace(
'__init__.py',
'stahl.yaml@sim'
)
inst = Stahl('Stahl', 'ASRL3', visalib=visa_lib)
inst.log.setLevel(logging.DEBUG)
iostream = io.StringIO()
lh = logging.StreamHandler(iostream)
inst.log.logger.addHandler(lh)
try:
yield inst
finally:
inst.close()
def test_parse_idn_string():
"""
Test that we can parse IDN strings correctly
"""
assert Stahl.parse_idn_string("HV123 005 16 b") == {
"model": "HV",
"serial_number": "123",
"voltage_range": 5.0,
"n_channels": 16,
"output_type": "bipolar"
}
with pytest.raises(
RuntimeError,
match="Unexpected instrument response"
):
Stahl.parse_idn_string("HS123 005 16 bla b")
def test_get_idn(stahl_instrument):
"""
Instrument attributes are set correctly after getting the IDN
"""
assert stahl_instrument.IDN() == {
"vendor": "Stahl",
"model": "BS",
"serial": "123",
"firmware": None
}
assert stahl_instrument.n_channels == 16
assert stahl_instrument.voltage_range == 5.0
assert stahl_instrument.output_type == "bipolar"
def test_get_set_voltage(stahl_instrument):
"""
Test that we can correctly get/set voltages
"""
stahl_instrument.channel[0].voltage(1.2)
assert stahl_instrument.channel[0].voltage() == -1.2
logger = stahl_instrument.log.logger
log_messages = logger.handlers[0].stream.getvalue()
assert "did not produce an acknowledge reply" not in log_messages
def test_get_set_voltage_assert_warning(stahl_instrument):
"""
On channel 2 we have deliberately introduced an error in the
visa simulation; setting a voltage does not produce an acknowledge
string. Test that a warning is correctly issued.
"""
stahl_instrument.channel[1].voltage(1.0)
logger = stahl_instrument.log.logger
log_messages = logger.handlers[0].stream.getvalue()
assert "did not produce an acknowledge reply" in log_messages
def test_get_current(stahl_instrument):
"""
Test that we can read currents and that the unit is in Ampere
"""
assert stahl_instrument.channel[0].current() == 1E-6
assert stahl_instrument.channel[0].current.unit == "A"
def test_get_temperature(stahl_instrument):
"""
Due to limitations in pyvisa-sim, we cannot test this.
Line 191 of pyvisa-sim/component.py should read
"return response.encode('latin-1')" for this to work.
"""
pass
|
py | 1a3ffc3d94ef5b6407d4cef2ef182eccf57b503c | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import pandas as pd
# Data-file details
filename = "PVLibSolarData.csv"
timezone_original = 'UTC'
timezone_new = 'US/Eastern'
# Version details
range1 = ['2017-07-01', '2017-07-31']
range1_name = 'July'
range2 = ['2017-10-30', '2017-10-30']
range2_name = 'Oct30th'
# -----
# Read-in data file
# -----
df = pd.read_csv(filename)
# -----
# Convert timezone
# -----
df.index = pd.to_datetime(df.loc[:, 'DatetimeUTC'])
df.index = df.index.tz_localize(timezone_original)
df.index = df.index.tz_convert(timezone_new)
# -----
# Initial Calculations
# -----
df_out = pd.DataFrame(columns=['dt', 'hour', 'demand', 'solar'])
df_out.index.name = 'Datetime'
df_out['dt'] = df.loc[:, 'dt']
df_out['hour'] = df.index.hour
df_out['demand'] = df.loc[:, 'demand']
for i in range(2):
# -----
# Case specific calculations
# -----
if i == 0:
# Case 1 - 1% solar
case = 'data001'
df_out['solar'] = df.loc[:, 'UVA_Rooftop']
else:
# Case 2 - 63% solar
case = 'data063'
df_out['solar'] = df.loc[:, 'Rooftop_and_32MWTracker']
# A - Entire Timeperiod
savename = case + '.csv'
df_out.to_csv(savename, index=False)
# B - Range1
savename = case + '_' + range1_name + '.csv'
df_out[range1[0]:range1[1]].to_csv(savename, index=True)
# C - Range2
savename = case + '_' + range2_name + '.csv'
df_out[range2[0]:range2[1]].to_csv(savename, index=True)
|
py | 1a3ffcd441b2a080789ab72d2e83586592e7ab43 | #!/usr/bin/env python
from argparse import FileType
import sys
import agate
from sqlalchemy import create_engine
from csvkit.cli import CSVKitUtility
class SQL2CSV(CSVKitUtility):
description = 'Execute an SQL query on a database and output the result to a CSV file.'
override_flags = 'f,b,d,e,H,p,q,S,t,u,z,zero'.split(',')
def add_arguments(self):
self.argparser.add_argument('--db', dest='connection_string', default='sqlite://',
help='An sqlalchemy connection string to connect to a database.',)
self.argparser.add_argument('file', metavar="FILE", nargs='?', type=FileType('rt'), default=sys.stdin,
help='The file to use as SQL query. If both FILE and QUERY are omitted, query will be read from STDIN.')
self.argparser.add_argument('--query', default=None,
help="The SQL query to execute. If specified, it overrides FILE and STDIN.")
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Do not output column names.')
self.argparser.set_defaults(
delimiter=None,
doublequote=None,
escapechar=None,
encoding='utf-8',
field_size_limit=None,
quotechar=None,
quoting=None,
skipinitialspace=None,
tabs=None,
)
def main(self):
try:
engine = create_engine(self.args.connection_string)
except ImportError:
raise ImportError('You don\'t appear to have the necessary database backend installed for connection string you\'re trying to use. Available backends include:\n\nPostgresql:\tpip install psycopg2\nMySQL:\t\tpip install MySQL-python\n\nFor details on connection strings and other backends, please see the SQLAlchemy documentation on dialects at: \n\nhttp://www.sqlalchemy.org/docs/dialects/\n\n')
connection = engine.connect()
if self.args.query:
query = self.args.query.strip()
else:
query = ""
for line in self.args.file:
query += line
# Must escape '%'.
# @see https://github.com/wireservice/csvkit/issues/440
# @see https://bitbucket.org/zzzeek/sqlalchemy/commits/5bc1f17cb53248e7cea609693a3b2a9bb702545b
rows = connection.execute(query.replace('%', '%%'))
output = agate.csv.writer(self.output_file, **self.writer_kwargs)
if rows.returns_rows:
if not self.args.no_header_row:
output.writerow(rows._metadata.keys)
for row in rows:
output.writerow(row)
connection.close()
def launch_new_instance():
utility = SQL2CSV()
utility.run()
if __name__ == '__main__':
launch_new_instance()
|
py | 1a3ffd3749b5f044ef2ee8e21917bc6f9497bd40 | import datetime
from collections import defaultdict
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import column_property, validates
db = SQLAlchemy()
ma = Marshmallow()
def get_class_by_tablename(tablename):
"""Return class reference mapped to table.
https://stackoverflow.com/a/23754464
:param tablename: String with name of table.
:return: Class reference or None.
"""
for c in db.Model._decl_class_registry.values():
if hasattr(c, "__tablename__") and c.__tablename__ == tablename:
return c
return None
class Votes(db.Model):
__tablename__ = "votes"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
challenge_id = db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"))
value = db.Column(db.Boolean, default=False)
user = db.relationship("Users", foreign_keys="Votes.user_id", lazy="select")
class Badges(db.Model):
__tablename__ = "badges"
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.Text)
name = db.Column(db.String(80))
tag_id = db.Column(db.Integer, db.ForeignKey("tags.id", ondelete="CASCADE"), nullable=False)
def __init__(self, *args, **kwargs):
super(Badges, self).__init__(**kwargs)
class Notifications(db.Model):
__tablename__ = "notifications"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Text)
content = db.Column(db.Text)
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
user = db.relationship("Users", foreign_keys="Notifications.user_id", lazy="select")
@property
def html(self):
from CTFd.utils.config.pages import build_html
from CTFd.utils.helpers import markup
return markup(build_html(self.content))
def __init__(self, *args, **kwargs):
super(Notifications, self).__init__(**kwargs)
class Pages(db.Model):
__tablename__ = "pages"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80))
route = db.Column(db.String(128), unique=True)
content = db.Column(db.Text)
draft = db.Column(db.Boolean)
hidden = db.Column(db.Boolean)
auth_required = db.Column(db.Boolean)
# TODO: Use hidden attribute
files = db.relationship("PageFiles", backref="page")
def __init__(self, *args, **kwargs):
super(Pages, self).__init__(**kwargs)
def __repr__(self):
return "<Pages {0}>".format(self.route)
class Challenges(db.Model):
__tablename__ = "challenges"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
description = db.Column(db.Text)
max_attempts = db.Column(db.Integer, default=0)
type = db.Column(db.String(80))
state = db.Column(db.String(80), nullable=False, default="visible")
requirements = db.Column(db.JSON)
author_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
files = db.relationship("ChallengeFiles", backref="challenge")
resources = db.relationship("Resources", backref="challenge")
tags = db.relationship("Tags", secondary="tag_challenge")
flags = db.relationship("Flags", backref="challenge")
comments = db.relationship("ChallengeComments", backref="challenge")
author = db.relationship("Users", foreign_keys="Challenges.author_id", lazy="select")
class alt_defaultdict(defaultdict):
"""
This slightly modified defaultdict is intended to allow SQLAlchemy to
not fail when querying Challenges that contain a missing challenge type.
e.g. Challenges.query.all() should not fail if `type` is `a_missing_type`
"""
def __missing__(self, key):
return self["standard"]
__mapper_args__ = {
"polymorphic_identity": "standard",
"polymorphic_on": type,
"_polymorphic_map": alt_defaultdict(),
}
@property
def html(self):
from CTFd.utils.config.pages import build_html
from CTFd.utils.helpers import markup
return markup(build_html(self.description))
def __init__(self, *args, **kwargs):
super(Challenges, self).__init__(**kwargs)
def __repr__(self):
return "<Challenge %r>" % self.name
class Resources(db.Model):
__tablename__ = "resources"
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), default="standard")
challenge_id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
)
content = db.Column(db.Text)
__mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type}
@property
def name(self):
return "Resource {id}".format(id=self.id)
@property
def category(self):
return self.__tablename__
@property
def description(self):
return "Resource for {name}".format(name=self.challenge.name)
@property
def html(self):
from CTFd.utils.config.pages import build_html
from CTFd.utils.helpers import markup
return markup(build_html(self.content))
def __init__(self, *args, **kwargs):
super(Resources, self).__init__(**kwargs)
def __repr__(self):
return "<Resource %r>" % self.content
class Tags(db.Model):
__tablename__ = "tags"
id = db.Column(db.Integer, primary_key=True)
value = db.Column(db.String(80))
exercise = db.Column(db.Boolean)
challenges = db.relationship("Challenges", secondary="tag_challenge")
def __init__(self, *args, **kwargs):
super(Tags, self).__init__(**kwargs)
class TagChallenge(db.Model):
__tablename__ = "tag_challenge"
challenge_id = db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"),
primary_key=True, nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey("tags.id", ondelete="CASCADE"), primary_key=True, nullable=False)
def __init__(self, *args, **kwargs):
super(TagChallenge, self).__init__(**kwargs)
class Files(db.Model):
__tablename__ = "files"
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), default="standard")
location = db.Column(db.Text)
__mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type}
def __init__(self, *args, **kwargs):
super(Files, self).__init__(**kwargs)
def __repr__(self):
return "<File type={type} location={location}>".format(
type=self.type, location=self.location
)
class ChallengeFiles(Files):
__mapper_args__ = {"polymorphic_identity": "challenge"}
challenge_id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
)
def __init__(self, *args, **kwargs):
super(ChallengeFiles, self).__init__(**kwargs)
class PageFiles(Files):
__mapper_args__ = {"polymorphic_identity": "page"}
page_id = db.Column(db.Integer, db.ForeignKey("pages.id"))
def __init__(self, *args, **kwargs):
super(PageFiles, self).__init__(**kwargs)
class Flags(db.Model):
__tablename__ = "flags"
id = db.Column(db.Integer, primary_key=True)
challenge_id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
)
type = db.Column(db.String(80))
content = db.Column(db.Text)
data = db.Column(db.Text)
__mapper_args__ = {"polymorphic_on": type}
def __init__(self, *args, **kwargs):
super(Flags, self).__init__(**kwargs)
def __repr__(self):
return "<Flag {0} for challenge {1}>".format(self.content, self.challenge_id)
class Users(db.Model):
__tablename__ = "users"
__table_args__ = (db.UniqueConstraint("id"), {})
# Core attributes
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
password = db.Column(db.String(128))
email = db.Column(db.String(128), unique=True)
type = db.Column(db.String(80))
secret = db.Column(db.String(128))
# Supplementary attributes
website = db.Column(db.String(128))
country = db.Column(db.String(32))
school = db.Column(db.String(32))
cursus = db.Column(db.String(128))
specialisation = db.Column(db.String(128))
bracket = db.Column(db.String(32))
hidden = db.Column(db.Boolean, default=False)
banned = db.Column(db.Boolean, default=False)
verified = db.Column(db.Boolean, default=False)
field_entries = db.relationship(
"UserFieldEntries", foreign_keys="UserFieldEntries.user_id", lazy="joined"
)
created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
__mapper_args__ = {"polymorphic_identity": "user", "polymorphic_on": type}
def __init__(self, **kwargs):
super(Users, self).__init__(**kwargs)
@validates("password")
def validate_password(self, key, plaintext):
from CTFd.utils.crypto import hash_password
return hash_password(str(plaintext))
@hybrid_property
def account_id(self):
return self.id
@hybrid_property
def account(self):
return self
@property
def fields(self):
return self.get_fields(admin=False)
@property
def solves(self):
return self.get_solves()
@property
def fails(self):
return self.get_fails()
def get_fields(self, admin=False):
if admin:
return self.field_entries
return [
entry for entry in self.field_entries if entry.field.public and entry.value
]
def get_solves(self):
solves = Solves.query.filter_by(user_id=self.id)
return solves.all()
def get_fails(self):
fails = Fails.query.filter_by(user_id=self.id)
return fails.all()
class Admins(Users):
__tablename__ = "admins"
__mapper_args__ = {"polymorphic_identity": "admin"}
class Contributors(Users):
__tablename__ = "contributors"
__mapper_args__ = {"polymorphic_identity": "contributor"}
class Teachers(Users):
__tablename__ = "teachers"
__mapper_args__ = {"polymorphic_identity": "teacher"}
class Submissions(db.Model):
__tablename__ = "submissions"
id = db.Column(db.Integer, primary_key=True)
challenge_id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
)
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
ip = db.Column(db.String(46))
provided = db.Column(db.Text)
type = db.Column(db.String(32))
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# Relationships
user = db.relationship("Users", foreign_keys="Submissions.user_id", lazy="select")
challenge = db.relationship(
"Challenges", foreign_keys="Submissions.challenge_id", lazy="select"
)
__mapper_args__ = {"polymorphic_on": type}
@hybrid_property
def account_id(self):
return self.user_id
@hybrid_property
def account(self):
return self.user
@staticmethod
def get_child(type):
child_classes = {
x.polymorphic_identity: x.class_
for x in Submissions.__mapper__.self_and_descendants
}
return child_classes[type]
def __repr__(self):
return f"<Submission id={self.id}, challenge_id={self.challenge_id}, ip={self.ip}, provided={self.provided}>"
class Solves(Submissions):
__tablename__ = "solves"
__table_args__ = (
db.UniqueConstraint("challenge_id", "user_id"),
{},
)
id = db.Column(
None, db.ForeignKey("submissions.id", ondelete="CASCADE"), primary_key=True
)
challenge_id = column_property(
db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")),
Submissions.challenge_id,
)
user_id = column_property(
db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")),
Submissions.user_id,
)
user = db.relationship("Users", foreign_keys="Solves.user_id", lazy="select")
challenge = db.relationship(
"Challenges", foreign_keys="Solves.challenge_id", lazy="select"
)
__mapper_args__ = {"polymorphic_identity": "correct"}
class Fails(Submissions):
__mapper_args__ = {"polymorphic_identity": "incorrect"}
class Tracking(db.Model):
__tablename__ = "tracking"
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.String(46))
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship("Users", foreign_keys="Tracking.user_id", lazy="select")
def __init__(self, *args, **kwargs):
super(Tracking, self).__init__(**kwargs)
def __repr__(self):
return "<Tracking %r>" % self.ip
class Configs(db.Model):
__tablename__ = "config"
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.Text)
value = db.Column(db.Text)
def __init__(self, *args, **kwargs):
super(Configs, self).__init__(**kwargs)
class Tokens(db.Model):
__tablename__ = "tokens"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
expiration = db.Column(
db.DateTime,
default=lambda: datetime.datetime.utcnow() + datetime.timedelta(days=30),
)
value = db.Column(db.String(128), unique=True)
user = db.relationship("Users", foreign_keys="Tokens.user_id", lazy="select")
def __init__(self, *args, **kwargs):
super(Tokens, self).__init__(**kwargs)
def __repr__(self):
return "<Token %r>" % self.id
class UserTokens(Tokens):
__mapper_args__ = {"polymorphic_identity": "user"}
class Comments(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), default="standard")
content = db.Column(db.Text)
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
author = db.relationship("Users", foreign_keys="Comments.author_id", lazy="select")
@property
def html(self):
from CTFd.utils.config.pages import build_html
from CTFd.utils.helpers import markup
return markup(build_html(self.content, sanitize=True))
__mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type}
class ChallengeComments(Comments):
__mapper_args__ = {"polymorphic_identity": "challenge"}
challenge_id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")
)
class UserComments(Comments):
__mapper_args__ = {"polymorphic_identity": "user"}
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
class PageComments(Comments):
__mapper_args__ = {"polymorphic_identity": "page"}
page_id = db.Column(db.Integer, db.ForeignKey("pages.id", ondelete="CASCADE"))
class Fields(db.Model):
__tablename__ = "fields"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
type = db.Column(db.String(80), default="standard")
field_type = db.Column(db.String(80))
description = db.Column(db.Text)
required = db.Column(db.Boolean, default=False)
public = db.Column(db.Boolean, default=False)
editable = db.Column(db.Boolean, default=False)
__mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type}
class UserFields(Fields):
__mapper_args__ = {"polymorphic_identity": "user"}
class FieldEntries(db.Model):
__tablename__ = "field_entries"
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), default="standard")
value = db.Column(db.JSON)
field_id = db.Column(db.Integer, db.ForeignKey("fields.id", ondelete="CASCADE"))
field = db.relationship(
"Fields", foreign_keys="FieldEntries.field_id", lazy="joined"
)
__mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type}
@hybrid_property
def name(self):
return self.field.name
@hybrid_property
def description(self):
return self.field.description
class UserFieldEntries(FieldEntries):
__mapper_args__ = {"polymorphic_identity": "user"}
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"))
user = db.relationship("Users", foreign_keys="UserFieldEntries.user_id")
class Rights(db.Model):
__tablename__ = "rights"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
def __init__(self, *args, **kwargs):
super(Rights, self).__init__(**kwargs)
class Roles(db.Model):
__tablename__ = "roles"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
def __init__(self, *args, **kwargs):
super(Roles, self).__init__(**kwargs)
class RoleRights(db.Model):
__tablename__ = "role_rights"
role_id = db.Column(db.Integer, db.ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True, nullable=False)
right_id = db.Column(db.Integer, db.ForeignKey("rights.id", ondelete="CASCADE"), primary_key=True, nullable=False)
def __init__(self, *args, **kwargs):
super(RoleRights, self).__init__(**kwargs)
class UserRights(db.Model):
__tablename__ = "user_rights"
user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"), primary_key=True, nullable=False)
right_id = db.Column(db.Integer, db.ForeignKey("rights.id", ondelete="CASCADE"), primary_key=True, nullable=False)
def __init__(self, *args, **kwargs):
super(UserRights, self).__init__(**kwargs)
|
py | 1a3ffd786c5c7507e6240c955ba68f45f9446b8f | import re
from typing import Optional, cast # noqa: F401
import flask_app.constants as constants
from flask import abort, current_app, g, jsonify, make_response, redirect, render_template, request
from flask_app.app_utils import (
add_session,
authenticated,
authorized,
get_session_username,
new_session_id,
next_month_link,
previous_month_link,
)
from flask_app.authentication import Authentication
from flask_app.calendar_data import CalendarData
from flask_app.gregorian_calendar import GregorianCalendar
from werkzeug.wrappers import Response
def get_authentication() -> Authentication:
auth = getattr(g, "_auth", None)
if auth is None:
auth = g._auth = Authentication(
data_folder=current_app.config["USERS_DATA_FOLDER"],
password_salt=current_app.config["PASSWORD_SALT"],
failed_login_delay_base=current_app.config["FAILED_LOGIN_DELAY_BASE"],
)
return cast(Authentication, auth)
@authenticated
def index_action() -> Response:
username = get_session_username(session_id=str(request.cookies.get(constants.SESSION_ID)))
authentication = get_authentication()
user_data = authentication.user_data(username)
return redirect("/{}/".format(user_data["default_calendar"]))
def login_action() -> Response:
return cast(Response, render_template("login.html"))
def do_login_action() -> Response:
username = request.form.get("username", "")
password = request.form.get("password", "")
authentication = get_authentication()
if authentication.is_valid(username, password):
session_id = new_session_id()
add_session(session_id, username)
response = make_response(redirect("/"))
cookie_kwargs = {
"key": constants.SESSION_ID,
"value": session_id,
# 1 month
"max_age": 2678400,
"secure": current_app.config["COOKIE_HTTPS_ONLY"],
"httponly": True,
}
samesite_policy = current_app.config.get("COOKIE_SAMESITE_POLICY", None)
# Certain Flask versions don't support 'samesite' param
if samesite_policy:
cookie_kwargs.update({"samesite": samesite_policy})
response.set_cookie(**cookie_kwargs)
return cast(Response, response)
else:
return redirect("/login")
@authenticated
@authorized
def main_calendar_action(calendar_id: str) -> Response:
GregorianCalendar.setfirstweekday(current_app.config["WEEK_STARTING_DAY"])
current_day, current_month, current_year = GregorianCalendar.current_date()
year = int(request.args.get("y", current_year))
year = max(min(year, current_app.config["MAX_YEAR"]), current_app.config["MIN_YEAR"])
month = int(request.args.get("m", current_month))
month = max(min(month, 12), 1)
month_name = GregorianCalendar.MONTH_NAMES[month - 1]
if current_app.config["HIDE_PAST_TASKS"]:
view_past_tasks = False
else:
view_past_tasks = request.cookies.get("ViewPastTasks", "1") == "1"
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
try:
data = calendar_data.load_calendar(calendar_id)
except FileNotFoundError:
abort(404)
tasks = calendar_data.tasks_from_calendar(year, month, data)
tasks = calendar_data.add_repetitive_tasks_from_calendar(year, month, data, tasks)
if not view_past_tasks:
calendar_data.hide_past_tasks(year, month, tasks)
if current_app.config["WEEK_STARTING_DAY"] == constants.WEEK_START_DAY_MONDAY:
weekdays_headers = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
else:
weekdays_headers = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]
return cast(
Response,
render_template(
"calendar.html",
calendar_id=calendar_id,
year=year,
month=month,
month_name=month_name,
current_year=current_year,
current_month=current_month,
current_day=current_day,
month_days=GregorianCalendar.month_days(year, month),
previous_month_link=previous_month_link(year, month),
next_month_link=next_month_link(year, month),
base_url=current_app.config["BASE_URL"],
tasks=tasks,
display_view_past_button=current_app.config["SHOW_VIEW_PAST_BUTTON"],
weekdays_headers=weekdays_headers,
),
)
@authenticated
@authorized
def new_task_action(calendar_id: str, year: int, month: int) -> Response:
GregorianCalendar.setfirstweekday(current_app.config["WEEK_STARTING_DAY"])
current_day, current_month, current_year = GregorianCalendar.current_date()
year = max(min(int(year), current_app.config["MAX_YEAR"]), current_app.config["MIN_YEAR"])
month = max(min(int(month), 12), 1)
month_names = GregorianCalendar.MONTH_NAMES
if current_month == month and current_year == year:
day = current_day
else:
day = 1
day = int(request.args.get("day", day))
task = {
"date": CalendarData.date_for_frontend(year, month, day),
"is_all_day": True,
"repeats": False,
"details": "",
}
emojis_enabled = current_app.config.get("EMOJIS_ENABLED", False)
return cast(
Response,
render_template(
"task.html",
calendar_id=calendar_id,
year=year,
month=month,
min_year=current_app.config["MIN_YEAR"],
max_year=current_app.config["MAX_YEAR"],
month_names=month_names,
task=task,
base_url=current_app.config["BASE_URL"],
editing=False,
emojis_enabled=emojis_enabled,
button_default_color_value=current_app.config["BUTTON_CUSTOM_COLOR_VALUE"],
buttons_colors=current_app.config["BUTTONS_COLORS_LIST"],
buttons_emojis=current_app.config["BUTTONS_EMOJIS_LIST"] if emojis_enabled else tuple(),
),
)
@authenticated
@authorized
def edit_task_action(calendar_id: str, year: int, month: int, day: int, task_id: int) -> Response:
month_names = GregorianCalendar.MONTH_NAMES
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
repeats = request.args.get("repeats") == "1"
try:
if repeats:
task = calendar_data.repetitive_task_from_calendar(
calendar_id=calendar_id, year=year, month=month, task_id=int(task_id)
)
else:
task = calendar_data.task_from_calendar(
calendar_id=calendar_id, year=year, month=month, day=day, task_id=int(task_id),
)
except (FileNotFoundError, IndexError):
abort(404)
if task["details"] == " ":
task["details"] = ""
emojis_enabled = current_app.config.get("EMOJIS_ENABLED", False)
return cast(
Response,
render_template(
"task.html",
calendar_id=calendar_id,
year=year,
month=month,
day=day,
min_year=current_app.config["MIN_YEAR"],
max_year=current_app.config["MAX_YEAR"],
month_names=month_names,
task=task,
base_url=current_app.config["BASE_URL"],
editing=True,
emojis_enabled=emojis_enabled,
button_default_color_value=current_app.config["BUTTON_CUSTOM_COLOR_VALUE"],
buttons_colors=current_app.config["BUTTONS_COLORS_LIST"],
buttons_emojis=current_app.config["BUTTONS_EMOJIS_LIST"] if emojis_enabled else tuple(),
),
)
@authenticated
@authorized
def update_task_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
    # Logic is the same as save + delete; this could be refactored, but that can wait until the save/delete logic needs to change
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
# For creation of "updated" task use only form data
title = request.form["title"].strip()
date = request.form.get("date", "")
if len(date) > 0:
fragments = re.split("-", date)
updated_year = int(fragments[0]) # type: Optional[int]
updated_month = int(fragments[1]) # type: Optional[int]
updated_day = int(fragments[2]) # type: Optional[int]
else:
updated_year = updated_month = updated_day = None
is_all_day = request.form.get("is_all_day", "0") == "1"
start_time = request.form["start_time"]
end_time = request.form.get("end_time", None)
details = request.form["details"].replace("\r", "").replace("\n", "<br>")
color = request.form["color"]
has_repetition = request.form.get("repeats", "0") == "1"
repetition_type = request.form.get("repetition_type", "")
repetition_subtype = request.form.get("repetition_subtype", "")
repetition_value = int(request.form["repetition_value"]) # type: int
calendar_data.create_task(
calendar_id=calendar_id,
year=updated_year,
month=updated_month,
day=updated_day,
title=title,
is_all_day=is_all_day,
start_time=start_time,
end_time=end_time,
details=details,
color=color,
has_repetition=has_repetition,
repetition_type=repetition_type,
repetition_subtype=repetition_subtype,
repetition_value=repetition_value,
)
# For deletion of old task data use only url data
calendar_data.delete_task(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id),
)
if updated_year is None:
return redirect("{}/{}/".format(current_app.config["BASE_URL"], calendar_id), code=302)
else:
return redirect(
"{}/{}/?y={}&m={}".format(current_app.config["BASE_URL"], calendar_id, updated_year, updated_month),
code=302,
)
@authenticated
@authorized
def save_task_action(calendar_id: str) -> Response:
title = request.form["title"].strip()
date = request.form.get("date", "")
if len(date) > 0:
date_fragments = re.split("-", date)
year = int(date_fragments[0]) # type: Optional[int]
month = int(date_fragments[1]) # type: Optional[int]
day = int(date_fragments[2]) # type: Optional[int]
else:
year = month = day = None
is_all_day = request.form.get("is_all_day", "0") == "1"
start_time = request.form["start_time"]
end_time = request.form.get("end_time", None)
details = request.form["details"].replace("\r", "").replace("\n", "<br>")
color = request.form["color"]
has_repetition = request.form.get("repeats", "0") == "1"
repetition_type = request.form.get("repetition_type")
repetition_subtype = request.form.get("repetition_subtype")
repetition_value = int(request.form["repetition_value"])
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.create_task(
calendar_id=calendar_id,
year=year,
month=month,
day=day,
title=title,
is_all_day=is_all_day,
start_time=start_time,
end_time=end_time,
details=details,
color=color,
has_repetition=has_repetition,
repetition_type=repetition_type,
repetition_subtype=repetition_subtype,
repetition_value=repetition_value,
)
if year is None:
return redirect("{}/{}/".format(current_app.config["BASE_URL"], calendar_id), code=302)
else:
return redirect("{}/{}/?y={}&m={}".format(current_app.config["BASE_URL"], calendar_id, year, month), code=302,)
@authenticated
@authorized
def delete_task_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.delete_task(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id),
)
return cast(Response, jsonify({}))
@authenticated
@authorized
def update_task_day_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
new_day = request.data.decode("utf-8")
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.update_task_day(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id), new_day_str=new_day,
)
return cast(Response, jsonify({}))
@authenticated
@authorized
def hide_repetition_task_instance_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.hide_repetition_task_instance(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id_str=task_id,
)
return cast(Response, jsonify({}))
def open_calc_plots_action() -> Response:
# username = get_session_username(session_id=str(request.cookies.get(constants.SESSION_ID)))
# authentication = get_authentication()
# user_data = authentication.user_data(username)
# return cast(Response, render_template("../Calculator/index.html"))
# return cast(
# Response,
# render_template(
# "../Calculator/index.html"
# ))
return render_template("index.html")
|
py | 1a3ffefdef4c40fb0e6dc21aea43a6cda83b895f | from __future__ import absolute_import, unicode_literals
from django import forms
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from tuiuiu.contrib.searchpromotions.models import SearchPromotion
from tuiuiu.tuiuiuadmin.widgets import AdminPageChooser
from tuiuiu.tuiuiusearch.models import Query
class SearchPromotionForm(forms.ModelForm):
sort_order = forms.IntegerField(required=False)
def __init__(self, *args, **kwargs):
super(SearchPromotionForm, self).__init__(*args, **kwargs)
self.fields['page'].widget = AdminPageChooser()
class Meta:
model = SearchPromotion
fields = ('query', 'page', 'description')
widgets = {
'description': forms.Textarea(attrs=dict(rows=3)),
}
SearchPromotionsFormSetBase = inlineformset_factory(
Query, SearchPromotion, form=SearchPromotionForm, can_order=True, can_delete=True, extra=0
)
class SearchPromotionsFormSet(SearchPromotionsFormSetBase):
minimum_forms = 1
minimum_forms_message = _("Please specify at least one recommendation for this search term.")
def add_fields(self, form, *args, **kwargs):
super(SearchPromotionsFormSet, self).add_fields(form, *args, **kwargs)
# Hide delete and order fields
form.fields['DELETE'].widget = forms.HiddenInput()
form.fields['ORDER'].widget = forms.HiddenInput()
# Remove query field
del form.fields['query']
def clean(self):
# Search pick must have at least one recommended page to be valid
# Check there is at least one non-deleted form.
non_deleted_forms = self.total_form_count()
non_empty_forms = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete and self._should_delete_form(form):
non_deleted_forms -= 1
if not (form.instance.id is None and not form.has_changed()):
non_empty_forms += 1
if (
non_deleted_forms < self.minimum_forms or
non_empty_forms < self.minimum_forms
):
raise forms.ValidationError(self.minimum_forms_message)
|
py | 1a3fffb6985cb9863ca6946e2256af4a17130ea1 | from tensorflow.python.client import device_lib
# Test whether TensorFlow is installed correctly and the GPU is visible
import tensorflow as tf
import numpy as np
import math
print(tf.test.is_gpu_available())
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
print(get_available_gpus())
'''
Verification of the softmax cross-entropy formula -sum(yi*ln(ai)), where yi is the one-hot true label of sample i and ai = softmax(y_hat)[argmax(yi)], i.e. the softmax value at the index of the true label.
'''
def softmax(x):
sum_raw = np.sum(np.exp(x), axis=-1)
x1 = np.ones(np.shape(x))
for i in range(np.shape(x)[0]):
x1[i] = np.exp(x[i]) / sum_raw[i]
return x1
def get_loss(y:np.array([[]]),y_hat:np.array([[]])):
res=0.
mat_val=softmax(y_hat)
print('mat_val:',mat_val)
    # np.sum adds up all elements; negate to match the documented formula -sum(yi*ln(ai))
    res = -np.sum(y * np.log(mat_val))
return res
# y=np.array([[0,1,0],[0,1,0]])
# y_hat=np.array([[0.9,0.1,1],[0.2,0.8,2]])
# print(np.argmax(y,axis=1))
# print(get_loss(y,y_hat))
# loss=tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_hat))
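# Hedged worked example (added for illustration; the numbers are made up):
# softmax([0.9, 0.1, 1.0]) ~= [0.392, 0.176, 0.433], so for a one-hot label on index 1 the
# cross entropy is -ln(0.176) ~= 1.74, which is what get_loss should report for this pair
# given the -sum(y*log(a)) convention documented above.
y_ex = np.array([[0, 1, 0]])
y_hat_ex = np.array([[0.9, 0.1, 1.0]])
print('cross entropy check:', get_loss(y_ex, y_hat_ex))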
import matplotlib.pyplot as plt
x=[]
x2=[]
x3=[]
y=[]
for i in range(1000):
x.append(np.floor(np.random.normal(8400,200)))
x2.append(np.floor(np.random.uniform(6800,8400)))
x3.append(np.floor(np.random.poisson(8400)))
plt.plot(x)  # y was never populated; plot the sampled values directly
plt.show()
plt.plot(x2)
plt.show()
plt.plot(x3)
plt.show()
def printX(x):
x=np.array(x)
print(np.max(x), np.min(x), np.mean(x), np.std(x))
printX(x)
printX(x2)
printX(x3)
# with tf.Session() as sess:
# loss_val=sess.run(loss)
# print(loss_val) |
py | 1a400039044275c4eac8c9807a85bce9a7495da2 | import random
import math
import copy
from prj4_data import *
def GetRandomVacancy(L):
x = random.randint(0, L.xlim-1)
y = random.randint(0, L.ylim-1)
while L.layout[x][y] != None:
x = random.randint(0, L.xlim-1)
y = random.randint(0, L.ylim-1)
return x, y
def RandomPlacement(L):
for k,v in L.AllCells.items():
x, y = GetRandomVacancy(L)
L.layout[x][y] = k
v.loc = [x, y]
def SimulatedAnnealing(L, Tstart, Tend, iterPerT):
T = Tstart
alpha = 0.95
    iterEst = math.log(Tend/Tstart, 0.85)  # estimate of the total number of annealing temperature steps
print('estimated annealing iterations:', iterEst * iterPerT)
iOuterLoop = 0
while T > Tend:
cost = [L.getCost()]
accepted = list()
        # Annealing sweep at the current temperature
for iInnerLoop in range(iterPerT):
flag = random.randint(0, 1)
Lnew = copy.deepcopy(L)
            # Move: relocate a random cell to a random vacant site
if flag:
tIndex = random.choice(list(Lnew.AllCells.keys()))
Lnew.move(tIndex, GetRandomVacancy(Lnew))
            # Swap: exchange the positions of two distinct random cells
else:
t1Index = random.choice(list(Lnew.AllCells.keys()))
t2Index = random.choice(list(Lnew.AllCells.keys()))
while t2Index == t1Index:
t2Index = random.choice(list(Lnew.AllCells.keys()))
Lnew.swap(t1Index, t2Index)
cost.append(Lnew.getCost())
delta = cost[-1] - cost[-2]
if random.random() < math.exp(-delta/T):
L = Lnew
accepted.append(True)
else:
cost[-1] = cost[-2]
accepted.append(False)
print('temperature:', T)
print('cost:', cost[1:])
print('accepted:', accepted)
        # Cooling: adapt the decay factor alpha, then lower the temperature
if iOuterLoop < iterEst * 0.25:
alpha -= (0.95 - 0.8) / (iterEst / 4)
elif iOuterLoop > iterEst * 0.75:
alpha += (0.95 - 0.8) / (iterEst / 4)
if alpha < 0.8:
alpha = 0.8
elif alpha > 0.95:
alpha = 0.95
T *= alpha
iOuterLoop += 1
return L
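# Hedged usage sketch (added for illustration): the layout object L comes from prj4_data and its
# construction is assumed here, not shown by this module.
# RandomPlacement(L)                                  # start from a random legal placement
# L = SimulatedAnnealing(L, Tstart=100.0, Tend=0.1, iterPerT=20)
# print('final cost:', L.getCost())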
|
py | 1a40006e12a50fa37ee42fb1f332dee304eed80c | #!/usr/bin/env python
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import os
import platform
import subprocess
import sys
import setuptools
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.test import test
# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src/ directory to the sys.path.
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
sys.path.insert(0, src_dir)
about = {}
with open(os.path.join(src_dir, "activeledgersdk", "__about__.py")) as f:
exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True
)
|
py | 1a40016e7f5d41bd605e7ff485c17e4a8ec1a694 | import distutils
import os.path
from setuptools import setup
from setuptools.command.install import install as _install
PTH = (
'try:\n'
' import future_annotations\n'
'except ImportError:\n'
' pass\n'
'else:\n'
' future_annotations.register()\n'
)
class install(_install):
def initialize_options(self):
_install.initialize_options(self)
# Use this prefix to get loaded as early as possible
name = 'aaaaa_' + self.distribution.metadata.name
contents = f'import sys; exec({PTH!r})\n'
self.extra_path = (name, contents)
def finalize_options(self):
_install.finalize_options(self)
install_suffix = os.path.relpath(
self.install_lib, self.install_libbase,
)
if install_suffix == '.':
distutils.log.info('skipping install of .pth during easy-install')
elif install_suffix == self.extra_path[1]:
self.install_lib = self.install_libbase
distutils.log.info(
"will install .pth to '%s.pth'",
os.path.join(self.install_lib, self.extra_path[0]),
)
else:
raise AssertionError(
'unexpected install_suffix',
self.install_lib, self.install_libbase, install_suffix,
)
setup(cmdclass={'install': install})
|
py | 1a4002fdb646782100809684b3babd7cc72fd26b | from Node import Node
import numpy
class Operation(object):
BACK_MUTATION = 0
DELETE_MUTATION = 1
SWITCH_NODES = 2
PRUNE_REGRAFT = 3
@classmethod
def tree_operation(cls, tree, operation, k, gamma, max_deletions):
if operation == cls.BACK_MUTATION:
return cls.add_back_mutation(tree, k, gamma, max_deletions)
elif operation == cls.DELETE_MUTATION:
return cls.mutation_delete(tree)
elif operation == cls.SWITCH_NODES:
return cls.switch_nodes(tree)
elif operation == cls.PRUNE_REGRAFT:
return cls.prune_regraft(tree)
else:
raise SystemError("Something has happened while choosing an operation")
@classmethod
def add_back_mutation(cls, tree, k, gamma, max_deletions):
"""Adds a new random backmutation to the given tree"""
# gets a list of all the nodes from cache
cached_nodes = tree.phylogeny.get_cached_content()
keys = list(cached_nodes.keys())
# select a random node
# root has no parent, hence cannot add a back mutation
# keep trying till we find a suitable node
node = numpy.random.choice(keys)
while node.up == None or node.up.up == None:
node = numpy.random.choice(keys)
        # if the losses list has reached its maximum, then we cannot proceed
if (len(tree.losses_list) >= max_deletions):
return 1
# selecting possible node candidates (every ancestor)
candidates = [p for p in node.iter_ancestors() if (p.loss == False) and (p.mutation_id != -1)]
if len(candidates) == 0:
return 2
# selecting one random ancestor, based on gamma probabilities
found = False
while not found and len(candidates) > 0:
candidate = numpy.random.choice(candidates)
candidates.remove(candidate)
if numpy.random.uniform() < gamma[candidate.mutation_id]:
found = True
if not(found):
return 3
# Ensuring we have no more than k mutations per mutation type
if (tree.k_losses_list[candidate.mutation_id] >= k):
return 4
# If the mutation is already lost in the current tree, no way to remove it again
if (node.is_mutation_already_lost(candidate.mutation_id)):
return 5
        # If there are already k mutations of the candidate's mutation_id
if (tree.k_losses_list[candidate.mutation_id] >= k):
return 6
node_deletion = Node(candidate.name, None, candidate.mutation_id, True)
tree.losses_list.append(node_deletion)
tree.k_losses_list[node_deletion.mutation_id] += 1
# saving parent before detaching
par = node.up
current = node.detach()
par.add_child(node_deletion)
node_deletion.add_child(current)
return 0
@classmethod
def mutation_delete(cls, tree):
"""Delete a random mutation from the given tree"""
if (len(tree.losses_list) == 0):
return 1
node = numpy.random.choice(tree.losses_list)
node.delete_node(tree)
return 0
@classmethod
def switch_nodes(cls, tree):
"""Switch two random nodes of the given tree"""
nodes = tree.phylogeny.get_cached_content()
keys = list(nodes.keys())
u = None
while (u == None or u.up == None or u.loss):
u = numpy.random.choice(keys)
keys.remove(u)
keys = list(nodes.keys())
v = None
while (v == None or v.up == None or v.loss or u.name == v.name):
v = numpy.random.choice(keys)
keys.remove(v)
u.swap(v)
return 0
@classmethod
def prune_regraft(cls, tree):
"""Prune-regraft two random nodes of the given tree"""
nodes_list = tree.phylogeny.get_cached_content()
prune_res = -1
while prune_res != 0:
keys = list(nodes_list.keys())
u = None
while (u == None or u.up == None or u.loss):
u = numpy.random.choice(keys)
keys.remove(u)
keys = list(nodes_list.keys())
v = None
while (v == None or v.up == None or v.loss):
v = numpy.random.choice(keys)
keys.remove(v)
prune_res = u.prune_and_reattach(v)
return 0
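# Hedged usage sketch (added for illustration): 'tree', 'gamma' and the bounds are assumed to
# follow the interfaces used above (tree.phylogeny, tree.losses_list, tree.k_losses_list).
# err = Operation.tree_operation(tree, Operation.PRUNE_REGRAFT, k=3, gamma=gamma, max_deletions=10)
# if err != 0:
#     print('operation rejected with code', err)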
|
py | 1a40030c36f3647a94002cabaff145af81ca0399 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSpdata(RPackage):
"""spData: Datasets for Spatial Analysis"""
homepage = "https://github.com/Nowosad/spData"
url = "https://cran.r-project.org/src/contrib/spData_0.3.0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/spData"
version('0.3.0', sha256='de24ea659541a6c795cd26a1f6a213e15061af9c97a24cba1c24ce30c6c24c98')
depends_on('[email protected]:', type=('build', 'run'))
|
py | 1a40032f1ffdcd3de05e5aa75bec6bcaab538ce1 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
""" Managing the create and save user actions """
if not email:
raise ValueError("User must have a valid email")
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
""" Creating the super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
""" Custom user model that supports email using instead of username"""
email = models.EmailField(max_length=255, unique=True)
full_name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Operation(models.Model):
""" Model for the Post"""
operation_name = models.CharField(max_length=100)
admin_required = models.BooleanField(default=False)
class Meta:
verbose_name_plural = 'Operation'
def __str__(self):
return self.operation_name
class ReportManager(models.Manager):
def create_report(self, action_name, action_parameter,
user):
report = self.create(action_name=action_name,
action_parameter=action_parameter,
user=user)
return report
class Report(models.Model):
""" Model for Report"""
action_name = models.CharField(max_length=100)
action_parameter = models.CharField(max_length=100)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='requested_user'
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = ReportManager()
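# Hedged usage sketch (added for illustration; the field values and user object are made up):
# report = Report.objects.create_report(
#     action_name="delete", action_parameter="item-42", user=some_user
# )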
|
py | 1a4003701258a790d89cf9df391e3ca123b3917d | # -*- coding: utf-8 -*-
#
# MPA Authors. All Rights Reserved.
#
""" Dataset for ISBI_2015"""
# Import global packages
import os
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from PIL import Image
import cv2
from matplotlib import pyplot as plt
# Kornia library for data augmentation
from kornia import augmentation as K
import kornia.augmentation.functional as KF
import kornia.augmentation.random_generator as KRG
# Import local functions
from evaluation import upscale_coordinates
# Import global constants
from constants import *
class ISBIDataSet(object):
""" Read ISBI2015 data and return images and labels.
Format is:
image (torch.tensor),
    label(dictionary): {'ans_x': ANnotation of Senior X coordinate},
                       {'ans_y': ANnotation of Senior Y coordinate},
                       {'ans_c': ANnotation of Senior Classification},
{'anj_x': ANnotation of Junior X coordinate},
{'anj_y': ANnotation of Junior Y coordinate},
{'anj_c': ANnotation of Junior Classification}
Note:
1. We used the average of 'ans' and 'anj' as ground truth
2. Thus, the ground truth of facial classification is calculated from
evaluation of 'ana' not from annotation files.
"""
def __init__(self, data_root, mode, img_h, img_w, transforms, y_ch=False):
""" Transforms and downsampling are determined with 'transforms'
If transforms=ToTensor(), image is not downsampled and 'img_h'
and 'img_w' be obsolete.
If transforms=None, image is donwsampled as ('img_h', 'img_w')
Args:
data_root(str): Path of ISBI2015 dataset.
mode(str): Dataset mode in [train, test1, test2].
img_h(int): Height of image (used for downsampling)
img_w(int): Width of image (used for downsampling)
transforms(torchvision.transforms): Transforms to be applied. If it is
'None', then torchvision.transforms.ToTensor() is applied.
y_ch(bool): Use Y channel image as input (True) image or RGB (False).
"""
if mode == 'train':
self.data_prefix = "TrainingData"
elif mode == 'test1':
self.data_prefix = "Test1Data"
elif mode == 'test2':
self.data_prefix = "Test2Data"
else:
assert('Error in mode')
self.img_size = (img_h, img_w)
self.img_scale = (img_h / RAW_IMG_H, img_w / RAW_IMG_W)
self.transforms = transforms
self.y_ch = y_ch
if transforms is not None:
self.transforms = transforms
else:
self.transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize(self.img_size),
torchvision.transforms.ToTensor(),]
)
self.data_root = data_root
self.img_root = os.path.join(
os.path.join(self.data_root, "RawImage"),
self.data_prefix
)
self.ans_root = os.path.join(
os.path.join(self.data_root, "AnnotationsByMD/senior"),
self.data_prefix
)
self.anj_root = os.path.join(
os.path.join(self.data_root, "AnnotationsByMD/junior"),
self.data_prefix
)
self.img_list = list(sorted(os.listdir(self.img_root)))
self.ans_list = list(sorted(os.listdir(self.ans_root)))
self.anj_list = list(sorted(os.listdir(self.anj_root)))
def __getitem__(self, idx):
""" We used the average of 'ans' and 'anj' as ground truth ('ana') and
to fit to the scale, we also calculate 'ana_fs' that indicate the 'ana' in
the down sampled images.
The shape of ground-truth data is
ann = {
'ans_x': Annotation of x coordinate by senior in text file
'ans_y': Annotation of y coordinate by senior in text file
'anj_x': Annotation of x coordinate by junior in text file
'anj_y': Annotation of x coordinate by junior in text file
'ana_x': Average of 'ans_x' and 'anj_x'
'ana_y': Average of 'ans_y' and 'anj_y'
'ans_x_fs': Scaled 'ans_x' for down sampled input image
'ans_y_fs': Scaled 'ans_y' for down sampled input image
'anj_x_fs': Scaled 'anj_x' for down sampled input image
'anj_y_fs': Scaled 'anj_y' for down sampled input image
'ana_x_fs': Scaled 'ana_x' for down sampled input image
'ana_y_fs': Scaled 'ana_y' for down sampled input image
'ans_c': Annotation of facial class type by senior in text file
'anj_c': Annotation of facial class type by junior in text file
'ana_c': (deprecated) Set as the same as 'ans_c'
}
"""
# load images ad masks
img_path = os.path.join(self.img_root, self.img_list[idx])
ans_path = os.path.join(self.ans_root, self.ans_list[idx])
anj_path = os.path.join(self.anj_root, self.anj_list[idx])
pil_img = Image.open(img_path).convert("RGB")
img = self.transforms(pil_img) # Load image
with open(ans_path) as ans_f: # Read lines without '\n'
ans = [ans_l.rstrip() for ans_l in ans_f]
with open(anj_path) as anj_f: # Read lines without '\n'
anj = [anj_l.rstrip() for anj_l in anj_f]
# Annotation
ann = {}
# Annotation by Senior. (_fs means 'fixed scale')
ann["ans_x"] = np.array([(float(xy.split(',')[0])) for xy in ans[:NUM_LM]])
ann["ans_y"] = np.array([(float(xy.split(',')[1])) for xy in ans[:NUM_LM]])
ann["ans_x_fs"] = self.img_scale[1] * ann["ans_x"]
ann["ans_y_fs"] = self.img_scale[0] * ann["ans_y"]
        # Annotation by Junior.
ann["anj_x"] = np.array([(float(xy.split(',')[0])) for xy in anj[:NUM_LM]])
ann["anj_y"] = np.array([(float(xy.split(',')[1])) for xy in anj[:NUM_LM]])
ann["anj_x_fs"] = self.img_scale[1] * ann["anj_x"]
ann["anj_y_fs"] = self.img_scale[0] * ann["anj_y"]
# Averaged annotation.
ann["ana_x"] = 0.5 * (ann["ans_x"] + ann["anj_x"])
ann["ana_y"] = 0.5 * (ann["ans_y"] + ann["anj_y"])
ann["ana_x_fs"] = 0.5 * (ann["ans_x_fs"] + ann["anj_x_fs"])
ann["ana_y_fs"] = 0.5 * (ann["ans_y_fs"] + ann["anj_y_fs"])
# Face type
ann["ans_c"] = np.pad(np.array([int(c) for c in ans[NUM_LM:]]), (0, 11))
ann["anj_c"] = np.pad(np.array([int(c) for c in anj[NUM_LM:]]), (0, 11))
ann["ana_c"] = ann["ans_c"]
if self.y_ch == False:
return img, ann
else:
y_ch_img = self.transforms(pil_img.convert("YCbCr").getchannel('Y'))
return img, ann, y_ch_img
def __len__(self):
return len(self.img_list)
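# Hedged usage sketch (added for illustration; the data_root path is a made-up placeholder):
# dataset = ISBIDataSet(data_root="/path/to/ISBI2015", mode="train",
#                       img_h=800, img_w=640, transforms=None)
# img, ann = dataset[0]          # img: [3, 800, 640] tensor, ann: dict of numpy arrays
# print(ann["ana_x_fs"][:5])     # averaged landmark x coordinates in downsampled pixel units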
def to_numpy_image(tensor_img):
return tensor_img.transpose(1, 3).transpose(1, 2).cpu().numpy()
def to_tensor_image(np_img):
return torch.tensor(np.transpose(np_img, (0, 3, 1, 2)))
def to_numpy_arr(tensor_arr):
return tensor_arr.cpu().numpy()
def to_tensor_arr(np_arr):
return torch.tensor(np_arr)
def vis_isbi(img_batch, pred_batch, x, y, c, radius, font_scale, txt_offset):
""" Visualize predicted (or ground truth) landmark positions as circle
in the input images.
Args:
img_batch (torch.tensor): Raw input image from ISBI2015
pred_batch (torch.tensor): Image used for the prediction (e.g. down sampled)
x (torch.tensor): (Predicted) landmark positions (x coordinate)
y (torch.tensor): (Predicted) landmark positions (y coordinate)
c (torch.tensor): (Deprecated) (predicted) facial class type
radius (int): Radius of circle of landmark
font_scale (int): Size of landmark text (short names)
txt_offset (int): Offset distance of text from landmark locations
Returns:
vis_img (tensor): Result image
"""
n_batch, img_c, img_h, img_w = img_batch.shape
_, pred_c, pred_h, pred_w = pred_batch.shape
x = ((img_w / pred_w) * to_numpy_arr(x)).astype(np.int)
y = ((img_h / pred_h) * to_numpy_arr(y)).astype(np.int)
num_lm = x.shape[1]
img_batch = to_numpy_image(img_batch)
vis_img = np.zeros_like(img_batch)
for n in range(n_batch):
img = cv2.UMat(img_batch[n])
for i in range(num_lm):
img = cv2.circle(img=img,
center=(x[n, i], y[n, i]),
radius=radius,
color=(1, 0, 0),
thickness=-1,
)
img = cv2.putText(img=img,
text='{}'.format(S_LM_NAME_DICT[i]),
org=(x[n, i] + txt_offset, y[n, i] + txt_offset),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=(0, 1, 0),
thickness=2,
lineType=cv2.LINE_AA
)
overlayed_img = np.array(img.get())
if len(overlayed_img.shape) == 2: # For gray scale image
vis_img[n,:,:,0] = np.array(img.get())
else:
vis_img[n,:,:,:] = np.array(img.get())
return to_tensor_image(vis_img)
def ann_to_heatmap(img_batch, ksize, sigma, x, y, c):
""" Convert annotation into heatmaps of landmark locations using Gaussian
distribution
Args:
img_batch (torch.tensor): Input image
ksize (int): Size of Gaussian kernel (2 * ksize + 1)
sigma (int): Sigma of Gaussian kernel
x (torch.tensor): Landmark positions (x coordinate)
y (torch.tensor): Landmark positions (y coordinate)
c (torch.tensor): (Deprecated) Facial type
Returns:
gt_heatmap (tensor): Heatmatp of ground truth
"""
n_batch, _, img_h, img_w = img_batch.shape
n_lm = x.shape[1]
x = torch.round(x).int()
y = torch.round(y).int()
g_mask = cv2.getGaussianKernel(2 * ksize + 1, sigma)
g_mask = g_mask * g_mask.transpose()
g_mask = torch.tensor(g_mask / np.max(g_mask))
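    # Note: the normalized Gaussian mask is prepared here, but the loop below only writes
    # unit peaks at the (rounded) landmark pixels; the Gaussian is not applied in this function.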
gt_heatmap = torch.zeros([n_batch, n_lm, img_h, img_w])
for n in range(n_batch):
for i in range(n_lm):
gt_heatmap[n, i, y[n, i], x[n, i]] = 1
return gt_heatmap
def heatmap_to_ann(heatmap_batch):
""" Convert heatmap into series of X,Y coordinate by applying argmax.
Args:
heatmap_batch (torch.tensor)
Returns: Integer coordinates (x, y)
"""
n_batch, n_lm, img_w, img_h = heatmap_batch.shape
x = torch.zeros([n_batch, n_lm])
y = torch.zeros([n_batch, n_lm])
for n in range(n_batch):
for i in range(n_lm):
raw_idx = heatmap_batch[n, i, :, :].argmax()
y[n, i] = raw_idx // img_h
x[n, i] = raw_idx - (y[n, i] * img_h)
return x.int(), y.int()
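# Hedged round-trip check (added for illustration): unit peaks written by ann_to_heatmap are
# recovered by heatmap_to_ann, up to the rounding applied when the peaks are placed.
# hm = ann_to_heatmap(img_batch, ksize=20, sigma=5, x=x, y=y, c=None)
# x_rec, y_rec = heatmap_to_ann(hm)   # equals torch.round(x).int(), torch.round(y).int()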
def augmentation(
img_batch,
heatmap_batch,
x,
y,
degrees,
scale,
brightness,
contrst,
saturation,
hue,
same_on_batch):
""" Augment cephalogram and heatmap with following step.
1. Rotation: Use image center or porion as ceter of rotation.
2. Scaling: Use image center or porion as ceter of rotation.
3. Color jittering: Perturb brightness, contrast, stauration and hue.
Args:
img_batch (torch.tensor): Cephalogram from ISBI2015.
Shape = [n_batch, n_ch, height, width]
heatmap_batch (torch.tensor): GT heatmap.
Shape = [n_batch, n_ch, height, width]
x (torch.tensor): X coordinates of landmarks
Shape = [n_batch, NUM_LM]
y (torch.tensor): Y coordinates of landmarks
Shape = [n_batch, NUM_LM]
degrees (list): Range of random rotation.
Shape = [int, int]
scale (int): Range of random scale.
brightness (int): Range of random brightness.
        contrst (int): Range of random contrast.
        saturation (int): Range of random saturation.
hue (int): Range of random hue.
same_on_batch(bool): Same on batch.
Returns:
aug_img (torch.tensor): Augmented cephalograms.
Shape = [n_batch, n_ch, height, width]
aug_heatmap (torch.tensor): Augmented heatmaps.
Shape = [n_batch, n_ch, height, width]
aug_x (torch.tensor): X coordinates of augmented cephalograms' landmarks
scaled as ISBI2015
Shape = [n_batch, NUM_LM]
aug_y (torch.tensor): Y coordinates of augmented cephalograms' landmarks
scaled as ISBI2015
Shape = [n_batch, NUM_LM]
aug_x_fs (torch.tensor): X coordinates of augmented cephalograms' landmarks
scaled as heatmap
Shape = [n_batch, NUM_LM]
aug_y_fs (torch.tensor): Y coordinates of augmented cephalograms' landmarks
scaled as heatmap
Shape = [n_batch, NUM_LM]
"""
n_batch, img_c, img_h, img_w = img_batch.shape
aff_degrees = degrees
aff_scale = scale
affine_params = KRG.random_affine_generator(
batch_size=n_batch,
height=img_h,
width=img_w,
degrees=aff_degrees,
scale=aff_scale,
same_on_batch=same_on_batch,
)
color_jitter_params = KRG.random_color_jitter_generator(
batch_size=n_batch,
brightness=brightness,
contrast=contrst,
saturation=saturation,
hue=hue,
same_on_batch=same_on_batch)
aug_imgs = KF.apply_affine(img_batch, affine_params)
aug_heatmaps = KF.apply_affine(heatmap_batch, affine_params)
aug_x_fs, aug_y_fs = heatmap_to_ann(aug_heatmaps)
aug_x, aug_y = upscale_coordinates(
img_batch=img_batch, x=aug_x_fs, y=aug_y_fs
)
return aug_imgs, aug_heatmaps, aug_x_fs, aug_y_fs, aug_x, aug_y
def crop_lm_patches(img_batch, x_c_batch, y_c_batch, ann_batch, pat_sz):
""" Cropping patches for local stage
Args:
img_batch (tensor): Input image
x_c_batch (tensor): Crop center 'x'
y_c_batch (tensor): Crop center 'y'
ann_batch (tensor): Ground truth annotation
pat_sz (int): Side length of patch
Returns:
img_crop_batch_list (tensor): Cropped patch images
ana_x_batch_list (tensor): Landmark coordinates 'x' of patches
ana_y_batch_list (tensor): Landmark coordinates 'y' of patches
"""
img_crop_batch_list = []
ana_x_batch_list = []
ana_y_batch_list = []
# Zero padding for cropping
img_batch = F.pad(img_batch, (pat_sz, pat_sz, pat_sz, pat_sz))
for img_idx in range(img_batch.shape[0]):
img_crop_ch_list = []
ana_x_ch_list = []
ana_y_ch_list = []
# Padding requires offset GT and crop center by pat_sz.
ana_x = int(ann_batch['ana_x'][img_idx]) + pat_sz
ana_y = int(ann_batch['ana_y'][img_idx]) + pat_sz
x_c = int(x_c_batch[img_idx]) + pat_sz
y_c = int(y_c_batch[img_idx]) + pat_sz
# ROI of patch
pat_x_r = slice(x_c - pat_sz, x_c + pat_sz)
pat_y_r = slice(y_c - pat_sz, y_c + pat_sz)
# Cropped image
img_crop = img_batch[img_idx:img_idx + 1, :, pat_y_r, pat_x_r].clone()
img_crop_ch_list.append(img_crop)
# Annotation of patch is
# GT landmark position - crop center + patch_size
ana_x_ch_list.append(torch.tensor([[pat_sz + ana_x - x_c]]))
ana_y_ch_list.append(torch.tensor([[pat_sz + ana_y - y_c]]))
img_crop_batch_list.append(torch.cat(img_crop_ch_list, dim=1))
ana_x_batch_list.append(torch.cat(ana_x_ch_list, dim=1))
ana_y_batch_list.append(torch.cat(ana_y_ch_list, dim=1))
img_crop_batch_list = torch.cat(img_crop_batch_list, dim=0)
ana_x_batch_list = torch.cat(ana_x_batch_list, dim=0)
ana_y_batch_list = torch.cat(ana_y_batch_list, dim=0)
return img_crop_batch_list, ana_x_batch_list, ana_y_batch_list
def vis_patch(img_batch, x, y, c, radius, font_scale, txt_offset, lm_idx):
""" Visualize predicted (or ground truth) landmark positions as circle
in the cropped patches.
Args:
img_batch (torch.tensor): Cropped patch image
x (torch.tensor): (Predicted) landmark positions (x coordinate)
y (torch.tensor): (Predicted) landmark positions (y coordinate)
c (torch.tensor): (Deprecated) (predicted) facial class type
radius (int): Radius of circle of landmark
font_scale (int): Size of landmark text (short names)
txt_offset (int): Offset distance of text from landmark locations
lm_idx (int): Index of landmark to visualize
Returns:
vis_img (tensor): Result image
"""
n_batch, img_c, img_h, img_w = img_batch.shape
x = to_numpy_arr(x).astype(np.int)
y = to_numpy_arr(y).astype(np.int)
num_lm = x.shape[1]
img_batch = to_numpy_image(img_batch)
vis_img = np.zeros_like(img_batch)
for n in range(n_batch):
img = cv2.UMat(img_batch[n])
img = cv2.circle(img=img,
center=(x[n], y[n]),
radius=radius,
color=(1, 0, 0),
thickness=-1,
)
img = cv2.putText(img=img,
text='{}'.format(S_LM_NAME_DICT[lm_idx]),
org=(x[n] + txt_offset, y[n] + txt_offset),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=(0, 1, 0),
thickness=2,
lineType=cv2.LINE_AA
)
overlayed_img = np.array(img.get())
if len(overlayed_img.shape) == 2:
vis_img[n,:,:,0] = np.array(img.get())
else:
vis_img[n,:,:,:] = np.array(img.get())
return to_tensor_image(vis_img) |
py | 1a4004148f46d0b8c0efaf2a1366946f6b2979a6 | #!/usr/bin/python3
import numpy as np
from rotor_tm_utils.vec2asym import vec2asym
import scipy.linalg as LA
from rotor_tm_utils.vee import vee
from rotor_tm_utils.RPYtoRot_ZXY import RPYtoRot_ZXY
from rotor_tm_utils import utilslib
import scipy
from scipy.spatial.transform import Rotation as tranrot
import json
class controller:
def __init__(self):
self.gd = np.zeros((0,0), dtype=float)
self.icnt = None
# for hover_controller
self.last_t = None
def cooperative_attitude_controller(self, qd, qn, params):
# DESCRIPTION:
# Attitude controller for cooperative cable suspended payload and MAV(s)
# This function is used as a helper function in cooperative_suspended_payload_controller()
# to compute F, M, and Rot_des
# INPUTS:
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 current
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# qn - an integer identifying the id of the current MAV the controller is controlling
# params - a read_params class objects containing all MAV parameters
# OUTPUTS:
# F - a 3 by 1 vector describing thrust
# M - a 3 by 1 vector describing Moment
# Rot_des - a rotation matrix describing desired rotation
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
# Parameter Initialization
m = params.mass
l = params.l
e3 = np.array([[0],[0],[1]])
# State Feedback
xi = qd[qn]["xi"]
xidot = qd[qn]["xidot"]
rot = qd[qn]["rot"]
# Cable Direction Tracking Control
mu_des = qd[qn]["mu_des"]
xi_des = np.divide(-mu_des, np.linalg.norm(mu_des))
xi_des_dot = np.array([[0.0],[0.0],[0.0]])
w_des = np.cross(xi_des, xi_des_dot, axisa=0, axisb=0).T
w = np.cross(xi, xidot, axisa=0, axisb=0).T
mu = np.matmul(qd[qn]["xixiT"], mu_des)
e_xi = np.cross(xi_des, xi, axisa=0, axisb=0).T
e_w = w + np.cross(xi, np.cross(xi, w_des, axisa=0, axisb=0).T, axisa=0, axisb=0).T
u_parallel = mu + m*l*np.linalg.norm(w)**2*xi + np.matmul(m*qd[qn]["xixiT"], qd[qn]["attach_accel"])
u_perpendicular = -m*l*np.cross(xi, params.Kxi @ e_xi + params.Kw @ e_w + (xi.T @ w_des) * xi_des_dot, axisa=0, axisb=0).T - m*np.cross(xi, np.cross(xi, qd[qn]["attach_accel"], axisa=0, axisb=0).T, axisa=0, axisb=0).T
Force = u_parallel + u_perpendicular
F = Force.T @ np.matmul(rot,e3)
# Desired Attitude and Angular Velocity
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:, 2:3] = Z_body_in_world
Y_unit = np.array([[-np.sin(yaw_des)], [np.cos(yaw_des)], [0]])
X_body_in_world = np.cross(Y_unit, Z_body_in_world, axisa=0, axisb=0).T
X_body_in_world = X_body_in_world/np.linalg.norm(X_body_in_world)
Rot_des[:,0:1] = X_body_in_world
Y_body_in_world = np.cross(Z_body_in_world, X_body_in_world, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
p_des = np.array([[0.0]])
q_des = np.array([[0.0]])
r_des = yawdot_des*Z_body_in_world[2:3, :]
qd[qn]["rot_des"] = Rot_des
qd[qn]["omega_des"] = np.vstack((p_des, q_des, r_des))
# Quadrotor Attitude Control
M = self.quadrotor_attitude_controller(qd[qn], params)
return F, M, Rot_des
def quadrotor_attitude_controller(self, qd, params):
# DESCRIPTION:
# Attitude controller for a single cable suspended MAV and payload
# This function is used as a helper function in cooperative_attitude_controller() to compute M
# INPUTS:
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 current
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# params - a dictionary of the payload parameters
# OUTPUTS:
# M - a 3 by 1 vector describing Moment
Rot = qd["rot"]
Rot_des = qd["rot_des"]
omega_des = qd["omega_des"]
e_Rot = np.matmul(Rot_des.T, Rot) - np.matmul(Rot.T, Rot_des)
e_angle = vee(e_Rot)/2
e_omega = qd["omega"] - np.matmul(Rot.T, np.matmul(Rot_des, omega_des))
M = np.cross(qd["omega"], np.matmul(params.I, qd["omega"]), axisa=0, axisb=0).T - np.matmul(params.Kpe, e_angle) - np.matmul(params.Kde, e_omega)
return M
def cooperative_suspended_payload_controller(self, ql, qd, pl_params, qd_params):
# DESCRIPTION:
# Controller for cooperative cable suspended payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'quat' ndarray 4 by 1 payload orientation as unit quaternion
# 'omega' ndarray 3 by 1 payload angular velocity
# 'rot' ndarray 3 by 3 payload rotation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaterion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'yaw_des' float NA desired payload yaw, set to 0.0 current
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 current
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# pl_params - a read_params class object containing payload parameters
# qd_params - a read_params class objects containing all MAV parameters
# OUTPUTS:
# mu - a 3*(Number of MAV(s)) by 1 ndarray, describing tension condition of each cable
# att_acc_c - a 2*(Number of MAV(s)) by 1 ndarray, describing cable payload attachment acceleration
# qd_F - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 1 by 1 ndarray denoting the thrust
# qd_M - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 3 by 1 ndarray denoting the moment
# qd_quat_des - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 1d ndarray with 4 elements denoting the desired orientation as unit quaternion
# qd_rot_des - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 3 by 3 ndarray denoting the desired orientation as rotation matrix
if not pl_params.sim_start:
self.icnt = 0
self.icnt = self.icnt + 1
# Parameter Initialization
quat_des = ql["quat_des"]
omega_des = ql["omega_des"]
g = pl_params.grav
m = pl_params.mass
nquad = pl_params.nquad
e3 = np.array([[0],[0],[1.0]])
Rot = ql["rot"]
omega_asym = vec2asym(ql["omega"])
Rot_des = utilslib.QuatToRot(quat_des)
## Position control
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
        # Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + np.matmul(pl_params.Kp, ep) + np.matmul(pl_params.Kd, ed)
F = m*g*e3 + m*acceleration_des
## Attitude Control
        # Errors of angles and angular velocities
e_Rot = Rot_des.T @ Rot - Rot.T @ Rot_des
e_angle = np.divide(vee(e_Rot), 2)
e_omega = ql["omega"] - Rot.T @ Rot_des @ omega_des.T
# Net moment
        # Missing the angular acceleration term, but in general it is negligible.
M = np.matmul(-pl_params.Kpe, e_angle) - np.matmul(pl_params.Kde, e_omega) # may need to be changed to scalar product
# Cable tension distribution
diag_rot = np.zeros((0,0), dtype=float)
for i in range(1, nquad+1):
diag_rot = LA.block_diag(diag_rot, Rot)
mu = diag_rot @ pl_params.pseudo_inv_P @ np.append(Rot.T @ F, M, axis=0)
for i in range(1, nquad+1):
if (0>mu[3*i-1, 0]):
mu[3*i-1, 0] = 0
print("mu is less than zero")
else:# Is this really necessary?
mu[3*i-1, 0] = mu[3*i-1, 0]
att_acc_c = acceleration_des + g*e3 + np.matmul(np.matmul(np.matmul(Rot, omega_asym), omega_asym), pl_params.rho_vec_list)
# Quadrotor Attitude Controller
qd_F = {}
qd_M = {}
qd_rot_des = {}
qd_quat_des = {}
for qn in range(0, nquad):
qd[qn]["yaw_des"] = 0
qd[qn]["yawdot_des"] = 0
qd[qn]["mu_des"] = mu[3*qn:3*(qn+1)]
qd[qn]["attach_accel"] = att_acc_c[:,qn].reshape((3,1))
[F_qn, M_qn, Rot_des_qn] = self.cooperative_attitude_controller(qd, qn, qd_params)
qd_F[qn] = F_qn
qd_M[qn] = M_qn
qd_quat_des[qn] = tranrot.from_matrix(Rot_des_qn).as_quat()
qd_rot_des[qn] = Rot_des_qn
#return qd_F, qd_M
return mu, att_acc_c, qd_F, qd_M, qd_quat_des, qd_rot_des
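    # Hedged usage sketch (added for illustration): 'ql', 'qd', 'pl_params' and 'qd_params' are
    # assumed to be filled in by the simulation loop as described in the docstring above.
    # ctrl = controller()
    # mu, att_acc_c, qd_F, qd_M, qd_quat_des, qd_rot_des = \
    #     ctrl.cooperative_suspended_payload_controller(ql, qd, pl_params, qd_params)
    # F0, M0 = qd_F[0], qd_M[0]   # thrust (1x1 ndarray) and moment (3x1 ndarray) for MAV 0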
# untested
def cooperative_payload_controller(self, ql, params):
if not params["sim_start"]:
# self.coeff0 = params.coeff0
self.icnt = 0
self.icnt = self.icnt + 1
## Parameter Initialization
quat_des = ql["quat_des"]
omega_des = ql["omega_des"]
g = params.grav
m = params.mass
e3 = np.array([[0],[0],[1]])
Rot = ql["rot"]
omega_asym = vec2asym(ql["omega"])
Rot_des = utilslib.QuatToRot(quat_des)
## Position control
# jerk_des = ql.jerk_des;
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
        # Desired acceleration. This equation drives the trajectory errors to zero.
        acceleration_des = ql["acc_des"] + np.matmul(params.Kp, ep) + np.matmul(params.Kd, ed)
        # Net force F = kx*ex + kv*ex_dot + m*g*e3 + m*xdes_ddot
F = m*g*e3 + m*acceleration_des
## Attitude Control
        # Errors of angles and angular velocities
e_Rot = np.matmul(np.transpose(Rot_des), Rot) - np.matmul(np.transpose(Rot), Rot_des)
e_angle = vee(e_Rot)/2
e_omega = ql["omega"] - np.matmul(np.matmul(np.transpose(Rot), Rot_des), np.transpose(omega_des))
# Net moment
        # Missing the angular acceleration term, but in general it is negligible.
M = - np.matmul(params.Kpe, e_angle) - np.matmul(params.Kde, e_omega)
## Cable tension distribution
        diag_rot = np.zeros((0, 0), dtype=float)
        for i in range(1, params.nquad+1):
            diag_rot = scipy.linalg.block_diag(diag_rot, Rot)
        mu = np.matmul(np.matmul(diag_rot, params.pseudo_inv_P), np.vstack((np.matmul(np.transpose(Rot), F), M)))
for i in range(1, params.nquad+1):
if mu[3*i-1]<0:
mu[3*i-1] = 0
att_acc_c = acceleration_des + g @ e3 + Rot @ omega_asym @ omega_asym @ params.rho_vec_list
return mu,att_acc_c
# untested
def geometric_controller(self, qd, t, qn, params):
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
self.icnt += 1
## Parameter Initialization
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
g = params.grav
m = params.mass
phi = qd[qn]["euler"][0]
theta = qd[qn]["euler"][1]
psi = qd[qn]["euler"][2]
e3 = np.array([[0], [0], [1]])
# The rotation matrix in this function is world to body [bRw] you will
# need to transpose this matrix to get the body to world [wRb] such that
# [wP] = [wRb] * [bP], where [bP] is a point in the body frame and [wP]
# is a point in the world frame
Rot_worldtobody = RPYtoRot_ZXY(phi, theta, psi)
## Position control
jerk_des = qd[qn]["jerk_des"]
# Position error
ep = qd[qn]["pos_des"]-qd[qn]["pos"]
# Velocity error
ed = qd[qn]["vel_des"]-qd[qn]["vel"]
        # Desired acceleration. This equation drives the trajectory errors to zero.
        acceleration_des = qd[qn]["acc_des"] + params.Kp @ ep + params.Kd @ ed
        # Thrust f = (kx*ex + kv*ex_dot + m*g*e3 + m*xdes_ddot) * R*e3
Force = m*g*e3 + m*acceleration_des
F = np.transpose(Force) @ np.transpose(Rot_worldtobody) @ e3
## Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2] = Z_body_in_world
        X_unit = np.vstack((np.cos(yaw_des), np.sin(yaw_des), 0))
Y_body_in_world = np.cross(Z_body_in_world,X_unit)
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world)
Rot_des[:,0] = X_body_in_world
        # Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ np.transpose(Rot_worldtobody) - Rot_worldtobody @ Rot_des
e_angle = vee(e_Rot)/2
p_des = -(m/F) * np.transpose(jerk_des - (np.transpose(Z_body_in_world) @ jerk_des) @ Z_body_in_world) @ Y_body_in_world
q_des = (m/F) * np.transpose(jerk_des - (np.transpose(Z_body_in_world) @ jerk_des) @ Z_body_in_world) @ X_body_in_world
r_des = yawdot_des * Z_body_in_world[2]
        e_omega = qd[qn]["omega"] - Rot_worldtobody @ Rot_des @ np.vstack((p_des, q_des, r_des))
# Moment
        # Missing the angular acceleration term, but in general it is negligible.
M = - params.Kpe @ e_angle - params.Kde @ e_omega + np.cross(qd[qn]["omega"], params.I*qd[qn]["omega"])
# =================== Your code ends here ===================
# Output trpy and drpy as in hardware
trpy = np.array([0,0,0,0])
drpy = np.array([0,0,0,0])
return F, M, trpy, drpy
# untested
def hover_controller(self, qd, t, qn, params):
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
self.icnt += 1
# position_now = qd{qn}.pos;
# velocity_now = qd{qn}.vel;
# Eulerangle_now = qd{qn}.euler;
# omega_now = qd{qn}.omega;
# position_tra = qd{qn}.pos_des;
# velocity_tra = qd{qn}.vel_des;
# acceleration_tra = qd{qn}.acc_des;
## Parameter Initialization
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
g = params.grav
m = params.mass
# Gain matrices
Kp_pos = np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 150]])
Kp_att = np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 150]])
Kd_att = np.array([[5.5, 0, 0],
[0, 5.5, 0],
[0, 0, 150]])
Ki_att = np.array([[0.004, 0, 0],
[0, 0.004, 0],
[0, 0, 0.004]])
Kpe = np.array([[0.1, 0, 0],
[0, 0.1, 0],
[0, 0, 0.2]])
Kde = np.array([[0.004, 0, 0],
[0, 0.004, 0],
[0, 0, 0.004]])
## Position control
# Position error
e_pos = qd[qn]["pos_des"]-qd[qn]["pos"]
vel_des = Kp_pos @ e_pos
# Velocity error
e_vel = vel_des-qd[qn]["vel"]
## Hover controller
        # Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = qd[qn]["acc_des"] + params.Kp @ e_pos + params.Kd @ e_vel
# Desired roll, pitch and yaw
phi_des = (acceleration_des[0]*np.sin(yaw_des)-acceleration_des[1]*np.cos(yaw_des))/g
theta_des = (acceleration_des[0]*np.cos(yaw_des)+acceleration_des[1]*np.sin(yaw_des))/g
psi_des = yaw_des
        # Errors of angles and angular velocities
        e_angle = np.vstack((phi_des, theta_des, psi_des)) - qd[qn]["euler"]
        e_omega = np.vstack((0, 0, yawdot_des)) - qd[qn]["omega"]
        # Thrust
F = m*g + m*acceleration_des[2]
# Moment
M = Kpe @ e_angle + Kde @ e_omega
#
        self.gd[self.icnt-1,:] = np.hstack((t, phi_des, qd[qn]["euler"][0])) # for graphing
# Output trpy and drpy as in hardware
trpy = np.array([0,0,0,0])
drpy = np.array([0,0,0,0])
return F, M, trpy, drpy
def rigid_links_cooperative_payload_controller(self, ql, params):
# DESCRIPTION:
# Controller for rigid link connected payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'quat' ndarray 4 by 1 payload orientation as unit quaternion
# 'omega' ndarray 3 by 1 payload angular velocity
# 'rot' ndarray 3 by 3 payload rotation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaterion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'qd_yaw_des' float NA desired MAV yaw, set to 0.0 current
# 'qd_yawdot_des' float NA time derivative of desired MAV yaw, set to 0.0 currently
# params - a read_params class object containing payload parameters
# OUTPUTS:
# uav_F - a dictionary with one field (key = '0'), a 3 by 1 ndarray denoting the desired force
# uav_F {0: array([[Fx],
# [Fy],
# [Fz]])}
# uav_M - a dictionary with one field (key = '0'), a 3 by 1 ndarray denoting the desired moment
# uav_F {0: array([[Mx],
# [My],
# [Mz]])}
if not params.sim_start:
self.icnt = 0
self.icnt = self.icnt + 1
## Parameter Initialization
quat_des = ql["quat_des"]
yaw_des = 0
omega_des = ql["omega_des"]
g = params.grav
m = params.struct_mass
e3 = np.array([[0],[0],[1]])
Rot = ql["rot"]
omega = ql["omega"]
## Position control
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
ep = ep.reshape((3,1))
ed = ed.reshape((3,1))
        # Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + params.Kp @ ep + params.Kd @ ed
Force = m*g*e3 + m*acceleration_des
tau = np.transpose(Force) @ Rot @ e3
## Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2:3] = Z_body_in_world
X_unit = np.array([[np.cos(yaw_des)], [np.sin(yaw_des)], [0]])
Y_body_in_world = np.cross(Z_body_in_world,X_unit, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world, axisa=0, axisb=0).T
Rot_des[:,0:1] = X_body_in_world
        # Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ Rot - np.transpose(Rot) @ Rot_des
e_angle = vee(e_Rot)/2
e_omega = omega.reshape((3,1)) - np.transpose(Rot) @ Rot_des @ omega_des.reshape((3, 1))
# Net moment
        # Missing the angular acceleration term, but in general it is negligible.
M = - params.Kpe @ e_angle - params.Kde @ e_omega + np.cross(omega, params.struct_I @ omega, axisa=0, axisb=0).reshape((3,1))
## Quadrotor Thrust and Moment Distribution
u = params.thrust_moment_distribution_mat @ np.vstack((tau, M))
u = params.A @ u
uav_F_arr = u[0] * Rot[:,2].reshape((3,1))
uav_M_arr = u[1:4]
# convert u into uav_F and uav_M
uav_F = {}
uav_F[0] = uav_F_arr
uav_M = {}
uav_M[0] = uav_M_arr
return uav_F, uav_M
def single_payload_geometric_controller(self, ql, qd_params, pl_params):
# DESCRIPTION:
# Controller for rigid link connected payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload and MAV combined, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'qd_pos' ndarray 3 by 1 MAV position
# 'qd_vel' ndarray 3 by 1 MAV velocity
# 'qd_quat' ndarray 4 by 1 MAV orientation as unit quaternion
# 'qd_omega' ndarray 3 by 1 MAV angular velocity
# 'qd_rot' ndarray 3 by 3 MAV orientation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaterion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'qd_yaw_des' float NA desired MAV yaw, set to 0.0 current
# 'qd_yawdot_des' float NA time derivative of desired MAV yaw, set to 0.0 currently
# pl_params - a read_params class object containing payload parameters
# qd_params - a read_params class objects containing all MAV parameters
# OUTPUTS:
# F - a 1 by 1 ndarray, denoting the thrust force
# M - a list of size 3, containing three 1d ndarray of size 1, denoting the moment
# M = [[array([Mx])]
# [array([My])]
# [array([Mz])]]
## Parameter Initialization
if not pl_params.sim_start:
self.icnt = 0
g = pl_params.grav
e3 = np.array([[0],[0],[1]])
self.icnt = self.icnt + 1
quad_m = qd_params.mass
pl_m = pl_params.mass
l = pl_params.cable_length
## State Initialization
quad_load_rel_pos = ql["qd_pos"]-ql["pos"]
quad_load_rel_vel = ql["qd_vel"]-ql["vel"]
quad_load_distance = np.linalg.norm(quad_load_rel_pos)
xi_ = -quad_load_rel_pos/quad_load_distance
xixiT_ = xi_ @ np.transpose(xi_)
xidot_ = -quad_load_rel_vel/quad_load_distance
xi_asym_ = vec2asym(xi_)
w_ = np.cross(xi_, xidot_, axisa=0, axisb=0).T
Rot_worldtobody = ql["qd_rot"]
## Payload Position control
#Position error
ep = ql["pos_des"]-ql["pos"]
#Velocity error
ed = ql["vel_des"]-ql["vel"]
        # Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + g*e3 + pl_params.Kp @ ep + pl_params.Kd @ ed
# Desired yaw and yawdot
yaw_des = ql["qd_yaw_des"] # This can remain for Quad
yawdot_des = ql["qd_yawdot_des"]
## Cable Direction Control
# Desired cable direction
mu_des_ = (quad_m + pl_m) * acceleration_des + quad_m * l * (np.transpose(xidot_) @ xidot_) * xi_
xi_des_ = -mu_des_ / np.linalg.norm(mu_des_)
xi_des_dot_ = np.zeros((3, 1), dtype=float)
w_des_ = np.cross(xi_des_, xi_des_dot_, axisa=0, axisb=0).T
w_des_dot_ = np.zeros((3, 1), dtype=float)
mu_ = xixiT_ @ mu_des_
e_xi = np.cross(xi_des_, xi_, axisa=0, axisb=0).T
e_w = w_ + xi_asym_ @ xi_asym_ @ w_des_
Force = mu_ - quad_m*l*np.cross(xi_, qd_params.Kxi @ e_xi + qd_params.Kw @ e_w+ (xi_.T @ w_des_) * xidot_ + xi_asym_ @ xi_asym_ @ w_des_dot_, axisa=0, axisb=0).T
F = np.transpose(Force) @ Rot_worldtobody @ e3
# Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2:3] = Z_body_in_world
X_unit = np.array([[np.cos(yaw_des)], [np.sin(yaw_des)], [0]])
Y_body_in_world = np.cross(Z_body_in_world, X_unit, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world, axisa=0, axisb=0).T
Rot_des[:,0:1] = X_body_in_world
        # Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ Rot_worldtobody - Rot_worldtobody.T @ Rot_des
e_angle = vee(e_Rot)/2
p_des = 0.0
q_des = 0.0
r_des = yawdot_des*Z_body_in_world[2]
e_omega = ql["qd_omega"] - Rot_worldtobody.T @ Rot_des @ np.array([[p_des], [q_des], [r_des]])
# Moment
        # Missing the angular acceleration term, but in general it is negligible.
M = - qd_params.Kpe @ e_angle - qd_params.Kde @ e_omega + np.cross(ql["qd_omega"],qd_params.I @ ql["qd_omega"], axisa=0, axisb=0).T
return F, M
|
py | 1a4004d944c51b2de74898ea1e8a0609f6e88859 | # coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-awsx. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from .. import awsx as _awsx
from ._inputs import *
import pulumi_aws
__all__ = ['FargateTaskDefinitionArgs', 'FargateTaskDefinition']
@pulumi.input_type
class FargateTaskDefinitionArgs:
def __init__(__self__, *,
container: Optional['TaskDefinitionContainerDefinitionArgs'] = None,
containers: Optional[Mapping[str, 'TaskDefinitionContainerDefinitionArgs']] = None,
cpu: Optional[pulumi.Input[str]] = None,
ephemeral_storage: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']] = None,
execution_role: Optional['_awsx.DefaultRoleWithPolicyArgs'] = None,
family: Optional[pulumi.Input[str]] = None,
inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]] = None,
ipc_mode: Optional[pulumi.Input[str]] = None,
log_group: Optional['_awsx.DefaultLogGroupArgs'] = None,
memory: Optional[pulumi.Input[str]] = None,
pid_mode: Optional[pulumi.Input[str]] = None,
placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]] = None,
proxy_configuration: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']] = None,
runtime_platform: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
task_role: Optional['_awsx.DefaultRoleWithPolicyArgs'] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]] = None):
"""
The set of arguments for constructing a FargateTaskDefinition resource.
:param 'TaskDefinitionContainerDefinitionArgs' container: Single container to make a TaskDefinition from. Useful for simple cases where there aren't
multiple containers, especially when creating a TaskDefinition to call [run] on.
Either [container] or [containers] must be provided.
:param Mapping[str, 'TaskDefinitionContainerDefinitionArgs'] containers: All the containers to make a TaskDefinition from. Useful when creating a Service that will
contain many containers within.
Either [container] or [containers] must be provided.
:param pulumi.Input[str] cpu: The number of cpu units used by the task. If not provided, a default will be computed based on the cumulative needs specified by [containerDefinitions]
:param pulumi.Input['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs'] ephemeral_storage: The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage.
:param '_awsx.DefaultRoleWithPolicyArgs' execution_role: The execution role that the Amazon ECS container agent and the Docker daemon can assume.
Will be created automatically if not defined.
:param pulumi.Input[str] family: An optional unique name for your task definition. If not specified, then a default will be created.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]] inference_accelerators: Configuration block(s) with Inference Accelerators settings. Detailed below.
:param pulumi.Input[str] ipc_mode: IPC resource namespace to be used for the containers in the task The valid values are `host`, `task`, and `none`.
:param '_awsx.DefaultLogGroupArgs' log_group: A set of volume blocks that containers in your task may use.
:param pulumi.Input[str] memory: The amount (in MiB) of memory used by the task. If not provided, a default will be computed
based on the cumulative needs specified by [containerDefinitions]
:param pulumi.Input[str] pid_mode: Process namespace to use for the containers in the task. The valid values are `host` and `task`.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]] placement_constraints: Configuration block for rules that are taken into consideration during task placement. Maximum number of `placement_constraints` is `10`. Detailed below.
:param pulumi.Input['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs'] proxy_configuration: Configuration block for the App Mesh proxy. Detailed below.
:param pulumi.Input['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs'] runtime_platform: Configuration block for runtime_platform that containers in your task may use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
:param '_awsx.DefaultRoleWithPolicyArgs' task_role: IAM role that allows your Amazon ECS container task to make calls to other AWS services.
Will be created automatically if not defined.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]] volumes: Configuration block for volumes that containers in your task may use. Detailed below.
"""
if container is not None:
pulumi.set(__self__, "container", container)
if containers is not None:
pulumi.set(__self__, "containers", containers)
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if ephemeral_storage is not None:
pulumi.set(__self__, "ephemeral_storage", ephemeral_storage)
if execution_role is not None:
pulumi.set(__self__, "execution_role", execution_role)
if family is not None:
pulumi.set(__self__, "family", family)
if inference_accelerators is not None:
pulumi.set(__self__, "inference_accelerators", inference_accelerators)
if ipc_mode is not None:
pulumi.set(__self__, "ipc_mode", ipc_mode)
if log_group is not None:
pulumi.set(__self__, "log_group", log_group)
if memory is not None:
pulumi.set(__self__, "memory", memory)
if pid_mode is not None:
pulumi.set(__self__, "pid_mode", pid_mode)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if proxy_configuration is not None:
pulumi.set(__self__, "proxy_configuration", proxy_configuration)
if runtime_platform is not None:
pulumi.set(__self__, "runtime_platform", runtime_platform)
if skip_destroy is not None:
pulumi.set(__self__, "skip_destroy", skip_destroy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if task_role is not None:
pulumi.set(__self__, "task_role", task_role)
if volumes is not None:
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter
def container(self) -> Optional['TaskDefinitionContainerDefinitionArgs']:
"""
Single container to make a TaskDefinition from. Useful for simple cases where there aren't
multiple containers, especially when creating a TaskDefinition to call [run] on.
Either [container] or [containers] must be provided.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional['TaskDefinitionContainerDefinitionArgs']):
pulumi.set(self, "container", value)
@property
@pulumi.getter
def containers(self) -> Optional[Mapping[str, 'TaskDefinitionContainerDefinitionArgs']]:
"""
All the containers to make a TaskDefinition from. Useful when creating a Service that will
contain many containers within.
Either [container] or [containers] must be provided.
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: Optional[Mapping[str, 'TaskDefinitionContainerDefinitionArgs']]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter
def cpu(self) -> Optional[pulumi.Input[str]]:
"""
The number of cpu units used by the task. If not provided, a default will be computed based on the cumulative needs specified by [containerDefinitions]
"""
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cpu", value)
@property
@pulumi.getter(name="ephemeralStorage")
def ephemeral_storage(self) -> Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']]:
"""
The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage.
"""
return pulumi.get(self, "ephemeral_storage")
@ephemeral_storage.setter
def ephemeral_storage(self, value: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']]):
pulumi.set(self, "ephemeral_storage", value)
@property
@pulumi.getter(name="executionRole")
def execution_role(self) -> Optional['_awsx.DefaultRoleWithPolicyArgs']:
"""
The execution role that the Amazon ECS container agent and the Docker daemon can assume.
Will be created automatically if not defined.
"""
return pulumi.get(self, "execution_role")
@execution_role.setter
def execution_role(self, value: Optional['_awsx.DefaultRoleWithPolicyArgs']):
pulumi.set(self, "execution_role", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
"""
An optional unique name for your task definition. If not specified, then a default will be created.
"""
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter(name="inferenceAccelerators")
def inference_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]]:
"""
Configuration block(s) with Inference Accelerators settings. Detailed below.
"""
return pulumi.get(self, "inference_accelerators")
@inference_accelerators.setter
def inference_accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]]):
pulumi.set(self, "inference_accelerators", value)
@property
@pulumi.getter(name="ipcMode")
def ipc_mode(self) -> Optional[pulumi.Input[str]]:
"""
IPC resource namespace to be used for the containers in the task. The valid values are `host`, `task`, and `none`.
"""
return pulumi.get(self, "ipc_mode")
@ipc_mode.setter
def ipc_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ipc_mode", value)
@property
@pulumi.getter(name="logGroup")
def log_group(self) -> Optional['_awsx.DefaultLogGroupArgs']:
"""
Log Group for use by the task's containers. Will be created automatically if not defined.
"""
return pulumi.get(self, "log_group")
@log_group.setter
def log_group(self, value: Optional['_awsx.DefaultLogGroupArgs']):
pulumi.set(self, "log_group", value)
@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input[str]]:
"""
The amount (in MiB) of memory used by the task. If not provided, a default will be computed
based on the cumulative needs specified by [containerDefinitions]
"""
return pulumi.get(self, "memory")
@memory.setter
def memory(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "memory", value)
@property
@pulumi.getter(name="pidMode")
def pid_mode(self) -> Optional[pulumi.Input[str]]:
"""
Process namespace to use for the containers in the task. The valid values are `host` and `task`.
"""
return pulumi.get(self, "pid_mode")
@pid_mode.setter
def pid_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_mode", value)
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]]:
"""
Configuration block for rules that are taken into consideration during task placement. Maximum number of `placement_constraints` is `10`. Detailed below.
"""
return pulumi.get(self, "placement_constraints")
@placement_constraints.setter
def placement_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]]):
pulumi.set(self, "placement_constraints", value)
@property
@pulumi.getter(name="proxyConfiguration")
def proxy_configuration(self) -> Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']]:
"""
Configuration block for the App Mesh proxy. Detailed below.
"""
return pulumi.get(self, "proxy_configuration")
@proxy_configuration.setter
def proxy_configuration(self, value: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']]):
pulumi.set(self, "proxy_configuration", value)
@property
@pulumi.getter(name="runtimePlatform")
def runtime_platform(self) -> Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']]:
"""
Configuration block for runtime_platform that containers in your task may use.
"""
return pulumi.get(self, "runtime_platform")
@runtime_platform.setter
def runtime_platform(self, value: Optional[pulumi.Input['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']]):
pulumi.set(self, "runtime_platform", value)
@property
@pulumi.getter(name="skipDestroy")
def skip_destroy(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "skip_destroy")
@skip_destroy.setter
def skip_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_destroy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="taskRole")
def task_role(self) -> Optional['_awsx.DefaultRoleWithPolicyArgs']:
"""
IAM role that allows your Amazon ECS container task to make calls to other AWS services.
Will be created automatically if not defined.
"""
return pulumi.get(self, "task_role")
@task_role.setter
def task_role(self, value: Optional['_awsx.DefaultRoleWithPolicyArgs']):
pulumi.set(self, "task_role", value)
@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]]:
"""
Configuration block for volumes that containers in your task may use. Detailed below.
"""
return pulumi.get(self, "volumes")
@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]]):
pulumi.set(self, "volumes", value)
class FargateTaskDefinition(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container: Optional[pulumi.InputType['TaskDefinitionContainerDefinitionArgs']] = None,
containers: Optional[Mapping[str, pulumi.InputType['TaskDefinitionContainerDefinitionArgs']]] = None,
cpu: Optional[pulumi.Input[str]] = None,
ephemeral_storage: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']]] = None,
execution_role: Optional[pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs']] = None,
family: Optional[pulumi.Input[str]] = None,
inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]]] = None,
ipc_mode: Optional[pulumi.Input[str]] = None,
log_group: Optional[pulumi.InputType['_awsx.DefaultLogGroupArgs']] = None,
memory: Optional[pulumi.Input[str]] = None,
pid_mode: Optional[pulumi.Input[str]] = None,
placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]]] = None,
proxy_configuration: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']]] = None,
runtime_platform: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
task_role: Optional[pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs']] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]]] = None,
__props__=None):
"""
Create a TaskDefinition resource with the given unique name, arguments, and options.
Creates required log-group and task & execution roles.
Presents required Service load balancers if target group included in port mappings.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.InputType['TaskDefinitionContainerDefinitionArgs'] container: Single container to make a TaskDefinition from. Useful for simple cases where there aren't
multiple containers, especially when creating a TaskDefinition to call [run] on.
Either [container] or [containers] must be provided.
:param Mapping[str, pulumi.InputType['TaskDefinitionContainerDefinitionArgs']] containers: All the containers to make a TaskDefinition from. Useful when creating a Service that will
contain many containers within.
Either [container] or [containers] must be provided.
:param pulumi.Input[str] cpu: The number of cpu units used by the task. If not provided, a default will be computed based on the cumulative needs specified by [containerDefinitions]
:param pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']] ephemeral_storage: The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage.
:param pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs'] execution_role: The execution role that the Amazon ECS container agent and the Docker daemon can assume.
Will be created automatically if not defined.
:param pulumi.Input[str] family: An optional unique name for your task definition. If not specified, then a default will be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]] inference_accelerators: Configuration block(s) with Inference Accelerators settings. Detailed below.
:param pulumi.Input[str] ipc_mode: IPC resource namespace to be used for the containers in the task. The valid values are `host`, `task`, and `none`.
:param pulumi.InputType['_awsx.DefaultLogGroupArgs'] log_group: Log Group for use by the task's containers. Will be created automatically if not defined.
:param pulumi.Input[str] memory: The amount (in MiB) of memory used by the task. If not provided, a default will be computed
based on the cumulative needs specified by [containerDefinitions]
:param pulumi.Input[str] pid_mode: Process namespace to use for the containers in the task. The valid values are `host` and `task`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]] placement_constraints: Configuration block for rules that are taken into consideration during task placement. Maximum number of `placement_constraints` is `10`. Detailed below.
:param pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']] proxy_configuration: Configuration block for the App Mesh proxy. Detailed below.
:param pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']] runtime_platform: Configuration block for runtime_platform that containers in your task may use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
:param pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs'] task_role: IAM role that allows your Amazon ECS container task to make calls to other AWS services.
Will be created automatically if not defined.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]] volumes: Configuration block for volumes that containers in your task may use. Detailed below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[FargateTaskDefinitionArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a TaskDefinition resource with the given unique name, arguments, and options.
Creates required log-group and task & execution roles.
Presents required Service load balancers if target group included in port mappings.
:param str resource_name: The name of the resource.
:param FargateTaskDefinitionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FargateTaskDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container: Optional[pulumi.InputType['TaskDefinitionContainerDefinitionArgs']] = None,
containers: Optional[Mapping[str, pulumi.InputType['TaskDefinitionContainerDefinitionArgs']]] = None,
cpu: Optional[pulumi.Input[str]] = None,
ephemeral_storage: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionEphemeralStorageArgs']]] = None,
execution_role: Optional[pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs']] = None,
family: Optional[pulumi.Input[str]] = None,
inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionInferenceAcceleratorArgs']]]]] = None,
ipc_mode: Optional[pulumi.Input[str]] = None,
log_group: Optional[pulumi.InputType['_awsx.DefaultLogGroupArgs']] = None,
memory: Optional[pulumi.Input[str]] = None,
pid_mode: Optional[pulumi.Input[str]] = None,
placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionPlacementConstraintArgs']]]]] = None,
proxy_configuration: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionProxyConfigurationArgs']]] = None,
runtime_platform: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionRuntimePlatformArgs']]] = None,
skip_destroy: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
task_role: Optional[pulumi.InputType['_awsx.DefaultRoleWithPolicyArgs']] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.ecs.TaskDefinitionVolumeArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FargateTaskDefinitionArgs.__new__(FargateTaskDefinitionArgs)
__props__.__dict__["container"] = container
__props__.__dict__["containers"] = containers
__props__.__dict__["cpu"] = cpu
__props__.__dict__["ephemeral_storage"] = ephemeral_storage
__props__.__dict__["execution_role"] = execution_role
__props__.__dict__["family"] = family
__props__.__dict__["inference_accelerators"] = inference_accelerators
__props__.__dict__["ipc_mode"] = ipc_mode
__props__.__dict__["log_group"] = log_group
__props__.__dict__["memory"] = memory
__props__.__dict__["pid_mode"] = pid_mode
__props__.__dict__["placement_constraints"] = placement_constraints
__props__.__dict__["proxy_configuration"] = proxy_configuration
__props__.__dict__["runtime_platform"] = runtime_platform
__props__.__dict__["skip_destroy"] = skip_destroy
__props__.__dict__["tags"] = tags
__props__.__dict__["task_role"] = task_role
__props__.__dict__["volumes"] = volumes
__props__.__dict__["load_balancers"] = None
__props__.__dict__["task_definition"] = None
super(FargateTaskDefinition, __self__).__init__(
'awsx:ecs:FargateTaskDefinition',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="executionRole")
def execution_role(self) -> pulumi.Output[Optional['pulumi_aws.iam.Role']]:
"""
Auto-created IAM task execution role that the Amazon ECS container agent and the Docker daemon can assume.
"""
return pulumi.get(self, "execution_role")
@property
@pulumi.getter(name="loadBalancers")
def load_balancers(self) -> pulumi.Output[Sequence['pulumi_aws.ecs.outputs.ServiceLoadBalancer']]:
"""
Computed load balancers from target groups specified of container port mappings.
"""
return pulumi.get(self, "load_balancers")
@property
@pulumi.getter(name="logGroup")
def log_group(self) -> pulumi.Output[Optional['pulumi_aws.cloudwatch.LogGroup']]:
"""
Auto-created Log Group resource for use by containers.
"""
return pulumi.get(self, "log_group")
@property
@pulumi.getter(name="taskDefinition")
def task_definition(self) -> pulumi.Output['pulumi_aws.ecs.TaskDefinition']:
"""
Underlying ECS Task Definition resource
"""
return pulumi.get(self, "task_definition")
@property
@pulumi.getter(name="taskRole")
def task_role(self) -> pulumi.Output[Optional['pulumi_aws.iam.Role']]:
"""
Auto-created IAM role that allows your Amazon ECS container task to make calls to other AWS services.
"""
return pulumi.get(self, "task_role")
|
py | 1a400541c5fd0e3d68f11cbf661d7681a3b4f816 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
class SubgraphviewTest(tf.test.TestCase):
def test_simple_swap(self):
g = tf.Graph()
with g.as_default():
a0 = tf.constant(1.0, shape=[2], name="a0")
b0 = tf.constant(2.0, shape=[2], name="b0")
c0 = tf.add(a0, b0, name="c0")
a1 = tf.constant(3.0, shape=[2], name="a1")
b1 = tf.constant(4.0, shape=[2], name="b1")
c1 = tf.add(a1, b1, name="b1")
ge.util.swap_ts([a0, b0], [a1, b1])
assert c0.op.inputs[0] == a1 and c0.op.inputs[1] == b1
assert c1.op.inputs[0] == a0 and c1.op.inputs[1] == b0
if __name__ == "__main__":
tf.test.main()
|
py | 1a40058ff427d4d68183b8f2660cacb579b26a3d | from lewis.core.statemachine import State
from lewis.core import approaches
from lewis_emulators.ips.modes import Activity
SECS_PER_MIN = 60
class HeaterOnState(State):
def in_state(self, dt):
device = self._context
device.heater_current = approaches.linear(
device.heater_current, device.HEATER_ON_CURRENT, device.HEATER_RAMP_RATE, dt)
# The magnet can only be ramped at a certain rate. The PSUs ramp rate can be varied.
# If the PSU attempts to ramp too fast for the magnet, then get a quench
curr_ramp_rate = device.current_ramp_rate / SECS_PER_MIN
if curr_ramp_rate > device.MAGNET_RAMP_RATE:
device.quench("PSU ramp rate is too high")
elif abs(device.current - device.magnet_current) > device.QUENCH_CURRENT_DELTA * dt:
device.quench("Difference between PSU current ({}) and magnet current ({}) is higher than allowed ({})"
.format(device.current, device.magnet_current, device.QUENCH_CURRENT_DELTA * dt))
elif device.activity == Activity.TO_SETPOINT:
device.current = approaches.linear(device.current, device.current_setpoint, curr_ramp_rate, dt)
device.magnet_current = approaches.linear(device.magnet_current, device.current_setpoint, curr_ramp_rate, dt)
elif device.activity == Activity.TO_ZERO:
device.current = approaches.linear(device.current, 0, curr_ramp_rate, dt)
device.magnet_current = approaches.linear(device.magnet_current, 0, curr_ramp_rate, dt)
class HeaterOffState(State):
def in_state(self, dt):
device = self._context
device.heater_current = approaches.linear(
device.heater_current, device.HEATER_OFF_CURRENT, device.HEATER_RAMP_RATE, dt)
curr_ramp_rate = device.current_ramp_rate / SECS_PER_MIN
# In this state, the magnet current is totally unaffected by whatever the PSU decides to do.
if device.activity == Activity.TO_SETPOINT:
device.current = approaches.linear(device.current, device.current_setpoint, curr_ramp_rate, dt)
elif device.activity == Activity.TO_ZERO:
device.current = approaches.linear(device.current, 0, curr_ramp_rate, dt)
class MagnetQuenchedState(State):
pass
|
py | 1a4005e69f1c5000cf6c22ba995ec955acc135d8 | from main import app
if __name__ == "__main__":
app.run() |
py | 1a4006200fa3a4ad63a68cba06b150ab5f64e140 | """vaccineFinder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('',views.index,name="indexPage"),
path('generate_otp/',views.generateOtpApi),
path('dashboard_page/',views.dashboard,name="dashPage"),
path('search-by-pincode/',views.byPincode,name="byPincode"),
path('logout/',views.logout,name="logout")
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
py | 1a40063d20778e884e1b652e23fdab2a7cb6c557 | """
Script used to create surface plots to illustrate
(stochastic) gradient descent in chapter 5.
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the '3d' projection on older matplotlib)
# Initialize figure
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = np.arange(-2, 2, 0.3)
Y = np.arange(-2, 2, 0.3)
X, Y = np.meshgrid(X, Y)
R = Y * np.sin(X) - X * np.cos(Y)
Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-1.0, 1.0)
ax.zaxis.set_major_locator(LinearLocator(8))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.01f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
# Show plot
plt.show()
|
py | 1a4006712e4ee9247acb2ecec13c9a2d306a5f8b | """
Plugin Manager
--------------
A plugin manager class is used to load plugins, manage the list of
loaded plugins, and proxy calls to those plugins.
The plugin managers provided with nose are:
:class:`PluginManager`
This manager doesn't implement loadPlugins, so it can only work
with a static list of plugins.
:class:`BuiltinPluginManager`
This manager loads plugins referenced in ``nose.plugins.builtin``.
:class:`EntryPointPluginManager`
This manager uses setuptools entrypoints to load plugins.
:class:`ExtraPluginsPluginManager`
This manager loads extra plugins specified with the keyword
`addplugins`.
:class:`DefaultPluginMananger`
This is the manager class that will be used by default. If
setuptools is installed, it is a subclass of
:class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
otherwise, an alias to :class:`BuiltinPluginManager`.
:class:`RestrictedPluginManager`
This manager is for use in test runs where some plugin calls are
not available, such as runs started with ``python setup.py test``,
where the test runner is the default unittest :class:`TextTestRunner`. It
is a subclass of :class:`DefaultPluginManager`.
Writing a plugin manager
========================
If you want to load plugins via some other means, you can write a
plugin manager and pass an instance of your plugin manager class when
instantiating the :class:`nose.config.Config` instance that you pass to
:class:`TestProgram` (or :func:`main` or :func:`run`).
To implement your plugin loading scheme, implement ``loadPlugins()``,
and in that method, call ``addPlugin()`` with an instance of each plugin
you wish to make available. Make sure to call
``super(self).loadPlugins()`` as well if have subclassed a manager
other than ``PluginManager``.
"""
import inspect
import logging
import os
import sys
from itertools import chain as iterchain
from warnings import warn
import nose.config
from nose.failure import Failure
from nose.plugins.base import IPluginInterface
from nose.pyversion import sort_list
import pickle
from io import StringIO
__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
'BuiltinPluginManager', 'RestrictedPluginManager']
log = logging.getLogger(__name__)
class PluginProxy(object):
"""Proxy for plugin calls. Essentially a closure bound to the
given call and plugin list.
The plugin proxy also must be bound to a particular plugin
interface specification, so that it knows what calls are available
and any special handling that is required for each call.
"""
interface = IPluginInterface
def __init__(self, call, plugins):
try:
self.method = getattr(self.interface, call)
except AttributeError:
raise AttributeError("%s is not a valid %s method"
% (call, self.interface.__name__))
self.call = self.makeCall(call)
self.plugins = []
for p in plugins:
self.addPlugin(p, call)
def __call__(self, *arg, **kw):
return self.call(*arg, **kw)
def addPlugin(self, plugin, call):
"""Add plugin to my list of plugins to call, if it has the attribute
I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
if call == 'loadTestsFromModule' and \
len(inspect.getargspec(meth)[0]) == 2:
orig_meth = meth
meth = lambda module, path, **kwargs: orig_meth(module)
self.plugins.append((plugin, meth))
def makeCall(self, call):
if call == 'loadTestsFromNames':
# special case -- load tests from names behaves somewhat differently
# from other chainable calls, because plugins return a tuple, only
# part of which can be chained to the next plugin.
return self._loadTestsFromNames
meth = self.method
if getattr(meth, 'generative', False):
# call all plugins and yield a flattened iterator of their results
return lambda *arg, **kw: list(self.generate(*arg, **kw))
elif getattr(meth, 'chainable', False):
return self.chain
else:
# return a value from the first plugin that returns non-None
return self.simple
def chain(self, *arg, **kw):
"""Call plugins in a chain, where the result of each plugin call is
sent to the next plugin as input. The final output result is returned.
"""
result = None
# extract the static arguments (if any) from arg so they can
# be passed to each plugin call in the chain
static = [a for (static, a)
in zip(getattr(self.method, 'static_args', []), arg)
if static]
for p, meth in self.plugins:
result = meth(*arg, **kw)
arg = static[:]
arg.append(result)
return result
def generate(self, *arg, **kw):
"""Call all plugins, yielding each item in each non-None result.
"""
for p, meth in self.plugins:
result = None
try:
result = meth(*arg, **kw)
if result is not None:
for r in result:
yield r
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
yield Failure(*exc)
continue
def simple(self, *arg, **kw):
"""Call all plugins, returning the first non-None result.
"""
for p, meth in self.plugins:
result = meth(*arg, **kw)
if result is not None:
return result
def _loadTestsFromNames(self, names, module=None):
"""Chainable but not quite normal. Plugins return a tuple of
(tests, names) after processing the names. The tests are added
to a suite that is accumulated throughout the full call, while
names are input for the next plugin in the chain.
"""
suite = []
for p, meth in self.plugins:
result = meth(names, module=module)
if result is not None:
suite_part, names = result
if suite_part:
suite.extend(suite_part)
return suite, names
class NoPlugins(object):
"""Null Plugin manager that has no plugins."""
interface = IPluginInterface
def __init__(self):
self._plugins = self.plugins = ()
def __iter__(self):
return iter(())  # __iter__ must return an iterator, not a bare tuple
def _doNothing(self, *args, **kwds):
pass
def _emptyIterator(self, *args, **kwds):
return ()
def __getattr__(self, call):
method = getattr(self.interface, call)
if getattr(method, "generative", False):
return self._emptyIterator
else:
return self._doNothing
def addPlugin(self, plug):
raise NotImplementedError()
def addPlugins(self, plugins):
raise NotImplementedError()
def configure(self, options, config):
pass
def loadPlugins(self):
pass
def sort(self):
pass
class PluginManager(object):
"""Base class for plugin managers. PluginManager is intended to be
used only with a static list of plugins. The loadPlugins() implementation
only reloads plugins from _extraplugins to prevent those from being
overridden by a subclass.
The basic functionality of a plugin manager is to proxy all unknown
attributes through a ``PluginProxy`` to a list of plugins.
Note that the list of plugins *may not* be changed after the first plugin
call.
"""
proxyClass = PluginProxy
def __init__(self, plugins=(), proxyClass=None):
self._plugins = []
self._extraplugins = ()
self._proxies = {}
if plugins:
self.addPlugins(plugins)
if proxyClass is not None:
self.proxyClass = proxyClass
def __getattr__(self, call):
try:
return self._proxies[call]
except KeyError:
proxy = self.proxyClass(call, self._plugins)
self._proxies[call] = proxy
return proxy
def __iter__(self):
return iter(self.plugins)
def addPlugin(self, plug):
# allow, for instance, plugins loaded via entry points to
# supplant builtin plugins.
new_name = getattr(plug, 'name', object())
self._plugins[:] = [p for p in self._plugins
if getattr(p, 'name', None) != new_name]
self._plugins.append(plug)
def addPlugins(self, plugins=(), extraplugins=()):
"""extraplugins are maintained in a separate list and
re-added by loadPlugins() to prevent their being overwritten
by plugins added by a subclass of PluginManager
"""
self._extraplugins = extraplugins
for plug in iterchain(plugins, extraplugins):
self.addPlugin(plug)
def configure(self, options, config):
"""Configure the set of plugins with the given options
and config instance. After configuration, disabled plugins
are removed from the plugins list.
"""
log.debug("Configuring plugins")
self.config = config
cfg = PluginProxy('configure', self._plugins)
cfg(options, config)
enabled = [plug for plug in self._plugins if plug.enabled]
self.plugins = enabled
self.sort()
log.debug("Plugins enabled: %s", enabled)
def loadPlugins(self):
for plug in self._extraplugins:
self.addPlugin(plug)
def sort(self):
return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
def _get_plugins(self):
return self._plugins
def _set_plugins(self, plugins):
self._plugins = []
self.addPlugins(plugins)
plugins = property(_get_plugins, _set_plugins, None,
"""Access the list of plugins managed by
this plugin manager""")
class ZeroNinePlugin:
"""Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
"""
def __init__(self, plugin):
self.plugin = plugin
def options(self, parser, env=os.environ):
self.plugin.add_options(parser, env)
def addError(self, test, err):
if not hasattr(self.plugin, 'addError'):
return
# switch off to addSkip, addDeprecated if those types
from nose.exc import SkipTest, DeprecatedTest
ec, ev, tb = err
if issubclass(ec, SkipTest):
if not hasattr(self.plugin, 'addSkip'):
return
return self.plugin.addSkip(test.test)
elif issubclass(ec, DeprecatedTest):
if not hasattr(self.plugin, 'addDeprecated'):
return
return self.plugin.addDeprecated(test.test)
# add capt
capt = test.capturedOutput
return self.plugin.addError(test.test, err, capt)
def loadTestsFromFile(self, filename):
if hasattr(self.plugin, 'loadTestsFromPath'):
return self.plugin.loadTestsFromPath(filename)
def addFailure(self, test, err):
if not hasattr(self.plugin, 'addFailure'):
return
# add capt and tbinfo
capt = test.capturedOutput
tbinfo = test.tbinfo
return self.plugin.addFailure(test.test, err, capt, tbinfo)
def addSuccess(self, test):
if not hasattr(self.plugin, 'addSuccess'):
return
capt = test.capturedOutput
self.plugin.addSuccess(test.test, capt)
def startTest(self, test):
if not hasattr(self.plugin, 'startTest'):
return
return self.plugin.startTest(test.test)
def stopTest(self, test):
if not hasattr(self.plugin, 'stopTest'):
return
return self.plugin.stopTest(test.test)
def __getattr__(self, val):
return getattr(self.plugin, val)
class EntryPointPluginManager(PluginManager):
"""Plugin manager that loads plugins from the `nose.plugins` and
`nose.plugins.0.10` entry points.
"""
entry_points = (('nose.plugins.0.10', None),
('nose.plugins', ZeroNinePlugin))
def loadPlugins(self):
"""Load plugins by iterating the `nose.plugins` entry point.
"""
from pkg_resources import iter_entry_points
loaded = {}
for entry_point, adapt in self.entry_points:
for ep in iter_entry_points(entry_point):
if ep.name in loaded:
continue
loaded[ep.name] = True
log.debug('%s load plugin %s', self.__class__.__name__, ep)
try:
plugcls = ep.load()
except KeyboardInterrupt:
raise
except Exception as e:
# never want a plugin load to kill the test run
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % (ep, e),
RuntimeWarning)
continue
if adapt:
plug = adapt(plugcls())
else:
plug = plugcls()
self.addPlugin(plug)
super(EntryPointPluginManager, self).loadPlugins()
class BuiltinPluginManager(PluginManager):
"""Plugin manager that loads plugins from the list in
`nose.plugins.builtin`.
"""
def loadPlugins(self):
"""Load plugins in nose.plugins.builtin
"""
from nose.plugins import builtin
for plug in builtin.plugins:
self.addPlugin(plug())
super(BuiltinPluginManager, self).loadPlugins()
try:
import pkg_resources
class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
pass
except ImportError:
class DefaultPluginManager(BuiltinPluginManager):
pass
class RestrictedPluginManager(DefaultPluginManager):
"""Plugin manager that restricts the plugin list to those not
excluded by a list of exclude methods. Any plugin that implements
an excluded method will be removed from the manager's plugin list
after plugins are loaded.
"""
def __init__(self, plugins=(), exclude=(), load=True):
DefaultPluginManager.__init__(self, plugins)
self.load = load
self.exclude = exclude
self.excluded = []
self._excludedOpts = None
def excludedOption(self, name):
if self._excludedOpts is None:
from optparse import OptionParser
self._excludedOpts = OptionParser(add_help_option=False)
for plugin in self.excluded:
plugin.options(self._excludedOpts, env={})
return self._excludedOpts.get_option('--' + name)
def loadPlugins(self):
if self.load:
DefaultPluginManager.loadPlugins(self)
allow = []
for plugin in self.plugins:
ok = True
for method in self.exclude:
if hasattr(plugin, method):
ok = False
self.excluded.append(plugin)
break
if ok:
allow.append(plugin)
self.plugins = allow
|
py | 1a40068c6441a292750044d86526dfd1a7b62d09 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# {fact [email protected] defects=1}
def notequals_operator_noncompliant():
phrase = "Thisisstring"
# Noncompliant: uses checks for equality instead of identity.
if phrase != None:
print(True)
# {/fact}
# {fact [email protected] defects=0}
def isnot_operator_compliant():
phrase = "Thisisstring"
# Compliant: uses the correct mechanism for checking the identity.
if phrase is not None:
print(True)
# {/fact}
|
py | 1a4006f5e3c1147a3cda6ce16847f6201f4cd06b | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1HorizontalPodAutoscalerList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1HorizontalPodAutoscaler]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1HorizontalPodAutoscalerList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1HorizontalPodAutoscalerList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1HorizontalPodAutoscalerList. # noqa: E501
list of horizontal pod autoscaler objects. # noqa: E501
:return: The items of this V1HorizontalPodAutoscalerList. # noqa: E501
:rtype: list[V1HorizontalPodAutoscaler]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1HorizontalPodAutoscalerList.
list of horizontal pod autoscaler objects. # noqa: E501
:param items: The items of this V1HorizontalPodAutoscalerList. # noqa: E501
:type: list[V1HorizontalPodAutoscaler]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1HorizontalPodAutoscalerList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1HorizontalPodAutoscalerList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1HorizontalPodAutoscalerList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1HorizontalPodAutoscalerList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
:return: The metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1HorizontalPodAutoscalerList.
:param metadata: The metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HorizontalPodAutoscalerList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1HorizontalPodAutoscalerList):
return True
return self.to_dict() != other.to_dict()
|
py | 1a4007cdf94e0d800ce2e6fffba7417d1c68686d | # --------------
#Importing the modules
import pandas as pd
import numpy as np
from scipy.stats import mode
def categorical(df):
""" Extract names of categorical column
This function accepts a dataframe and returns categorical list,
containing the names of categorical columns(categorical_var).
"""
categorical_var= df.select_dtypes(include='object').columns.tolist()
return categorical_var
def numerical(df):
""" Extract names of numerical column
This function accepts a dataframe and returns numerical list,
containing the names of numerical columns(numerical_var).
"""
numerical_var = df.select_dtypes(include='number').columns.tolist()
return numerical_var
def clear(df,col,val):
""" Check distribution of variable
This function accepts a dataframe,column(feature) and value which returns count of the value,
containing the value counts of a variable(value_counts)
"""
value_counts = df[col].value_counts()[val]
return value_counts
def instances_based_condition(df,col1,val1,col2,val2):
""" Instances based on the condition
This function accepts a dataframe, 2 columns(feature) and 2 values which returns the dataframe
based on the condition.
"""
instance = df[(df[col1] > val1) & (df[col2]== val2)]
return instance
def agg_values_ina_month(df,date_col,agg_col, agg):
""" Aggregate values according to month
This function accepts a dataframe, 2 columns(feature) and aggregated function(agg) which returns the Pivot
table with different aggregated value of the feature with an index of the month.
"""
df[date_col] = pd.to_datetime(df[date_col])
aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}
aggregated_value = df.pivot_table(values=[agg_col], index=df[date_col].dt.month,aggfunc={agg_col:aggregate[agg]})
return aggregated_value
# Code to group values based on the feature
def group_values(df,col1,agg1):
""" Agrregate values by grouping
This function accepts a dataframe, 1 column(feature) and aggregated function(agg1) which groupby the datframe based on the column.
"""
aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}
grouping = df.groupby(col1).agg(aggregate[agg1])
return grouping
# function for conversion
def convert(df,celsius):
""" Convert temperatures from celsius to fahrenhheit
This function accepts a dataframe, 1 column(feature) which returns the dataframe with converted values from
celsius to fahrenhheit.
"""
centigrade_temps = df[celsius]
converted_temp = 1.8*centigrade_temps + 32
return converted_temp
# Load the weather_2012 data csv file and store it in weather variable.
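# Note: `path` is presumably supplied by the exercise environment (it points to
# the weather_2012 CSV); it is not defined in this script.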
weather = pd.read_csv(path)
weather.head()
# Check the categorical and numerical variables. You can check it by calling categorical and numerical function.
print(categorical(weather))
print(numerical(weather))
#Checking the distribution of a specific value like the number of times the weather was exactly Clear in the given column.
#You can check it by calling the function clear with respective parameters.
print(clear(weather,"Weather",'Clear'))
print(clear(weather,"Wind Spd (km/h)", 4))
#Check some instances based on a specific condition like when the wind speed was above 35 and visibility was 25.
#Check it by calling the function instances_based_condition with respective parameters.
wind_speed_35_vis_25 = instances_based_condition(weather,'Wind Spd (km/h)',35,'Visibility (km)',25)
#Calculate the mean temperature recorded by month from temperature data. Generate a pivot table which contains the aggregated values(like mean, max, min, sum, len) recorded by month.
#Call the function agg_values_ina_month with respective parameters.
agg_values_ina_month(weather,'Date/Time','Dew Point Temp (C)','mean')
# To groupby based on a column like on Weather column and then aggregate the mean values of each column for different types of weather using mean.
#Call the function group_values.
mean_weather = group_values(weather,"Weather",'mean')
# Convert celsius temperature into fahrehheit temperatures from temperature data by calling the function convert.
weather_fahrehheit = convert(weather,"Temp (C)")
|
py | 1a4007e1e7a5ec2f5c1dbf41a48bc30573d86300 | #!/usr/bin/env python3
# In this example, we demonstrate how a Korali experiment can
# be resumed from any point (generation). This is a useful feature
# for continuing jobs after an error, or to fragment big jobs into
# smaller ones that can better fit a supercomputer queue.
#
# First, we run a simple Korali experiment.
import sys
sys.path.append('./_model')
from model import *
import korali
k = korali.Engine()
e = korali.Experiment()
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = calculateLogLikelihood
e["Solver"]["Type"] = "Sampler/TMCMC"
e["Solver"]["Population Size"] = 5000
e["Solver"]["Termination Criteria"]["Max Generations"] = 4
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -100.0
e["Distributions"][0]["Maximum"] = +100.0
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
print("\n-------------------------------------------------------------")
print("Running first generations...")
print("-------------------------------------------------------------\n")
k.run(e)
print("\n-------------------------------------------------------------")
print("Running last generations...")
print("-------------------------------------------------------------\n")
e["Solver"]["Termination Criteria"]["Max Generations"] = 10
k.run(e)
|
py | 1a40090fb444766e4c552710686d771073c7f274 | # THIS FILE IS AUTOMATICALLY GENERATED.
"""
Sample Calm DSL for Hello blueprint
The top-level folder contains the following files:
HelloBlueprint/
├── .local
│ └── keys
│ ├── centos
│ └── centos_pub
├── blueprint.py
└── scripts
├── pkg_install_task.sh
└── pkg_uninstall_task.sh
On launch, this blueprint does the following:
1. Creates AHV VM (2 vCPUs, 4G Mem, 1 core)
2. Installs CentOS 7 by downloading image from http://download.nutanix.com.
3. Injects SSH public key in the VM using cloud init.
4. Creates calm credential using the SSH private key to run tasks on the VM.
Order of execution for every deployment during blueprint launch:
1. Substrate.__pre_create__() (Only http and escript tasks are allowed here)
2. Substrate.__create__() (Generated from provider_spec)
3. Package.__install__() (Scripts to install application go here)
4. Service.__create__() (Scripts to configure and create the service go here)
5. Service.__start__() (Scripts to start the service go here)
Useful commands (execute from top-level directory):
1. calm compile bp --file HelloBlueprint/blueprint.py
2. calm create bp --file HelloBlueprint/blueprint.py --name <blueprint_name>
3. calm get bps --name <blueprint_name>
4. calm describe bp <blueprint_name>
5. calm launch bp <blueprint_name> --app_name <app_name> -i
6. calm get apps --name <app_name>
7. calm describe app <app_name>
8. calm delete app <app_name>
9. calm delete bp <blueprint_name>
"""
import os
from calm.dsl.builtins import Service, Package, Substrate
from calm.dsl.builtins import Deployment, Profile, Blueprint
from calm.dsl.builtins import CalmVariable as Variable
from calm.dsl.builtins import CalmTask as Task
from calm.dsl.builtins import action, parallel, ref, basic_cred
from calm.dsl.builtins import read_local_file
from calm.dsl.builtins import vm_disk_package, AhvVmDisk, AhvVmNic
from calm.dsl.builtins import AhvVmGC, AhvVmResources, AhvVm
# SSH Credentials
CENTOS_USER = "centos"
CENTOS_KEY = read_local_file(os.path.join("keys", "centos"))
CENTOS_PUBLIC_KEY = read_local_file(os.path.join("keys", "centos_pub"))
CentosCred = basic_cred(
CENTOS_USER, CENTOS_KEY, name="Centos", type="KEY", default=True,
)
# OS Image details for VM
CENTOS_IMAGE_SOURCE = "http://download.nutanix.com/calm/CentOS-7-x86_64-1810.qcow2"
CentosPackage = vm_disk_package(
name="centos_disk", config={"image": {"source": CENTOS_IMAGE_SOURCE}},
)
class HelloService(Service):
"""Sample Service"""
# Service Variables
ENV = Variable.WithOptions.Predefined.string(
["DEV", "PROD"], default="DEV", is_mandatory=True, runtime=True
)
# Service Actions
@action
def __create__():
# Step 1
Task.Exec.ssh(name="Task1", script="echo 'Service create in ENV=@@{ENV}@@'")
@action
def __start__():
# Step 1
Task.Exec.ssh(name="Task1", script="echo 'Service start in ENV=@@{ENV}@@'")
@action
def __stop__():
# Step 1
Task.Exec.ssh(name="Task1", script="echo 'Service stop in ENV=@@{ENV}@@'")
@action
def __delete__():
# Step 1
Task.Exec.ssh(name="Task1", script="echo 'Service delete in ENV=@@{ENV}@@'")
# Custom service actions
@action
def custom_action_1():
"""Sample service action"""
# Step 1
Task.Exec.ssh(name="Task11", script='echo "Hello"')
# Step 2
Task.Exec.ssh(name="Task12", script='echo "Hello again"')
@action
def custom_action_2():
# Step 1
Task.Exec.ssh(name="Task21", script="date")
# Step 2
with parallel(): # All tasks within this context will be run in parallel
Task.Exec.ssh(name="Task22a", script="date")
Task.Exec.ssh(name="Task22b", script="date")
# Step 3
Task.Exec.ssh(name="Task23", script="date")
class HelloPackage(Package):
"""Sample Package"""
# Services created by installing this Package
services = [ref(HelloService)]
# Package Variables
sample_pkg_var = Variable.Simple("Sample package installation")
# Package Actions
@action
def __install__():
# Step 1
Task.Exec.ssh(
name="Task1", filename=os.path.join("scripts", "pkg_install_task.sh")
)
@action
def __uninstall__():
# Step 1
Task.Exec.ssh(
name="Task1", filename=os.path.join("scripts", "pkg_uninstall_task.sh")
)
class MyAhvVmResources(AhvVmResources):
memory = 4
vCPUs = 2
cores_per_vCPU = 1
disks = [
AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(CentosPackage, bootable=True),
]
nics = [AhvVmNic.DirectNic.ingress("Xi-AWS: Autogenerated private subnet 172.31.49.0/24")]
guest_customization = AhvVmGC.CloudInit(
config={
"users": [
{
"name": CENTOS_USER,
"ssh-authorized-keys": [CENTOS_PUBLIC_KEY],
"sudo": ["ALL=(ALL) NOPASSWD:ALL"],
}
]
}
)
class MyAhvVm(AhvVm):
resources = MyAhvVmResources
categories = {"AppFamily": "Demo", "AppType": "Default"}
class MyAhvVMSubstrate(Substrate):
"""AHV VM Substrate"""
provider_type = "AHV_VM"
provider_spec = MyAhvVm
# Substrate Actions
@action
def __pre_create__():
# Step 1
Task.Exec.escript(
name="Task1", script="print 'Pre Create task runs before VM is created'"
)
@action
def __post_delete__():
# Step 1
Task.Exec.escript(
name="Task1", script="print 'Post delete task runs after VM is deleted'"
)
class HelloDeployment(Deployment):
"""Sample Deployment"""
packages = [ref(HelloPackage)]
substrate = ref(MyAhvVMSubstrate)
class DefaultProfile(Profile):
# Deployments under this profile
deployments = [HelloDeployment]
# Profile Variables
var1 = Variable.Simple("sample_val1", runtime=True)
var2 = Variable.Simple("sample_val2", runtime=True)
var3 = Variable.Simple.int("2", validate_regex=True, regex=r"^[\d]*$")
# Profile Actions
@action
def custom_profile_action_1():
"""Sample description for a profile action"""
# Step 1: Run a task on a service in the profile
Task.Exec.ssh(
name="Task1",
script='echo "Profile level action using @@{var1}@@ and @@{var2}@@ and @@{var3}@@"',
target=ref(HelloService),
)
# Step 2: Call service action as a task.
# It will execute all tasks under the given action.
HelloService.custom_action_1(name="Task6")
class HelloBlueprint(Blueprint):
""" Sample blueprint for Hello app using AHV VM"""
credentials = [CentosCred]
services = [HelloService]
packages = [HelloPackage, CentosPackage]
substrates = [MyAhvVMSubstrate]
profiles = [DefaultProfile]
|
py | 1a400928269c94098ddc943a68276054f1066db8 | '''
- Write a program that reads a value in meters and displays it
converted to centimeters and millimeters.
'''
m = float(input('Enter a value in meters (m): '))
cm = m * 100
mm = m * 1000
print('The value entered was {}m, which equals {}cm and {}mm'.format(m, cm, mm))
|
py | 1a4009ca7355c76c50cd8046f8451c57c3c52426 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from profile_chrome import chrome_startup_tracing_agent
from profile_chrome import chrome_tracing_agent
from profile_chrome import ui
from profile_chrome import util
from systrace import output_generator
from systrace import tracing_controller
def _GetResults(trace_results, controller, output, compress, write_json,
interval):
ui.PrintMessage('Downloading...')
# Wait for the trace file to get written.
time.sleep(1)
for agent in controller.get_child_agents:
if isinstance(agent, chrome_tracing_agent.ChromeTracingAgent):
time.sleep(interval / 4)
# Ignore the systraceController because it will not contain any results,
# instead being in charge of collecting results.
trace_results = [x for x in controller.all_results if not (x.source_name ==
'systraceController')]
if not trace_results:
ui.PrintMessage('No results')
return ''
result = None
trace_results = output_generator.MergeTraceResultsIfNeeded(trace_results)
if not write_json:
ui.PrintMessage('Writing trace HTML...')
html_file = trace_results[0].source_name + '.html'
result = output_generator.GenerateHTMLOutput(trace_results, html_file)
ui.PrintMessage('\nWrote file://%s' % result)
elif compress and len(trace_results) == 1:
result = output or trace_results[0].source_name + '.gz'
util.WriteDataToCompressedFile(trace_results[0].raw_data, result)
elif len(trace_results) > 1:
result = (output or 'chrome-combined-trace-%s.zip' %
util.GetTraceTimestamp())
util.ArchiveData(trace_results, result)
elif output:
result = output
with open(result, 'wb') as f:
f.write(trace_results[0].raw_data)
else:
result = trace_results[0].source_name
with open(result, 'wb') as f:
f.write(trace_results[0].raw_data)
return result
def CaptureProfile(options, interval, modules, output=None,
compress=False, write_json=False):
"""Records a profiling trace saves the result to a file.
Args:
options: Command line options.
interval: Time interval to capture in seconds. An interval of None (or 0)
continues tracing until stopped by the user.
modules: The list of modules to initialize the tracing controller with.
output: Output file name or None to use an automatically generated name.
compress: If True, the result will be compressed either with gzip or zip
depending on the number of captured subtraces.
write_json: If True, prefer JSON output over HTML.
Returns:
Path to saved profile.
"""
agents_with_config = tracing_controller.CreateAgentsWithConfig(options,
modules)
if chrome_startup_tracing_agent in modules:
controller_config = tracing_controller.GetChromeStartupControllerConfig(
options)
else:
controller_config = tracing_controller.GetControllerConfig(options)
controller = tracing_controller.TracingController(agents_with_config,
controller_config)
try:
result = controller.StartTracing()
trace_type = controller.GetTraceType()
if not result:
ui.PrintMessage('Trace starting failed.')
if interval:
ui.PrintMessage(('Capturing %d-second %s. Press Enter to stop early...' %
(interval, trace_type)), eol='')
ui.WaitForEnter(interval)
else:
ui.PrintMessage('Capturing %s. Press Enter to stop...' % trace_type,
eol='')
raw_input()
ui.PrintMessage('Stopping...')
all_results = controller.StopTracing()
finally:
if interval:
ui.PrintMessage('done')
return _GetResults(all_results, controller, output, compress, write_json,
interval)
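# Illustrative call site (an assumption about typical usage; real callers build
# `options` with profile_chrome's own flag parser, which is not shown here):
#   profile_path = CaptureProfile(
#       options, interval=10, modules=[chrome_tracing_agent],
#       output='chrome-trace.html')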
|
py | 1a400aa8bfb13d9f592939732eb7a52804938351 | def shellSort(arr):
_len = len(arr)
grap = _len
while grap > 1:
grap = grap // 2 # 间隔距离
for i in range(grap, _len):
j, curr = i, arr[i]
while j >= grap and curr < arr[j - grap]:
arr[j] = arr[j - grap] # 比 curr大 则把前面大的值往后存放
j -= grap # 前移比较
arr[j] = curr # 找到位置 存放
return arr
a = [31, 42, 13, 54, 5]
print(shellSort(a))
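# Illustrative variant (added for comparison, not part of the original snippet):
# the same gapped insertion sort, but using Knuth's gap sequence (1, 4, 13, 40, ...)
# instead of repeatedly halving the gap.
def shell_sort_knuth(arr):
    n = len(arr)
    gap = 1
    while gap < n // 3:
        gap = 3 * gap + 1  # 1, 4, 13, 40, ...
    while gap >= 1:
        for i in range(gap, n):
            j, curr = i, arr[i]
            while j >= gap and curr < arr[j - gap]:
                arr[j] = arr[j - gap]
                j -= gap
            arr[j] = curr
        gap //= 3
    return arr
print(shell_sort_knuth([31, 42, 13, 54, 5]))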
|
py | 1a400bbd334ec121fe6f308adc29246c526eb4ab | '''
Run models (ResNet18, MobileNetV2) by scaling filter sizes to different ratios on TinyImageNet.
Stores accuracy for comparison plot.
Default Scaling Ratios: 0.25, 0.5, 0.75, 1.0
'''
from __future__ import print_function
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath('.'))))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils_data
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import numpy as np
import numpy.linalg as la
import pdb
import pickle
import visdom
import time
import torch.backends.cudnn as cudnn
import gc
import math
import argparse
import copy
from utils import progress_bar, save_checkpoint, adjust_learning_rate, accuracy, adjust_learning_rate_imagenet
import csv
from sklearn import linear_model
from model.VGG import vgg11
from model.preact_resnet import PreActResNet18
from model.resnet import *
from model.lenet import LeNet
from model.mobilenetv2 import MobileNetV2
from torch.optim.lr_scheduler import StepLR
from copy import deepcopy
##############
## Function ##
##############
def num_flat_features(x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
def train(args, model, train_loader, optimizer, epoch, criterion, pruning_engine=None, scheduler=None):
"""Train for one epoch on the training set also performs pruning"""
train_loss = 0
train_acc = 0
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
# make sure that all gradients are zero
for p in model.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
output = model(data)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
loss.backward()
optimizer.step()
train_loss += loss.item()
train_acc += prec1.item()
progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%%'
% (train_loss/(batch_idx+1), train_acc/(batch_idx+1)))
return train_acc/(batch_idx+1), train_loss/(batch_idx+1)
def validate(args, test_loader, model, criterion, epoch, pruning_engine=None, optimizer=None):
"""Perform validation on the validation set"""
test_loss = 0
test_acc = 0
# switch to evaluate mode
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data = data.cuda()
target = target.cuda()
output = model(data)
loss = criterion(output, target)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
test_loss += loss.item()
test_acc += prec1.item()
progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%%'
% (test_loss/(batch_idx+1), test_acc/(batch_idx+1)))
return test_acc/(batch_idx+1), test_loss/(batch_idx+1)
def main():
# Training settings
parser = argparse.ArgumentParser(description='Efficient Filter Scaling of Convolutional Neural Network')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=150, metavar='N',
                        help='number of epochs to train (default: 150)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--dataset', default="tinyimagenet", type=str,
help='dataset for experiment, choice: tinyimagenet', choices= ["tinyimagenet"])
parser.add_argument('--data', metavar='DIR', default='/DATA/tiny-imagenet-200', help='path to imagenet dataset')
parser.add_argument('--model', default="resnet18", type=str,
                        help='model selection, choices: mobilenetv2, resnet18',
choices=["mobilenetv2", "resnet18"])
parser.add_argument('--save', default='model',
help='model file')
parser.add_argument('--prune_fname', default='filename',
help='prune save file')
parser.add_argument('--descent_idx', type=int, default=14,
help='Iteration for Architecture Descent')
parser.add_argument('--morph', dest="morph", action='store_true', default=False,
help='Prunes only 50 percent of neurons, for comparison with MorphNet')
parser.add_argument('--uniform', dest="uniform", action='store_true', default=False,
help='Use uniform scaling instead of NeuralScale')
args = parser.parse_args()
##################
## Data loading ##
##################
kwargs = {'num_workers': 1, 'pin_memory': True}
if args.dataset == "tinyimagenet":
print("Using tiny-Imagenet Dataset")
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'test')
normalize = transforms.Normalize([0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
train_dataset = torchvision.datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomRotation(20),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_sampler = None
kwargs = {'num_workers': 16}
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
sampler=train_sampler, pin_memory=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(valdir, transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False, pin_memory=True, **kwargs)
else:
print("Dataset does not exist! [Imagenet]")
exit()
if args.dataset=='tinyimagenet':
num_classes = 200
else:
print("Only tinyimagenet")
exit()
ratios = [0.25, 0.5, 0.75, 1.0]
pruned_filters = None
neuralscale = True # turn on NeuralScale by default
if args.uniform:
neuralscale = False
if args.morph:
neuralscale = False
if args.model == "resnet18":
pruned_filters = [82,90,78,80,96,180,104,96,194,312,182,178,376,546,562,454,294] # resnet18 tinyimagenet
elif args.mode == "mobilenetv2":
pruned_filters = [28, 16, 24, 24, 32, 32, 30, 64, 59, 50, 41, 96, 73, 48, 160, 69, 47, 155, 360] # mobilenetv2 tinyimagenet
else:
print("{} not supported.".format(args.model))
exit()
for ratio in ratios:
print("Current ratio: {}".format(ratio))
###########
## Model ##
###########
print("Setting Up Model...")
if args.model == "resnet18":
model = PreActResNet18(ratio=ratio, neuralscale=neuralscale, num_classes=num_classes, dataset=args.dataset, prune_fname=args.prune_fname, descent_idx=args.descent_idx, pruned_filters=pruned_filters)
elif args.model == "mobilenetv2":
model = MobileNetV2(ratio=ratio, neuralscale=neuralscale, num_classes=num_classes, dataset=args.dataset, prune_fname=args.prune_fname, descent_idx=args.descent_idx, pruned_filters=pruned_filters)
else:
print(args.model, "model not supported [resnet18 mobilenetv2] only")
exit()
print("{} set up.".format(args.model))
# for model saving
model_path = "saved_models"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_save_folder = "%s/%s"%(model_path, args.model)
if not os.path.exists(log_save_folder):
os.makedirs(log_save_folder)
model_save_path = "%s/%s"%(log_save_folder, args.save) + "_checkpoint.t7"
model_state_dict = model.state_dict()
if args.save:
print("Model will be saved to {}".format(model_save_path))
save_checkpoint({
'state_dict': model_state_dict
}, False, filename = model_save_path)
else:
print("Save path not defined. Model will not be saved.")
# Assume cuda is available and uses single GPU
model.cuda()
cudnn.benchmark = True
# define objective
criterion = nn.CrossEntropyLoss()
######################
## Set up pruning ##
######################
# remove updates from gate layers, because we want them to be 0 or 1 constantly
parameters_for_update = []
parameters_for_update_named = []
for name, m in model.named_parameters():
if "gate" not in name:
parameters_for_update.append(m)
parameters_for_update_named.append((name, m))
else:
print("skipping parameter", name, "shape:", m.shape)
total_size_params = sum([np.prod(par.shape) for par in parameters_for_update])
print("Total number of parameters, w/o usage of bn consts: ", total_size_params)
optimizer = optim.SGD(parameters_for_update, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
###############
## Training ##
###############
best_test_acc = 0
train_acc_plt = []
train_loss_plt = []
test_acc_plt = []
test_loss_plt = []
epoch_plt = []
for epoch in range(1, args.epochs + 1):
adjust_learning_rate_imagenet(args, optimizer, epoch, search=False)
print("Epoch: {}".format(epoch))
# train model
train_acc, train_loss = train(args, model, train_loader, optimizer, epoch, criterion)
# evaluate on validation set
test_acc, test_loss = validate(args, test_loader, model, criterion, epoch, optimizer=optimizer)
# remember best prec@1 and save checkpoint
is_best = test_acc > best_test_acc
best_test_acc = max(test_acc, best_test_acc)
model_state_dict = model.state_dict()
if args.save:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model_state_dict,
'best_prec1': test_acc,
}, is_best, filename=model_save_path)
train_acc_plt.append(train_acc)
train_loss_plt.append(train_loss)
test_acc_plt.append(test_acc)
test_loss_plt.append(test_loss)
epoch_plt.append(epoch)
pickle_save = {
"ratio": ratio,
"train_acc": train_acc_plt,
"train_loss": train_loss_plt,
"test_acc": test_acc_plt,
"test_loss": test_loss_plt,
}
plot_path = "saved_plots"
if not os.path.exists(plot_path):
os.makedirs(plot_path)
log_save_folder = "%s/%s"%(plot_path, args.model)
if not os.path.exists(log_save_folder):
os.makedirs(log_save_folder)
pickle_out = open("%s/%s_%s.pk"%(log_save_folder, args.save, int(ratio*100)),"wb")
pickle.dump(pickle_save, pickle_out)
pickle_out.close()
if __name__ == '__main__':
main()
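# Example invocation (illustrative only; every flag below is defined in the
# argparse section above, but the script name, paths and file names are placeholders):
#   python tinyimagenet_scale.py --model resnet18 --dataset tinyimagenet \
#       --data /DATA/tiny-imagenet-200 --save resnet18_neuralscale \
#       --prune_fname resnet18_prune --descent_idx 14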
|
py | 1a400c9072b8cf40fac1d92826c33827090973f2 | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects relating to skills."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from constants import constants
from core.domain import change_domain
from core.domain import html_cleaner
from core.domain import state_domain
import feconf
import python_utils
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
SKILL_PROPERTY_DESCRIPTION = 'description'
SKILL_PROPERTY_LANGUAGE_CODE = 'language_code'
SKILL_PROPERTY_SUPERSEDING_SKILL_ID = 'superseding_skill_id'
SKILL_PROPERTY_ALL_QUESTIONS_MERGED = 'all_questions_merged'
SKILL_CONTENTS_PROPERTY_EXPLANATION = 'explanation'
SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES = 'worked_examples'
SKILL_MISCONCEPTIONS_PROPERTY_NAME = 'name'
SKILL_MISCONCEPTIONS_PROPERTY_NOTES = 'notes'
SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK = 'feedback'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_SKILL_PROPERTY = 'update_skill_property'
CMD_UPDATE_SKILL_CONTENTS_PROPERTY = 'update_skill_contents_property'
CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY = (
'update_skill_misconceptions_property')
CMD_UPDATE_RUBRICS = 'update_rubrics'
CMD_ADD_SKILL_MISCONCEPTION = 'add_skill_misconception'
CMD_DELETE_SKILL_MISCONCEPTION = 'delete_skill_misconception'
CMD_CREATE_NEW = 'create_new'
CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION = (
'migrate_contents_schema_to_latest_version')
CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION = (
'migrate_misconceptions_schema_to_latest_version')
CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION = (
'migrate_rubrics_schema_to_latest_version')
CMD_PUBLISH_SKILL = 'publish_skill'
class SkillChange(change_domain.BaseChange):
"""Domain object for changes made to skill object.
The allowed commands, together with the attributes:
- 'add_skill_misconception' (with new_misconception_dict)
- 'delete_skill_misconception' (with misconception_id)
- 'create_new'
- 'update_skill_property' (with property_name, new_value
and old_value)
- 'update_skill_contents_property' (with property_name,
new_value and old_value)
- 'update_skill_misconceptions_property' (
with misconception_id, property_name, new_value and old_value)
- 'migrate_contents_schema_to_latest_version' (with
from_version and to_version)
- 'migrate_misconceptions_schema_to_latest_version' (with
from_version and to_version)
"""
# The allowed list of skill properties which can be used in
# update_skill_property command.
SKILL_PROPERTIES = (
SKILL_PROPERTY_DESCRIPTION, SKILL_PROPERTY_LANGUAGE_CODE,
SKILL_PROPERTY_SUPERSEDING_SKILL_ID,
SKILL_PROPERTY_ALL_QUESTIONS_MERGED)
# The allowed list of skill contents properties which can be used in
# update_skill_contents_property command.
SKILL_CONTENTS_PROPERTIES = (
SKILL_CONTENTS_PROPERTY_EXPLANATION,
SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES)
# The allowed list of misconceptions properties which can be used in
# update_skill_misconceptions_property command.
SKILL_MISCONCEPTIONS_PROPERTIES = (
SKILL_MISCONCEPTIONS_PROPERTY_NAME,
SKILL_MISCONCEPTIONS_PROPERTY_NOTES,
SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK
)
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_ADD_SKILL_MISCONCEPTION,
'required_attribute_names': ['new_misconception_dict'],
'optional_attribute_names': []
}, {
'name': CMD_DELETE_SKILL_MISCONCEPTION,
'required_attribute_names': ['misconception_id'],
'optional_attribute_names': []
}, {
'name': CMD_UPDATE_RUBRICS,
'required_attribute_names': ['difficulty', 'explanation'],
'optional_attribute_names': []
}, {
'name': CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'required_attribute_names': [
'misconception_id', 'property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_MISCONCEPTIONS_PROPERTIES}
}, {
'name': CMD_UPDATE_SKILL_PROPERTY,
'required_attribute_names': ['property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_PROPERTIES}
}, {
'name': CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
'required_attribute_names': ['property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_CONTENTS_PROPERTIES}
}, {
'name': CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}, {
'name': CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}, {
'name': CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}]
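# Illustrative example (an assumption about typical usage, not part of the
# original module): a change dict for the 'update_skill_property' command is
# expected to carry the attributes listed above, e.g.
#   {
#       'cmd': 'update_skill_property',
#       'property_name': 'description',
#       'old_value': 'Old description',
#       'new_value': 'New description',
#   }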
class Misconception(python_utils.OBJECT):
"""Domain object describing a skill misconception."""
def __init__(
self, misconception_id, name, notes, feedback):
"""Initializes a Misconception domain object.
Args:
misconception_id: int. The unique id of each misconception.
name: str. The name of the misconception.
notes: str. General advice for creators about the
misconception (including examples) and general notes. This
should be an html string.
feedback: str. This can auto-populate the feedback field
when an answer group has been tagged with a misconception. This
should be an html string.
"""
self.id = misconception_id
self.name = name
self.notes = html_cleaner.clean(notes)
self.feedback = html_cleaner.clean(feedback)
def to_dict(self):
"""Returns a dict representing this Misconception domain object.
Returns:
A dict, mapping all fields of Misconception instance.
"""
return {
'id': self.id,
'name': self.name,
'notes': self.notes,
'feedback': self.feedback
}
@classmethod
def from_dict(cls, misconception_dict):
"""Returns a Misconception domain object from a dict.
Args:
misconception_dict: dict. The dict representation of
Misconception object.
Returns:
Misconception. The corresponding Misconception domain object.
"""
misconception = cls(
misconception_dict['id'], misconception_dict['name'],
misconception_dict['notes'], misconception_dict['feedback'])
return misconception
@classmethod
def require_valid_misconception_id(cls, misconception_id):
"""Validates the misconception id for a Misconception object.
Args:
misconception_id: int. The misconception id to be validated.
Raises:
ValidationError. The misconception id is invalid.
"""
if not isinstance(misconception_id, int):
raise utils.ValidationError(
'Expected misconception ID to be an integer, received %s' %
misconception_id)
def validate(self):
"""Validates various properties of the Misconception object.
Raises:
ValidationError: One or more attributes of the misconception are
invalid.
"""
self.require_valid_misconception_id(self.id)
if not isinstance(self.name, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception name to be a string, received %s' %
self.name)
utils.require_valid_name(
self.name, 'misconception_name', allow_empty=False)
if not isinstance(self.notes, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception notes to be a string, received %s' %
self.notes)
if not isinstance(self.feedback, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception feedback to be a string, received %s' %
self.feedback)
class Rubric(python_utils.OBJECT):
"""Domain object describing a skill rubric."""
def __init__(self, difficulty, explanation):
"""Initializes a Rubric domain object.
Args:
difficulty: str. The question difficulty that this rubric addresses.
explanation: str. The explanation for the corresponding difficulty.
"""
self.difficulty = difficulty
self.explanation = html_cleaner.clean(explanation)
def to_dict(self):
"""Returns a dict representing this Rubric domain object.
Returns:
A dict, mapping all fields of Rubric instance.
"""
return {
'difficulty': self.difficulty,
'explanation': self.explanation
}
@classmethod
def from_dict(cls, rubric_dict):
"""Returns a Rubric domain object from a dict.
Args:
rubric_dict: dict. The dict representation of Rubric object.
Returns:
Rubric. The corresponding Rubric domain object.
"""
rubric = cls(
rubric_dict['difficulty'], rubric_dict['explanation'])
return rubric
def validate(self):
"""Validates various properties of the Rubric object.
Raises:
ValidationError: One or more attributes of the rubric are
invalid.
"""
if not isinstance(self.difficulty, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected difficulty to be a string, received %s' %
self.difficulty)
if self.difficulty not in constants.SKILL_DIFFICULTIES:
raise utils.ValidationError(
'Invalid difficulty received for rubric: %s' % self.difficulty)
if not isinstance(self.explanation, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected explanation to be a string, received %s' %
self.explanation)
if self.explanation == '' or self.explanation == '<p></p>':
raise utils.ValidationError('Explanation should be non empty')
class SkillContents(python_utils.OBJECT):
"""Domain object representing the skill_contents dict."""
def __init__(
self, explanation, worked_examples, recorded_voiceovers,
written_translations):
"""Constructs a SkillContents domain object.
Args:
explanation: SubtitledHtml. An explanation on how to apply the
skill.
worked_examples: list(SubtitledHtml). A list of worked examples
for the skill. Each element should be a SubtitledHtml object.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the skill contents and their translations in different
languages.
written_translations: WrittenTranslations. A text translation of
the skill contents.
"""
self.explanation = explanation
self.worked_examples = worked_examples
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
def validate(self):
"""Validates various properties of the SkillContents object.
Raises:
ValidationError: One or more attributes of skill contents are
invalid.
"""
available_content_ids = set([])
if not isinstance(self.explanation, state_domain.SubtitledHtml):
raise utils.ValidationError(
'Expected skill explanation to be a SubtitledHtml object, '
'received %s' % self.explanation)
self.explanation.validate()
available_content_ids.add(self.explanation.content_id)
if not isinstance(self.worked_examples, list):
raise utils.ValidationError(
'Expected worked examples to be a list, received %s' %
self.worked_examples)
for example in self.worked_examples:
if not isinstance(example, state_domain.SubtitledHtml):
raise utils.ValidationError(
'Expected worked example to be a SubtitledHtml object, '
'received %s' % example)
if example.content_id in available_content_ids:
raise utils.ValidationError(
'Found a duplicate content id %s' % example.content_id)
available_content_ids.add(example.content_id)
example.validate()
self.recorded_voiceovers.validate(available_content_ids)
self.written_translations.validate(available_content_ids)
def to_dict(self):
"""Returns a dict representing this SkillContents domain object.
Returns:
A dict, mapping all fields of SkillContents instance.
"""
return {
'explanation': self.explanation.to_dict(),
'worked_examples': [worked_example.to_dict()
for worked_example in self.worked_examples],
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict()
}
@classmethod
def from_dict(cls, skill_contents_dict):
"""Return a SkillContents domain object from a dict.
Args:
skill_contents_dict: dict. The dict representation of
SkillContents object.
Returns:
SkillContents. The corresponding SkillContents domain object.
"""
skill_contents = cls(
state_domain.SubtitledHtml(
skill_contents_dict['explanation']['content_id'],
skill_contents_dict['explanation']['html']),
[state_domain.SubtitledHtml(
worked_example['content_id'],
worked_example['html'])
for worked_example in skill_contents_dict['worked_examples']],
state_domain.RecordedVoiceovers.from_dict(skill_contents_dict[
'recorded_voiceovers']),
state_domain.WrittenTranslations.from_dict(skill_contents_dict[
'written_translations'])
)
return skill_contents
class Skill(python_utils.OBJECT):
"""Domain object for an Oppia Skill."""
def __init__(
self, skill_id, description, misconceptions, rubrics,
skill_contents, misconceptions_schema_version,
rubric_schema_version, skill_contents_schema_version,
language_code, version, next_misconception_id, superseding_skill_id,
all_questions_merged, created_on=None, last_updated=None):
"""Constructs a Skill domain object.
Args:
skill_id: str. The unique ID of the skill.
description: str. Describes the observable behaviour of the skill.
misconceptions: list(Misconception). The list of misconceptions
associated with the skill.
rubrics: list(Rubric). The list of rubrics that explain each
difficulty level of a skill.
skill_contents: SkillContents. The object representing the contents
of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the
rubric object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this
skill is written in.
version: int. The version of the skill.
next_misconception_id: int. The misconception id to be used by
the next misconception added.
superseding_skill_id: str|None. Skill ID of the skill we
merge this skill into. This is non null only if we indicate
that this skill is a duplicate and needs to be merged into
another one.
all_questions_merged: bool. Flag that indicates if all
questions are moved from this skill to the superseding skill.
created_on: datetime.datetime. Date and time when the skill is
created.
last_updated: datetime.datetime. Date and time when the
skill was last updated.
"""
self.id = skill_id
self.description = description
self.misconceptions = misconceptions
self.skill_contents = skill_contents
self.misconceptions_schema_version = misconceptions_schema_version
self.rubric_schema_version = rubric_schema_version
self.skill_contents_schema_version = skill_contents_schema_version
self.language_code = language_code
self.created_on = created_on
self.last_updated = last_updated
self.version = version
self.rubrics = rubrics
self.next_misconception_id = next_misconception_id
self.superseding_skill_id = superseding_skill_id
self.all_questions_merged = all_questions_merged
@classmethod
def require_valid_skill_id(cls, skill_id):
"""Checks whether the skill id is a valid one.
Args:
skill_id: str. The skill id to validate.
"""
if not isinstance(skill_id, python_utils.BASESTRING):
raise utils.ValidationError('Skill id should be a string.')
if len(skill_id) != 12:
raise utils.ValidationError('Invalid skill id.')
@classmethod
def require_valid_description(cls, description):
"""Checks whether the description of the skill is a valid one.
Args:
description: str. The description to validate.
"""
if not isinstance(description, python_utils.BASESTRING):
raise utils.ValidationError('Description should be a string.')
if description == '':
raise utils.ValidationError('Description field should not be empty')
def validate(self):
"""Validates various properties of the Skill object.
Raises:
ValidationError: One or more attributes of skill are invalid.
"""
self.require_valid_description(self.description)
Misconception.require_valid_misconception_id(self.next_misconception_id)
if not isinstance(self.misconceptions_schema_version, int):
raise utils.ValidationError(
'Expected misconceptions schema version to be an integer, '
'received %s' % self.misconceptions_schema_version)
if (
self.misconceptions_schema_version !=
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected misconceptions schema version to be %s, received %s'
% (
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION,
self.misconceptions_schema_version)
)
if not isinstance(self.rubric_schema_version, int):
raise utils.ValidationError(
'Expected rubric schema version to be an integer, '
'received %s' % self.rubric_schema_version)
if (
self.rubric_schema_version !=
feconf.CURRENT_RUBRIC_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected rubric schema version to be %s, received %s'
% (
feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
self.rubric_schema_version)
)
if not isinstance(self.skill_contents_schema_version, int):
raise utils.ValidationError(
'Expected skill contents schema version to be an integer, '
'received %s' % self.skill_contents_schema_version)
if (
self.skill_contents_schema_version !=
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected skill contents schema version to be %s, received %s'
% (
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION,
self.skill_contents_schema_version)
)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
if not isinstance(self.skill_contents, SkillContents):
raise utils.ValidationError(
'Expected skill_contents to be a SkillContents object, '
'received %s' % self.skill_contents)
self.skill_contents.validate()
if not isinstance(self.rubrics, list):
raise utils.ValidationError(
'Expected rubrics to be a list, '
'received %s' % self.skill_contents)
difficulties_list = []
for rubric in self.rubrics:
if not isinstance(rubric, Rubric):
raise utils.ValidationError(
'Expected each rubric to be a Rubric '
'object, received %s' % rubric)
if rubric.difficulty in difficulties_list:
raise utils.ValidationError(
'Duplicate rubric found for: %s' % rubric.difficulty)
difficulties_list.append(rubric.difficulty)
rubric.validate()
if len(difficulties_list) != 3:
raise utils.ValidationError(
'All 3 difficulties should be addressed in rubrics')
if difficulties_list != constants.SKILL_DIFFICULTIES:
raise utils.ValidationError(
'The difficulties should be ordered as follows [%s, %s, %s]'
% (
constants.SKILL_DIFFICULTIES[0],
constants.SKILL_DIFFICULTIES[1],
constants.SKILL_DIFFICULTIES[2]))
if not isinstance(self.misconceptions, list):
raise utils.ValidationError(
'Expected misconceptions to be a list, '
'received %s' % self.misconceptions)
misconception_id_list = []
for misconception in self.misconceptions:
if not isinstance(misconception, Misconception):
raise utils.ValidationError(
'Expected each misconception to be a Misconception '
'object, received %s' % misconception)
if misconception.id in misconception_id_list:
raise utils.ValidationError(
'Duplicate misconception ID found: %s' % misconception.id)
misconception_id_list.append(misconception.id)
if int(misconception.id) >= int(self.next_misconception_id):
raise utils.ValidationError(
'The misconception with id %s is out of bounds.'
% misconception.id)
misconception.validate()
if (self.all_questions_merged and
self.superseding_skill_id is None):
raise utils.ValidationError(
'Expected a value for superseding_skill_id when '
'all_questions_merged is True.')
if (self.superseding_skill_id is not None and
self.all_questions_merged is None):
raise utils.ValidationError(
'Expected a value for all_questions_merged when '
'superseding_skill_id is set.')
def to_dict(self):
"""Returns a dict representing this Skill domain object.
Returns:
A dict, mapping all fields of Skill instance.
"""
return {
'id': self.id,
'description': self.description,
'misconceptions': [
misconception.to_dict()
for misconception in self.misconceptions],
'rubrics': [
rubric.to_dict() for rubric in self.rubrics],
'skill_contents': self.skill_contents.to_dict(),
'language_code': self.language_code,
'misconceptions_schema_version': self.misconceptions_schema_version,
'rubric_schema_version': self.rubric_schema_version,
'skill_contents_schema_version': self.skill_contents_schema_version,
'version': self.version,
'next_misconception_id': self.next_misconception_id,
'superseding_skill_id': self.superseding_skill_id,
'all_questions_merged': self.all_questions_merged
}
@classmethod
def create_default_skill(cls, skill_id, description, rubrics):
"""Returns a skill domain object with default values. This is for
the frontend where a default blank skill would be shown to the user
when the skill is created for the first time.
Args:
skill_id: str. The unique id of the skill.
description: str. The initial description for the skill.
rubrics: list(Rubric). The list of rubrics for the skill.
Returns:
Skill. The Skill domain object with the default values.
"""
explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
skill_contents = SkillContents(
state_domain.SubtitledHtml(
explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
explanation_content_id: {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
explanation_content_id: {}
}
}))
return cls(
skill_id, description, [], rubrics, skill_contents,
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION,
feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION,
constants.DEFAULT_LANGUAGE_CODE, 0, 0, None, False)
@classmethod
def update_skill_contents_from_model(
cls, versioned_skill_contents, current_version):
"""Converts the skill_contents blob contained in the given
versioned_skill_contents dict from current_version to
current_version + 1. Note that the versioned_skill_contents being
passed in is modified in-place.
Args:
versioned_skill_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
skill_contents dict.
- skill_contents: dict. The dict comprising the skill
contents.
current_version: int. The current schema version of skill_contents.
"""
versioned_skill_contents['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_skill_contents_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
versioned_skill_contents['skill_contents'] = conversion_fn(
versioned_skill_contents['skill_contents'])
@classmethod
def update_misconceptions_from_model(
cls, versioned_misconceptions, current_version):
"""Converts the misconceptions blob contained in the given
versioned_misconceptions dict from current_version to
current_version + 1. Note that the versioned_misconceptions being
passed in is modified in-place.
Args:
versioned_misconceptions: dict. A dict with two keys:
- schema_version: str. The schema version for the
misconceptions dict.
- misconceptions: list(dict). The list of dicts comprising the
misconceptions of the skill.
current_version: int. The current schema version of misconceptions.
"""
versioned_misconceptions['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_misconception_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
updated_misconceptions = []
for misconception in versioned_misconceptions['misconceptions']:
updated_misconceptions.append(conversion_fn(misconception))
versioned_misconceptions['misconceptions'] = updated_misconceptions
@classmethod
def update_rubrics_from_model(cls, versioned_rubrics, current_version):
"""Converts the rubrics blob contained in the given
versioned_rubrics dict from current_version to
current_version + 1. Note that the versioned_rubrics being
passed in is modified in-place.
Args:
versioned_rubrics: dict. A dict with two keys:
- schema_version: str. The schema version for the
rubrics dict.
- rubrics: list(dict). The list of dicts comprising the
rubrics of the skill.
current_version: int. The current schema version of rubrics.
"""
versioned_rubrics['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_rubric_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
updated_rubrics = []
for rubric in versioned_rubrics['rubrics']:
updated_rubrics.append(conversion_fn(rubric))
versioned_rubrics['rubrics'] = updated_rubrics
def update_description(self, description):
"""Updates the description of the skill.
Args:
description: str. The new description of the skill.
"""
self.description = description
def update_language_code(self, language_code):
"""Updates the language code of the skill.
Args:
language_code: str. The new language code of the skill.
"""
self.language_code = language_code
def update_superseding_skill_id(self, superseding_skill_id):
"""Updates the superseding skill ID of the skill.
Args:
superseding_skill_id: str. ID of the skill that supersedes this one.
"""
self.superseding_skill_id = superseding_skill_id
def record_that_all_questions_are_merged(self, all_questions_merged):
"""Updates the flag value which indicates if all questions are merged.
Args:
all_questions_merged: bool. Flag indicating if all questions are
merged to the superseding skill.
"""
self.all_questions_merged = all_questions_merged
def update_explanation(self, explanation):
"""Updates the explanation of the skill.
Args:
explanation: SubtitledHtml. The new explanation of the skill.
"""
self.skill_contents.explanation = (
state_domain.SubtitledHtml.from_dict(explanation))
def update_worked_examples(self, worked_examples):
"""Updates the worked examples list of the skill.
Args:
worked_examples: list(dict). The new worked examples of the skill.
"""
old_content_ids = [worked_example.content_id for worked_example in (
self.skill_contents.worked_examples)]
self.skill_contents.worked_examples = [
state_domain.SubtitledHtml.from_dict(worked_example)
for worked_example in worked_examples]
new_content_ids = [worked_example.content_id for worked_example in (
self.skill_contents.worked_examples)]
self._update_content_ids_in_assets(old_content_ids, new_content_ids)
def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
"""Adds or deletes content ids in recorded_voiceovers and
written_translations.
Args:
old_ids_list: list(str). A list of content ids present earlier
in worked_examples.
state.
new_ids_list: list(str). A list of content ids currently present
in worked_examples.
"""
content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
content_ids_to_add = set(new_ids_list) - set(old_ids_list)
written_translations = self.skill_contents.written_translations
recorded_voiceovers = self.skill_contents.recorded_voiceovers
for content_id in content_ids_to_delete:
recorded_voiceovers.delete_content_id_for_voiceover(content_id)
written_translations.delete_content_id_for_translation(
content_id)
for content_id in content_ids_to_add:
recorded_voiceovers.add_content_id_for_voiceover(content_id)
written_translations.add_content_id_for_translation(content_id)
def _find_misconception_index(self, misconception_id):
"""Returns the index of the misconception with the given misconception
id, or None if it is not in the misconceptions list.
Args:
misconception_id: int. The id of the misconception.
Returns:
int or None. The index of the corresponding misconception, or None
if there is no such misconception.
"""
for ind, misconception in enumerate(self.misconceptions):
if misconception.id == misconception_id:
return ind
return None
def add_misconception(self, misconception_dict):
"""Adds a new misconception to the skill.
Args:
misconception_dict: dict. The misconception to be added.
"""
misconception = Misconception(
misconception_dict['id'],
misconception_dict['name'],
misconception_dict['notes'],
misconception_dict['feedback'])
self.misconceptions.append(misconception)
self.next_misconception_id = self.get_incremented_misconception_id(
misconception_dict['id'])
def update_rubric(self, difficulty, explanation):
"""Adds or updates the rubric of the given difficulty.
Args:
difficulty: str. The difficulty of the rubric.
explanation: str. The explanation for the rubric.
"""
for rubric in self.rubrics:
if rubric.difficulty == difficulty:
rubric.explanation = explanation
return
raise ValueError(
'There is no rubric for the given difficulty.')
def get_incremented_misconception_id(self, misconception_id):
"""Returns the incremented misconception id.
Args:
misconception_id: int. The id of the misconception to be
incremented.
Returns:
int. The incremented misconception id.
"""
return misconception_id + 1
def delete_misconception(self, misconception_id):
"""Removes a misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be removed.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
del self.misconceptions[index]
def update_misconception_name(self, misconception_id, name):
"""Updates the name of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
name: str. The new name of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].name = name
def update_misconception_notes(self, misconception_id, notes):
"""Updates the notes of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
notes: str. The new notes of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].notes = notes
def update_misconception_feedback(self, misconception_id, feedback):
"""Updates the feedback of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
feedback: str. The html string that corresponds to the new feedback
of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].feedback = feedback
class SkillSummary(python_utils.OBJECT):
"""Domain object for Skill Summary."""
def __init__(
self, skill_id, description, language_code, version,
misconception_count, worked_examples_count, skill_model_created_on,
skill_model_last_updated):
"""Constructs a SkillSummary domain object.
Args:
skill_id: str. The unique id of the skill.
description: str. The short description of the skill.
language_code: str. The language code of the skill.
version: int. The version of the skill.
misconception_count: int. The number of misconceptions associated
with the skill.
worked_examples_count: int. The number of worked examples in the
skill.
skill_model_created_on: datetime.datetime. Date and time when
the skill model is created.
skill_model_last_updated: datetime.datetime. Date and time
when the skill model was last updated.
"""
self.id = skill_id
self.description = description
self.language_code = language_code
self.version = version
self.misconception_count = misconception_count
self.worked_examples_count = worked_examples_count
self.skill_model_created_on = skill_model_created_on
self.skill_model_last_updated = skill_model_last_updated
def validate(self):
"""Validates various properties of the Skill Summary object.
Raises:
ValidationError: One or more attributes of skill summary are
invalid.
"""
if not isinstance(self.description, python_utils.BASESTRING):
raise utils.ValidationError('Description should be a string.')
if self.description == '':
raise utils.ValidationError('Description field should not be empty')
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
if not isinstance(self.misconception_count, int):
raise utils.ValidationError(
'Expected misconception_count to be an int, '
'received \'%s\'' % self.misconception_count)
if self.misconception_count < 0:
raise utils.ValidationError(
'Expected misconception_count to be non-negative, '
'received \'%s\'' % self.misconception_count)
if not isinstance(self.worked_examples_count, int):
raise utils.ValidationError(
'Expected worked_examples_count to be an int, '
'received \'%s\'' % self.worked_examples_count)
if self.worked_examples_count < 0:
raise utils.ValidationError(
'Expected worked_examples_count to be non-negative, '
'received \'%s\'' % self.worked_examples_count)
def to_dict(self):
"""Returns a dictionary representation of this domain object.
Returns:
dict. A dict representing this SkillSummary object.
"""
return {
'id': self.id,
'description': self.description,
'language_code': self.language_code,
'version': self.version,
'misconception_count': self.misconception_count,
'worked_examples_count': self.worked_examples_count,
'skill_model_created_on': utils.get_time_in_millisecs(
self.skill_model_created_on),
'skill_model_last_updated': utils.get_time_in_millisecs(
self.skill_model_last_updated)
}
class SkillRights(python_utils.OBJECT):
"""Domain object for skill rights."""
def __init__(self, skill_id, skill_is_private, creator_id):
"""Constructor for a skill rights domain object.
Args:
skill_id: str. The id of the skill.
skill_is_private: bool. Whether the skill is private.
creator_id: str. The id of the creator of this skill.
"""
self.id = skill_id
self.skill_is_private = skill_is_private
self.creator_id = creator_id
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of SkillRights suitable for use by the
frontend.
"""
return {
'skill_id': self.id,
'skill_is_private': self.skill_is_private,
'creator_id': self.creator_id
}
def is_creator(self, user_id):
"""Checks whether the given user is the creator of this skill.
Args:
user_id: str. Id of the user.
Returns:
bool. Whether the user is the creator of this skill.
"""
return bool(user_id == self.creator_id)
def is_private(self):
"""Returns whether the skill is private.
Returns:
bool. Whether the skill is private.
"""
return self.skill_is_private
class SkillRightsChange(change_domain.BaseChange):
"""Domain object for changes made to a skill rights object.
The allowed commands, together with the attributes:
- 'create_new'
- 'publish_skill'.
"""
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_PUBLISH_SKILL,
'required_attribute_names': [],
'optional_attribute_names': []
}]
class UserSkillMastery(python_utils.OBJECT):
"""Domain object for a user's mastery of a particular skill."""
def __init__(self, user_id, skill_id, degree_of_mastery):
"""Constructs a SkillMastery domain object for a user.
Args:
user_id: str. The user id of the user.
skill_id: str. The id of the skill.
degree_of_mastery: float. The user's mastery of the
corresponding skill.
"""
self.user_id = user_id
self.skill_id = skill_id
self.degree_of_mastery = degree_of_mastery
def to_dict(self):
"""Returns a dictionary representation of this domain object.
Returns:
dict. A dict representing this SkillMastery object.
"""
return {
'user_id': self.user_id,
'skill_id': self.skill_id,
'degree_of_mastery': self.degree_of_mastery
}
@classmethod
def from_dict(cls, skill_mastery_dict):
"""Returns a UserSkillMastery domain object from the given dict.
Args:
skill_mastery_dict: dict. A dict mapping all the fields of
UserSkillMastery object.
Returns:
SkillMastery. The SkillMastery domain object.
"""
return cls(
skill_mastery_dict['user_id'],
skill_mastery_dict['skill_id'],
skill_mastery_dict['degree_of_mastery']
)
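# Illustrative sketch (assumption: the id and texts below are placeholders; it
# only uses names defined in this module and the imported `constants`):
#   rubrics = [
#       Rubric(difficulty, '<p>Sample explanation</p>')
#       for difficulty in constants.SKILL_DIFFICULTIES
#   ]
#   skill = Skill.create_default_skill(
#       'abcdefghijkl', 'Sample skill description', rubrics)
#   skill.validate()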
|
py | 1a400ca7c89f21e88376d1df25e7cab790c24289 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['TriggerArgs', 'Trigger']
@pulumi.input_type
class TriggerArgs:
def __init__(__self__, *,
device_name: pulumi.Input[str],
kind: pulumi.Input[Union[str, 'TriggerEventType']],
resource_group_name: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Trigger resource.
:param pulumi.Input[str] device_name: Creates or updates a trigger
:param pulumi.Input[Union[str, 'TriggerEventType']] kind: Trigger Kind.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] name: The trigger name.
"""
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
Creates or updates a trigger
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'TriggerEventType']]:
"""
Trigger Kind.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'TriggerEventType']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The trigger name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class Trigger(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'TriggerEventType']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Trigger details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] device_name: Creates or updates a trigger
:param pulumi.Input[Union[str, 'TriggerEventType']] kind: Trigger Kind.
:param pulumi.Input[str] name: The trigger name.
:param pulumi.Input[str] resource_group_name: The resource group name.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TriggerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Trigger details.
:param str resource_name: The name of the resource.
:param TriggerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TriggerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'TriggerEventType']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TriggerArgs.__new__(TriggerArgs)
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Trigger"), pulumi.Alias(type_="azure-native:databoxedge:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Trigger"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Trigger"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Trigger")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Trigger, __self__).__init__(
'azure-native:databoxedge/v20190301:Trigger',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Trigger':
"""
Get an existing Trigger resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TriggerArgs.__new__(TriggerArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return Trigger(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Trigger Kind.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
|
py | 1a400df6b2d5ca7b3fe7ef570e2af300b5e996cb | import random
import math
import time
import mysql.connector
import copy
import json
from .components.DBConfig import DBConfig
from .components.Configuration import Configuration
from .components.StudentsManager import StudentsManager
from .components.ContainersManager import ContainersManager
class CC:
def __init__(self, process_id, group_id, config_id):
self.process_id = process_id
self.group_id = group_id
self.config_id = config_id
def run(self):
print("Running CC...")
if self.group_id == "" or self.config_id == "":
return "NoGroupOrConfigSelected"
self.students_manager = StudentsManager(self.group_id)
self.configuration = Configuration(self.config_id)
self.containers_manager = ContainersManager(
14, # TODO: Set dynamic num of containers based on db configuration
# math.ceil(self.students_manager.get_number_of_students() / self.configuration.max_students),
self.configuration,
self.students_manager
)
self.total_number_of_students = self.students_manager.get_number_of_students()
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if self.total_number_of_students == 0:
return "ZeroStudentsIntoGroup"
print("Loaded students from db with id " + self.students_manager.group_id + ":",
self.total_number_of_students)
print("Loaded config from db with id " + self.configuration.config_id + ":",
self.configuration.config_name)
if self.is_already_generated():
print('Class Composition already generated! Exiting...')
return "CCAlreadyGenerated"
print("Created " + str(self.containers_manager.get_number_of_containers()) + " empty classes")
print("Sex priority: " + self.configuration.sex_priority)
configured_sex_priority_array = self.students_manager.get_sex_prioritized_students_array(
self.configuration.sex_priority,
self.configuration.num_sex_priority
)
print("Checking sex-prioritized array...")
for student_group in configured_sex_priority_array:
print("Student group length: " + str(len(student_group)), end="")
num_males, num_females = 0, 0
for student in student_group:
if student.sesso == "m":
num_males += 1
if student.sesso == "f":
num_females += 1
print(" - M: " + str(num_males) + " - F: " + str(num_females))
print("Finished checking sex-prioritized array...")
if len(configured_sex_priority_array) > self.containers_manager.get_number_of_containers():
print('<---WARNING---> Sex prioritized groups are more than possible containers!')
print('ABORT!')
return "TooManySexPrioritizedPeople"
students_not_inserted = self.containers_manager.distribute_sex_prioritized_groups_randomly_into_containers(
configured_sex_priority_array
)
print("Remaining students into StudentsManager:", self.students_manager.get_number_of_remaining_students())
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if len(students_not_inserted) > 0:
print("Some students from prioritized group weren't inserted!")
for student in students_not_inserted:
print("Student with matricola " + student.matricola + " was not inserted!")
else:
print("No students need to be reinserted, this is a good sign! :))")
# self.containers_manager.show_containers_statistics()
self.containers_manager.print_all_containers_current_dimensions()
print("Pairing and getting remaining students, matching by desiderata when possible...")
remaining_desiderata_students_array = self.students_manager.get_remaining_desiderata_students_array()
print("Found " + str(len(remaining_desiderata_students_array)) + " paired students!")
students_not_inserted = self.containers_manager.distribute_couples_randomly_into_containers(remaining_desiderata_students_array)
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if len(students_not_inserted) > 0:
print("Some O-O desiderata couple weren't inserted!")
for couple in students_not_inserted:
for student in couple:
print("Student with matricola " + student.matricola + " was not inserted!")
print("In total there are " + str(len(remaining_desiderata_students_array)) + " paired students to be reinserted!")
else:
print("No students need to be reinserted, this is a good sign! :))")
print("Getting remaining students on the database...")
remaining_students_array = self.students_manager.get_remaining_students_array()
remaining_students_after_random_insert = self.containers_manager.distribute_remaining_students_randomly_into_containers(remaining_students_array)
print("After random fill of remaining students, there are " + str(len(remaining_students_after_random_insert)) + " students to fill, still!")
if len(remaining_students_after_random_insert) == 0:
print("Well done, there is no students to swap of classroom, there!")
else:
print("We need to fill these " + str(len(remaining_students_after_random_insert)) + " students somewhere!")
if not self.containers_manager.fill_remaining_students_shuffling_classcontainers(remaining_students_after_random_insert):
return "CannotShuffleStudents"
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
minimum_balancing_status = self.containers_manager.rebalance_students_to_reach_minimum_number_of_students_per_container()
if minimum_balancing_status:
print("Now classes are minimum balanced!")
else:
print("Cannot balance by mininum amount!")
return "CannotBalanceClassesByMininumValue"
"""
print("BEFORE OPTIMIZATION:")
std_sum_before = 0
for container in self.containers_manager.containers:
print(f"ContainerID: {container.containerid} - Container AVG: {container.get_avg()} - Container STD: {container.get_std()}")
std_sum_before += container.get_avg()
print(f"AVG: [{self.containers_manager.get_avg()}] - STD: [{self.containers_manager.get_std()}]")
"""
self.optimize()
"""
print("AFTER OPTIMIZATION:")
std_sum_after = 0
for container in self.containers_manager.containers:
print(f"ContainerID: {container.containerid} - Container AVG: {container.get_avg()} - Container STD: {container.get_std()}")
std_sum_after += container.get_avg()
print(f"AVG: [{self.containers_manager.get_avg()}] - STD: [{self.containers_manager.get_std()}]")
print(f"RESULTS: {std_sum_before} - {std_sum_after}")"""
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
uninserted_students_by_matricola = self.students_manager.get_uninserted_students(self.containers_manager)
if len(uninserted_students_by_matricola) > 0:
print("\nWe found " + str(len(uninserted_students_by_matricola)) + " students not loaded, inserted and/or elaborated!")
print("Is it a correct number (TotalStudents == StudentsIntoContainers + UninsertedStudents)? -->", self.total_number_of_students == self.containers_manager.get_number_of_total_students_into_containers() + len(uninserted_students_by_matricola))
for matricola in uninserted_students_by_matricola:
print("Hey! Student with matricola " + matricola + " not loaded, inserted and/or elaborated!")
print("Remaining students into StudentsManager:", self.students_manager.get_number_of_remaining_students())
return "StudentsNotInsertedAfterShuffling"
else:
print("All students were inserted and elaborated correctly, good work!")
print("Saving CC to database...")
self.save_students_to_db()
print("Done!")
return True
def optimize(self):
def get_two_random_containers():
while True:
first_container = random.choice(self.containers_manager.containers)
second_container = random.choice(self.containers_manager.containers)
if first_container is not second_container:
break
return first_container, second_container
def get_std_of_two_containers(first_container, second_container):
first_container_avg = first_container.get_avg()
second_container_avg = second_container.get_avg()
containers_avg = (first_container_avg + second_container_avg) / 2
return math.sqrt(
(
math.pow(first_container_avg - containers_avg, 2) +
math.pow(second_container_avg - containers_avg, 2)
) / 2)
def optimize_random_couple_of_containers_fixed_cycles(num_of_cycles):
first_container, second_container = get_two_random_containers()
previous_swap_std = get_std_of_two_containers(first_container, second_container)
effective_changes = 0
for _ in range(num_of_cycles):
first_container_student = first_container.get_random_student()
second_container_student = second_container.get_random_student()
first_container_student_copy = copy.deepcopy(first_container_student)
second_container_student_copy = copy.deepcopy(second_container_student)
if first_container_student.eligible_to_swap(self.configuration.sex_priority) \
and second_container_student.eligible_to_swap(self.configuration.sex_priority) \
and not first_container.has_desiderata(first_container_student) \
and not second_container.has_desiderata(second_container_student):
first_container.remove_student(first_container_student)
second_container.remove_student(second_container_student)
first_result = first_container.add_student(second_container_student)
second_result = second_container.add_student(first_container_student)
after_swap_std = get_std_of_two_containers(first_container, second_container)
if first_result == None and second_result == None:
if after_swap_std >= previous_swap_std:
first_container.remove_student(second_container_student)
second_container.remove_student(first_container_student)
first_result = first_container.add_student(first_container_student_copy)
second_result = second_container.add_student(second_container_student_copy)
else:
effective_changes += 1
else:
first_container.remove_student(second_container_student)
second_container.remove_student(first_container_student)
first_result = first_container.add_student(first_container_student_copy)
second_result = second_container.add_student(second_container_student_copy)
return effective_changes
print("Optimizing...")
num_of_optimizations = self.total_number_of_students
num_of_effective_optimizations = 0
for i in range(0, num_of_optimizations):
num_of_effective_optimizations += optimize_random_couple_of_containers_fixed_cycles(25)
if i % 25 == 0:
print(str(round(i / num_of_optimizations * 100, 2)) + "%\t\t" + str(i) + "\toptcycle\toptsdone\t" + str(num_of_effective_optimizations) + "\tstudents\t" + str(self.containers_manager.get_number_of_total_students_into_containers()))
print("100%! Effective swaps done: " + str(num_of_effective_optimizations) + "\n")
def save_students_to_db(self):
connection = mysql.connector.connect(
user=DBConfig.user,
password=DBConfig.password,
host=DBConfig.host,
database=DBConfig.database)
cursor = connection.cursor()
for container in self.containers_manager.containers:
container_ids = container.get_students_id()
# print(f'Inserting container {container.containerid} with ids {container_ids}')
for student_id in container_ids:
query = "INSERT INTO classi_composte (`groupid`, `configid`, `studentid`, `classid`) VALUES (" + str(self.group_id) + ", " + str(self.config_id) + ", " + str(student_id) + ", " + str(container.containerid) + ")"
cursor.execute(query)
connection.commit()
cursor.close()
connection.close()
def is_already_generated(self):
connection = mysql.connector.connect(
user=DBConfig.user,
password=DBConfig.password,
host=DBConfig.host,
database=DBConfig.database)
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM classi_composte WHERE groupid = " + self.group_id + " AND configid = " + self.config_id
cursor.execute(query)
num_of_students_already_inserted = cursor.fetchall()[0][0]
cursor.close()
connection.close()
return num_of_students_already_inserted > 0
def create_cc_instance(process_id, group_id, config_id):
cc = CC(process_id, group_id, config_id)
result_value = cc.run()
if result_value == True:
good_status_json = {
"querystatus" : "good",
"message" : "Composizione Classi completata!"
}
return json.dumps(good_status_json)
elif result_value == "ZeroStudentsIntoGroup":
bad_status_json = {
"querystatus" : "bad",
"message" : "Gruppo vuoto, non e' possibile generare alcuna configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "CCAlreadyGenerated":
bad_status_json = {
"querystatus" : "bad",
"message" : "Composizione Classi già generata per questo gruppo e configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "NoGroupOrConfigSelected":
bad_status_json = {
"querystatus" : "bad",
"message" : "Nessun gruppo e/o configurazione selezionato/a!"
}
return json.dumps(bad_status_json)
elif result_value == "CannotShuffleStudents":
bad_status_json = {
"querystatus" : "bad",
"message" : "Impossibile distribuire gli studenti con questa configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "TooManySexPrioritizedPeople":
bad_status_json = {
"querystatus" : "bad",
"message" : "Troppi utenti con priorità di sesso per questa richiesta!"
}
return json.dumps(bad_status_json)
elif result_value == "StudentsNotInsertedAfterShuffling":
bad_status_json = {
"querystatus" : "bad",
"message" : "Inserimento degli studenti tramite shuffling non possibile!"
}
return json.dumps(bad_status_json)
elif result_value == "CannotBalanceClassesByMininumValue":
bad_status_json = {
"querystatus" : "bad",
"message" : "Non è possibile bilanciare classi con un numero minimo di studenti così alto!"
}
return json.dumps(bad_status_json)
else:
bad_status_json = {
"querystatus" : "bad",
"message" : "Errore nella Composizione Classi! Contattare l'amministratore."
}
return json.dumps(bad_status_json)
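# --- Hedged usage sketch (added for illustration only) ---
# create_cc_instance() is the module's entry point: it runs the whole class-composition
# pipeline and returns a JSON status string. The id values below are placeholders and a
# reachable MySQL database (see DBConfig) is assumed.
def _example_run_cc():  # hypothetical helper, never called here
    result = create_cc_instance(process_id="1", group_id="12", config_id="3")
    status = json.loads(result)
    print(status["querystatus"], "-", status["message"])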
|
py | 1a400dff62336b44ba04ae078d2e6b2bb4bb99aa | import json
from enum import Enum
from json.decoder import JSONDecodeError
import pygame
from lib import constants
_filePath = constants.res_loc() + "config.json"
_values = {}
class EntryType(Enum):
# lambda for converting key values to strings
Key = (0, lambda value: pygame.key.name(value).capitalize())
Toggle = (1, str)
Scroll = (2, str)
def __init__(self, index, func):
self._value_ = index
self.func = func
class Entries(Enum):
"""
Enumeration of all possible settings with its default value
"""
KeyLeft = ("Move left", pygame.K_a, EntryType.Key)
KeyRight = ("Move right", pygame.K_d, EntryType.Key)
KeySpace = ("Jump", pygame.K_SPACE, EntryType.Key)
ShowDebug = ("Debug mode", False, EntryType.Toggle)
MusicVolume = ("Music volume", 1.0, EntryType.Scroll)
SoundVolume = ("Sound volume", 1.0, EntryType.Scroll)
def __init__(self, desc, default, entryType):
self.desc = desc
self.default = default
self.entryType = entryType
def getCurrentValue(self):
return _values[self.name]
def setCurrentValue(self, value):
global _values
_values[self.name] = value
def __str__(self):
return self.entryType.func(self.getCurrentValue())
def init():
loadConfig()
def resetConfig():
global _values
_values.clear()
for entry in Entries:
_values[entry.name] = entry.default
def loadConfig():
global _values
try:
with open(_filePath, "r") as file:
_values = json.load(file)
resolveComplete()
except (FileNotFoundError, JSONDecodeError):
resetConfig()
saveConfig()
def saveConfig():
with open(_filePath, "w") as file:
json.dump(_values, file, indent=4)
def resolveComplete():
global _values
update = False
for entry in Entries:
if entry.name not in _values:
update = True
_values[entry.name] = entry.default
if update:
saveConfig()
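# --- Hedged usage sketch (added for illustration only) ---
# Typical read/update flow for the settings module above; the chosen entry and key are
# arbitrary examples and pygame's key constants are assumed to be available.
def _example_rebind_jump_key():  # hypothetical helper, never called here
    init()                                                 # load (or create) config.json
    print(Entries.KeySpace.desc, "->", Entries.KeySpace)   # human-readable current value
    Entries.KeySpace.setCurrentValue(pygame.K_w)           # rebind jump to the W key
    saveConfig()                                           # persist the change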
|
py | 1a400e18fb878da3f36d0ac77ebaa225791d57bd | nome=str(input('Digite o nome: '))
n1=int(input('Digite a nota: '))
n2=int(input('Digite a nota: '))
med=(n1+n2)/2
print('O nome do aluno é: {}. Sua média é: {}'.format(nome,med)) |
py | 1a400ec130d10390eaffde17f4dcc9b1b56424db | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.forms.widgets import flatatt
from django.template import Variable, VariableDoesNotExist
from django.template.base import FilterExpression, kwarg_re, TemplateSyntaxError
from .text import text_value
# RegEx for quoted string
QUOTED_STRING = re.compile(r'^["\'](?P<noquotes>.+)["\']$')
def handle_var(value, context):
"""
Handle template tag variable
"""
# Resolve FilterExpression and Variable immediately
if isinstance(value, FilterExpression) or isinstance(value, Variable):
return value.resolve(context)
# Return quoted strings unquoted
# http://djangosnippets.org/snippets/886
stringval = QUOTED_STRING.search(value)
if stringval:
return stringval.group('noquotes')
# Resolve variable or return string value
try:
return Variable(value).resolve(context)
except VariableDoesNotExist:
return value
def parse_token_contents(parser, token):
"""
Parse template tag contents
"""
bits = token.split_contents()
tag = bits.pop(0)
args = []
kwargs = {}
asvar = None
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError(
'Malformed arguments to tag "{}"'.format(tag))
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return {
'tag': tag,
'args': args,
'kwargs': kwargs,
'asvar': asvar,
}
def split_css_classes(css_classes):
"""
Turn string into a list of CSS classes
"""
classes_list = text_value(css_classes).split(' ')
return [c for c in classes_list if c]
def add_css_class(css_classes, css_class, prepend=False):
"""
Add a CSS class to a string of CSS classes
"""
classes_list = split_css_classes(css_classes)
classes_to_add = [c for c in split_css_classes(css_class)
if c not in classes_list]
if prepend:
classes_list = classes_to_add + classes_list
else:
classes_list += classes_to_add
return ' '.join(classes_list)
def remove_css_class(css_classes, css_class):
"""
Remove a CSS class from a string of CSS classes
"""
remove = set(split_css_classes(css_class))
classes_list = [c for c in split_css_classes(css_classes)
if c not in remove]
return ' '.join(classes_list)
def render_link_tag(url, rel='stylesheet', media=None):
"""
Build a link tag
"""
attrs = {
'href': url,
'rel': rel,
}
if media:
attrs['media'] = media
return render_tag('link', attrs=attrs, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""
Render a HTML tag
"""
builder = '<{tag}{attrs}>{content}'
if content or close:
builder += '</{tag}>'
return builder.format(
tag=tag,
attrs=flatatt(attrs) if attrs else '',
content=text_value(content),
)
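# --- Hedged usage sketch (added for illustration only; class names are arbitrary) ---
def _example_render_button():  # hypothetical helper, never called here
    css = add_css_class('btn', 'btn-primary')   # -> 'btn btn-primary'
    css = remove_css_class(css, 'btn')          # -> 'btn-primary'
    return render_tag('button',
                      attrs={'class': css, 'type': 'submit'},
                      content='Save')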
|
py | 1a400ef24494b350e4bb75c5694f8d83e2d47b34 | from typing import Optional
from aiogram.contrib.middlewares.environment import EnvironmentMiddleware as _EnvironmentMiddleware
class EnvironmentMiddleware(_EnvironmentMiddleware):
async def trigger(self, action: str, args: list[dict]) -> Optional[bool]:
# Remove a condition to allow use this middleware before error handlers
if action.startswith('pre_process_'):
self.update_data(args[-1])
return True
return None
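# --- Hedged usage sketch (added for illustration only) ---
# How this middleware is typically registered on an aiogram 2.x Dispatcher. The
# constructor argument below (a context dict) and the injected key are assumptions
# about EnvironmentMiddleware's signature, not taken from this file.
def _example_setup(dispatcher, database):  # hypothetical helper, never called here
    dispatcher.middleware.setup(EnvironmentMiddleware(context={"db": database}))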
|
py | 1a401022dea812a1cae38244768aca55397503be | import wave
import sys
import struct
import time
import subprocess
# import inspect
import threading
import traceback
import shlex
import os
import string
import random
import datetime as dt
import numpy as np
import scipy as sp
import scipy.special
from contextlib import closing
from argparse import ArgumentParser
# for allowing the logging module to send emails through gmail
# import logging
import logging.handlers
try:
import simplejson as json
except ImportError:
import json
# class TlsSMTPHandler(logging.handlers.SMTPHandler):
# def emit(self, record):
# """
# Emit a record.
#
# Format the record and send it to the specified addressees.
# """
# try:
# import smtplib
# import string # for tls add this line
# try:
# from email.utils import formatdate
# except ImportError:
# formatdate = self.date_time
# port = self.mailport
# if not port:
# port = smtplib.SMTP_PORT
# smtp = smtplib.SMTP(self.mailhost, port)
# msg = self.format(record)
# msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
# self.fromaddr,
# string.join(self.toaddrs, ","),
# self.getSubject(record),
# formatdate(), msg)
# if self.username:
# smtp.ehlo() # for tls add this line
# smtp.starttls() # for tls add this line
# smtp.ehlo() # for tls add this line
# smtp.login(self.username, self.password)
# smtp.sendmail(self.fromaddr, self.toaddrs, msg)
# print Exception
# smtp.quit()
# except (KeyboardInterrupt, SystemExit):
# raise
# except:
# print("error failed to send")
# self.handleError(record)
class NumpyAwareJSONEncoder(json.JSONEncoder):
""" this json encoder converts numpy arrays to lists so that json can write them.
example usage:
>>> import numpy as np
>>> dict_to_save = {'array': np.zeros((5,))}
>>> json.dumps(dict_to_save,
cls=NumpyAwareJSONEncoder
)
'{"array": [0.0, 0.0, 0.0, 0.0, 0.0]}'
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# consider importing this from python-neo
class Event(object):
"""docstring for Event"""
def __init__(self, event_time=None, duration=None, label='', name=None, description=None, file_origin=None, *args,
**kwargs):
super(Event, self).__init__()
self.time = event_time
self.duration = duration
self.label = label
self.name = name
self.description = description
self.file_origin = file_origin
self.annotations = {}
self.annotate(**kwargs)
def annotate(self, **kwargs):
self.annotations.update(kwargs)
class Stimulus(Event):
"""docstring for Stimulus"""
def __init__(self, *args, **kwargs):
super(Stimulus, self).__init__(*args, **kwargs)
if self.label == '':
self.label = 'stimulus'
class AuditoryStimulus(Stimulus):
"""docstring for AuditoryStimulus"""
def __init__(self, *args, **kwargs):
super(AuditoryStimulus, self).__init__(*args, **kwargs)
if self.label == '':
self.label = 'auditory_stimulus'
def run_state_machine(start_in='pre', error_state=None, error_callback=None, **state_functions):
"""runs a state machine defined by the keyword arguments
>>> def run_start():
>>> print "in 'run_start'"
>>> return 'next'
>>> def run_next():
>>> print "in 'run_next'"
>>> return None
>>> run_state_machine(start_in='start',
>>> start=run_start,
>>> next=run_next)
in 'run_start'
in 'run_next'
None
"""
# make sure the start state has a function to run
assert (start_in in state_functions.keys())
# make sure all of the arguments passed in are callable
for func in state_functions.values():
assert hasattr(func, '__call__')
state = start_in
while state is not None:
try:
state = state_functions[state]()
except Exception as e:
if error_callback:
error_callback(e)
raise
else:
raise
# state = error_state # 3/12/19 (AR) not sure what the point of this statement is
class Trial(Event):
"""docstring for Trial"""
def __init__(self,
index=None,
type_='normal',
class_=None,
*args, **kwargs):
super(Trial, self).__init__(*args, **kwargs)
self.label = 'trial'
self.session = None
self.index = index
self.type_ = type_
self.stimulus = None
self.class_ = class_
self.response = None
self.correct = None
self.rt = None
self.reward = False
self.punish = False
self.events = []
self.stim_event = None
class Command(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
via https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, str):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
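# --- Hedged usage sketch (added for illustration only; command and timeout are arbitrary) ---
def _example_run_command():  # hypothetical helper, never called here
    status, output, error = Command('echo hello').run(timeout=5)
    return status == 0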
def parse_commandline(arg_str=sys.argv[1:]):
""" parse command line arguments
note: optparse is depreciated w/ v2.7 in favor of argparse
"""
parser = ArgumentParser()
parser.add_argument('-B', '--box',
action='store', type=int, dest='box', required=False,
help='(int) box identifier')
parser.add_argument('-S', '--subject',
action='store', type=str, dest='subj', required=False,
help='subject ID and folder name')
parser.add_argument('-c', '--config',
action='store', type=str, dest='config_file', default='config.json', required=True,
help='configuration file [default: %(default)s]')
args = parser.parse_args(arg_str)
return vars(args)
def check_cmdline_params(parameters, cmd_line):
# if someone is using red bands they should amend the checks I perform here
allchars = string.maketrans('', '')
nodigs = allchars.translate(allchars, string.digits)
if not ('box' not in cmd_line or cmd_line['box'] == int(
parameters['panel_name'].encode('ascii', 'ignore').translate(allchars, nodigs))):
print("box number doesn't match config and command line")
return False
if not ('subj' not in cmd_line or
int(cmd_line['subj'].encode('ascii', 'ignore').translate(allchars, nodigs)) == int(
parameters['subject'].encode('ascii', 'ignore').translate(allchars, nodigs))):
print("subject number doesn't match config and command line")
return False
return True
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
def is_day(city='Boston', lat='42.41', lon='-71.13'):
# def is_day((latitude, longitude) = ('32.82', '-117.14')):
# latitude='42.41', longitude='-71.13' for Medford, MA
# #Tuples not supported in Python 3, rewrote to separate tuples as this function is only called
# without parameters anyway (1/17/18 AR)
"""Is it daytime?
parameter: city, valid entries are large world cities (best option is to select your nearest large city);
alternative is lat and lon of the current location
Returns True if it is daytime
* Discovered by the Germans in 1904, they named it San Diego,
which of course in German means a whale's vagina. (Burgundy, 2004)
"""
import ephem
if city:
# print 'city'
try:
obs = ephem.city(city.capitalize())
except KeyError:
raise NoCityMatchError
except AttributeError:
obs = ephem.city(city.get('city').capitalize()) # 3/12/19 (AR) Does this work? There's no 'get' function
# for a str
elif lat and lon:
# print 'coords'
obs = ephem.Observer()
obs.lat = str(lat)
obs.long = str(lon)
else:
# print 'else'
obs = ephem.city('Boston')
next_sunrise = ephem.localtime(obs.next_rising(ephem.Sun()))
next_sunset = ephem.localtime(obs.next_setting(ephem.Sun()))
return next_sunset < next_sunrise
def check_time(schedule, fmt="%H:%M", **kwargs):
""" Determine whether current time is within $schedule
Primary use: determine whether trials should be done given the current time and light schedule or session schedule
returns Boolean if current time meets schedule
schedule='sun' will change lights according to local sunrise and sunset
schedule=[('07:00','17:00')] will have lights on between 7am and 5pm
schedule=[('06:00','12:00'),('18:00','24:00')] will have lights on between
"""
if schedule == 'sun':
if is_day(kwargs):
return True
else:
for epoch in schedule:
assert len(epoch) == 2  # each epoch is a (start, end) time pair
now = dt.datetime.time(dt.datetime.now())
start = dt.datetime.time(dt.datetime.strptime(epoch[0], fmt))
end = dt.datetime.time(dt.datetime.strptime(epoch[1], fmt))
if time_in_range(start, end, now):
return True
return False
def check_day(schedule):
""" determine whether trials should be done given the current day
"""
today = dt.datetime.today().weekday()
if schedule == 'weekday':
if today < 5: # .weekday() returns int of day of week, with Monday = 0
return True
else:
return False
elif schedule == 'daily':
return True
else: # Match current day of week to session_days parameter
todayDate = dt.datetime.today()
for eachDay in schedule:
if eachDay == today or eachDay == todayDate.strftime("%A").lower() or \
eachDay == todayDate.strftime("%a").lower():
return True
return False
def wait(secs=1.0, final_countdown=0.0, waitfunc=None):
"""Smartly wait for a given time period.
secs -- total time to wait in seconds
final_countdown -- time at end of secs to wait and constantly poll the clock
waitfunc -- optional function to run in a loop during hogCPUperiod
If secs=1.0 and final_countdown=0.2 then for 0.8s python's time.sleep function will be used,
which is not especially precise, but allows the cpu to perform housekeeping. In
the final hogCPUsecs the more precise method of constantly polling the clock
is used for greater precision.
"""
# initial relaxed period, using sleep (better for system resources etc)
if secs > final_countdown:
time.sleep(secs - final_countdown)
secs = final_countdown # only this much is now left
# It's the Final Countdown!!
# hog the cpu, checking time
t0 = time.time()
while (time.time() - t0) < secs:
# let's see if any events were collected in meantime
try:
waitfunc()
except:
pass
def auditory_stim_from_wav(wav):
with closing(wave.open(wav, 'rb')) as wf:
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()
duration = float(nframes) / sampwidth
duration = duration * 2.0 / framerate
stim = AuditoryStimulus(time=0.0,
duration=duration,
name=wav,
label='wav',
description='',
file_origin=wav,
annotations={'nchannels': nchannels,
'sampwidth': sampwidth,
'framerate': framerate,
'nframes': nframes,
'comptype': comptype,
'compname': compname,
}
)
return stim
def concat_wav(input_file_list, output_filename='concat.wav'):
""" concat a set of wav files into a single wav file and return the output filename
takes in a tuple list of files and duration of pause after the file
input_file_list = [
('a.wav', 0.1),
('b.wav', 0.09),
('c.wav', 0.0),
]
returns a list of AuditoryStimulus objects
TODO: add checks for sampling rate, number of channels, etc.
"""
cursor = 0
epochs = [] # list of file epochs
audio_data = ''
with closing(wave.open(output_filename, 'wb')) as output:
for input_filename, isi in input_file_list:
# read in the wav file
with closing(wave.open(input_filename, 'rb')) as wav_part:
try:
params = wav_part.getparams()
output.setparams(params)
fs = output.getframerate()
except: # TODO: what was I trying to except here? be more specific
params = []
fs = 1
pass
audio_frames = wav_part.readframes(wav_part.getnframes())
# append the audio data
audio_data += audio_frames
part_start = cursor
part_dur = len(audio_frames) / params[1]
epochs.append(AuditoryStimulus(time=float(part_start) / fs,
duration=float(part_dur) / fs,
name=input_filename,
file_origin=input_filename,
annotations=params,
label='motif'
))
cursor += part_dur # move cursor length of the duration
# add isi
if isi > 0.0:
isi_frames = ''.join([struct.pack('h', fr) for fr in [0] * int(fs * isi)])
audio_data += isi_frames
cursor += len(isi_frames) / params[1]
# concat all of the audio together and write to file
output.writeframes(audio_data)
description = 'concatenated on-the-fly'
concat_wav = AuditoryStimulus(time=0.0,
duration=epochs[-1].time + epochs[-1].duration,
name=output_filename,
label='wav',
description=description,
file_origin=output_filename,
annotations=output.getparams(),
)
return concat_wav, epochs
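# --- Hedged usage sketch (added for illustration only; file names and pauses are placeholders) ---
def _example_concat_motifs():  # hypothetical helper, never called here
    motif_list = [('a.wav', 0.1), ('b.wav', 0.09), ('c.wav', 0.0)]
    stim, motif_epochs = concat_wav(motif_list, output_filename='concat.wav')
    # 'stim' describes the concatenated file; each entry of 'motif_epochs' records the
    # start time and duration of one source wav inside it.
    return stim.duration, [(e.name, e.time, e.duration) for e in motif_epochs]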
def get_num_open_fds():
"""
return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
"""
pid = os.getpid()
procs = subprocess.check_output(
["lsof", '-w', '-Ff', "-p", str(pid)])
nprocs = len(
filter(
lambda s: s and s[0] == 'f' and s[1:].isdigit(),
procs.split('\n'))
)
return nprocs
def rand_from_log_shape_dist(alpha=10):
"""
randomly samples from a distribution between 0 and 1 with pdf shaped like the log function
low probability of getting close to zero, increasing probability going towards 1
alpha determines how sharp the curve is, higher alpha, sharper curve.
"""
beta = (alpha + 1) * np.log(alpha + 1) - alpha
t = random.random()
ret = ((beta * t - 1) / (sp.special.lambertw((beta * t - 1) / np.e)) - 1) / alpha
return max(min(np.real(ret), 1), 0)
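# Added note: the expression above is inverse-transform sampling. With pdf
# f(x) proportional to log(1 + alpha*x) on [0, 1], the CDF is
# F(x) = ((1 + alpha*x)*log(1 + alpha*x) - alpha*x) / beta, and solving F(x) = t
# for a uniform t leads to the Lambert W form used above.
def _example_log_shape_samples(n=5, alpha=10):  # hypothetical helper, never called here
    return [rand_from_log_shape_dist(alpha) for _ in range(n)]  # each value lies in [0, 1]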
class NoCityMatchError(Exception):
"""Raised for is_day() when no matching city is found in the ephem module
"""
# print 'No city matches entered text. Try using coords instead (lat=xxx, lon=yyy)'
pass
class VarTypeError(Exception):
"""Raised for is_day() when coords are entered as values
"""
# print 'No city matches entered text. Try using coords instead (lat=xxx, lon=yyy)'
pass
|
py | 1a40118b2c5a6bc89a3c06c61083f61f28827679 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Using glob to find files matching a pattern with a filename extension.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import glob
for name in glob.glob('*.py'):
print name
|
py | 1a4011b032261f64823417ec8a938d04706830c0 | #! /usr/bin/env python
import sys
import os
from django.conf import settings, global_settings
APP_NAME = 'sitegate'
def main():
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
if not settings.configured:
settings.configure(
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'etc',
APP_NAME,
),
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3'}},
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
ROOT_URLCONF='sitegate.tests',
MIGRATION_MODULES={
'auth': 'django.contrib.auth.tests.migrations',
},
AUTH_USER_MODEL=os.environ.get('DJANGO_AUTH_USER_MODEL', 'auth.User')
)
try: # Django 1.7 +
from django import setup
setup()
except ImportError:
pass
from django.test.utils import get_runner
runner = get_runner(settings)()
failures = runner.run_tests((APP_NAME,))
sys.exit(failures)
if __name__ == '__main__':
main()
|
py | 1a40120c5cdba1725e5c8c5f23ae0b74ebbf7427 | from .family_methods import trio_matrix, mendel_errors, transmission_disequilibrium_test, de_novo
from .impex import export_elasticsearch, export_gen, export_bgen, export_plink, export_vcf, \
import_locus_intervals, import_bed, import_fam, grep, import_bgen, import_gen, import_table, \
import_plink, read_matrix_table, read_table, get_vcf_metadata, import_vcf, import_gvcfs, \
import_vcfs, index_bgen, import_matrix_table
from .statgen import skat, identity_by_descent, impute_sex, \
genetic_relatedness_matrix, realized_relationship_matrix, pca, \
hwe_normalized_pca, pc_relate, split_multi, filter_alleles, filter_alleles_hts, \
split_multi_hts, balding_nichols_model, ld_prune, row_correlation, ld_matrix, \
linear_mixed_model, linear_regression_rows, logistic_regression_rows, poisson_regression_rows, \
linear_mixed_regression_rows, lambda_gc
from .qc import sample_qc, variant_qc, vep, concordance, nirvana, summarize_variants
from .misc import rename_duplicates, maximal_independent_set, filter_intervals
__all__ = ['trio_matrix',
'linear_mixed_model',
'skat',
'identity_by_descent',
'impute_sex',
'linear_regression_rows',
'logistic_regression_rows',
'poisson_regression_rows',
'linear_mixed_regression_rows',
'lambda_gc',
'sample_qc',
'variant_qc',
'genetic_relatedness_matrix',
'realized_relationship_matrix',
'pca',
'hwe_normalized_pca',
'pc_relate',
'rename_duplicates',
'split_multi',
'split_multi_hts',
'mendel_errors',
'export_elasticsearch',
'export_gen',
'export_bgen',
'export_plink',
'export_vcf',
'vep',
'concordance',
'maximal_independent_set',
'import_locus_intervals',
'import_bed',
'import_fam',
'import_matrix_table',
'nirvana',
'transmission_disequilibrium_test',
'grep',
'import_bgen',
'import_gen',
'import_table',
'import_plink',
'read_matrix_table',
'read_table',
'get_vcf_metadata',
'import_vcf',
'import_vcfs',
'import_gvcfs',
'index_bgen',
'balding_nichols_model',
'ld_prune',
'filter_intervals',
'de_novo',
'filter_alleles',
'filter_alleles_hts',
'summarize_variants',
'row_correlation',
'ld_matrix'
]
|
py | 1a40140cd5ee3bebbceae9e83feaf83e1318a425 | from .model import SegmentationModel
from .modules import (
Conv2dReLU,
Conv3dReLU,
Attention,
)
from .heads import (
SegmentationHead,
SegmentationHead_3D,
ClassificationHead,
) |
py | 1a40150e3e6af131ce5e9c918c088867cd0e951e | ''' Day 1: The Tyranny of the Rocket Equation '''
from math import floor
def calculate_fuel(mass, recursive=False):
''' Calculate fuel required to launch rocket (recursion is part two) '''
fuel = floor(mass / 3) - 2
if fuel > 8 and recursive:
fuel += calculate_fuel(fuel, recursive)
return fuel
MASS_INPUT = [108546, 76196, 144412, 100530, 143908, 79763, 109927, 53656,
82633, 103781, 97202, 81600, 115278, 90095, 85533, 58230, 142490,
65176, 132915, 82319, 148743, 91444, 145760, 78002, 127484,
67225, 74721, 145620, 146254, 135544, 74198, 88015, 53595,
142036, 113928, 65217, 56126, 110117, 57729, 99052, 89262,
141540, 70472, 145271, 81548, 68065, 93431, 125210, 66454, 67709,
149409, 101787, 130111, 60569, 131869, 149702, 135564, 135094,
71358, 100169, 127644, 147741, 102918, 93503, 74752, 135883,
120158, 94570, 129517, 85602, 55921, 76746, 107055, 79320, 81991,
58982, 63009, 91360, 147253, 51139, 61871, 107140, 146767, 77441,
125533, 70317, 125271, 73189, 141359, 144549, 104812, 91315,
145163, 147202, 95111, 82628, 116839, 132358, 99704, 85305]
FUEL = [calculate_fuel(i) for i in MASS_INPUT]
print(sum(FUEL))
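# --- Hedged addition (illustrative only): part two of the puzzle uses the recursive
# branch of calculate_fuel, which keeps adding fuel for the fuel itself until the
# extra contribution reaches zero.
FUEL_RECURSIVE = [calculate_fuel(i, recursive=True) for i in MASS_INPUT]
print(sum(FUEL_RECURSIVE))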
|
py | 1a40153cc6c25c720359f9ca228ddc9ea79cdfd6 | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from colormath.color_objects import sRGBColor, XYZColor
from colormath.color_conversions import convert_color
from colorsys import hsv_to_rgb
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
def color_pairs_plot(*args, **kwargs):
"""
Plot swatches of color
:param args: separate rgb channels, 2 lists of rgb tuples, or a list of tuples of rgb tuples
:param kwargs: groups (in order to plot multiple columns of swatchs)
:return:
"""
if len(args) == 6:
return _color_pairs_plot_rgb(*args, **kwargs)
elif len(args) == 2:
return _color_pairs_plot_sep(*args, **kwargs)
else:
return _color_pairs_plot_tupled(*args, **kwargs)
def _color_pairs_plot_rgb(r1, g1, b1, r2, g2, b2, **kwargs):
return _color_pairs_plot_sep(zip(r1, g1, b1), zip(r2, g2, b2), **kwargs)
def _color_pairs_plot_sep(color1, color2, **kwargs):
return _color_pairs_plot_tupled(zip(color1, color2), **kwargs)
def _color_pairs_plot_tupled(rgb_pairs, **kwargs):
groups = kwargs.get('groups', 1)
normalize = kwargs.get('normalize', False)
# check if we should still normalize values
if not normalize:
normalize = max([v > 1 for color1, color2 in rgb_pairs for v in list(color1) + list(color2)])
nrows = len(rgb_pairs)
pairs_per_group = nrows / groups
if 'ax' in kwargs:
ax = kwargs['ax']
fig = ax.get_figure()
else:
fig, ax = plt.subplots()
# dimension info
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
X = width * fig.get_dpi()
Y = height * fig.get_dpi()
# space between swatches: arbitrary
swatch_space = 60
# make groups distinguishable
group_space = 0.5 * swatch_space
# we can define X = group_space * (groups - 1) + (swatch_space + 2 * swatch_width) * groups
swatch_width = (X - group_space * (groups - 1) - swatch_space * groups) / (2 * groups)
# offset between groups must consider swatch space etc
group_offset = 2 * swatch_width + swatch_space + group_space
# swatch height
h = Y / (pairs_per_group + 1)
for i, pair in enumerate(rgb_pairs):
# location for this pair on y axis
y = Y - (h * (i % pairs_per_group)) - h
# horizontal offset multipler based on group
group_id = i / pairs_per_group
for j, color in enumerate(pair):
# normalize rgb color to 0.0 to 1.0
if normalize:
color = [ channel / 255.0 for channel in color ]
# left/right swatch
is_second = j % 2
# starting point for this group
xmin = group_id * group_offset
# if it is the second swatch, we move a bit to the right
xmin += is_second * (swatch_width + swatch_space)
# max is simply the swatch width added to the start of the swatch
xmax = xmin + swatch_width
ax.hlines(y=y + h * 0.1, xmin= xmin, xmax=xmax, color=color, linewidth=h * 0.6)
# add an arrow
if j == 0:
ax.arrow(xmax + 10, y + h * 0.1, swatch_space * 0.5, 0, head_width = 8, width = 4, shape = 'full')
ax.set_axis_off()
return ax
def smash(x, min_v = 0.0, max_v = 1.0):
if x < min_v:
return min_v
elif x > max_v:
return max_v
else:
return x
def plot_along_hue(hues, y, ax = None, normalize = False, **kwargs):
# normalize x coordinates
if normalize or max(map(lambda x: x > 1.0, hues)):
hues = [h / 360.0 for h in hues]
# create "fake" HSV color with full saturation and value, but same hue as point
hsv_colors = [(h, 1, 1) for h in hues]
# convert color to rgb to actually color points in graph
rgb_colors = [hsv_to_rgb(*col) for col in hsv_colors]
# there may be some smudge, so anything outside of range gets put back into range
rgb_colors = [(smash(r), smash(g), smash(b)) for r, g, b in rgb_colors]
if ax is None:
fig, ax = plt.subplots()
ax.scatter(x = hues, y = y, c = rgb_colors, alpha = 1.0, s = 100, **kwargs)
return ax
# plots the spectral locus and then overlays colors as points by projecting into x,y
def chromaticity_scatter(colors, cs = None, marker = '*', converter = lambda x: convert_color(sRGBColor(*x), XYZColor), ax = None, **kwargs):
# plot basic background if not provided
if ax == None:
ax = _spectral_locus()
# convert every color to XYZ
XYZ = map(converter, colors)
# now convert every XYZ to x,y pairs
# check if we can iterate over points
try:
map(lambda x: x, XYZ[0])
except:
XYZ = map(lambda x: x.get_value_tuple(), XYZ)
xyz = [map(lambda x: x / sum(pt), pt) for pt in XYZ]
xs,ys,_ = zip(*xyz)
# create group colors if provided else sets to red
if not cs:
cs = 'red'
cmap = None
else:
cmap = plt.get_cmap('jet', len(cs))
cmap.set_under('gray')
ax.scatter(x = xs, y = ys, s = 100, c = cs, marker = marker, cmap = cmap, **kwargs)
return ax
def _spectral_locus():
# TODO we should just pickle xs, ys below
locus_pts_file = os.path.join(os.path.dirname(__file__), '../resources/spectral-locus.csv')
xs = []
ys = []
for line in open(locus_pts_file, "r"):
_, Xstr, Ystr, Zstr = line.split(",")
# convert from XYZ to x,y
XYZ = [ float(coord) for coord in [Xstr, Ystr, Zstr]]
denom = sum(XYZ)
xs.append(XYZ[0] / denom)
ys.append(XYZ[1] / denom)
fig, ax = plt.subplots()
poly = Polygon(np.array(zip(xs, ys)), fill = False, closed= True)
ax.add_patch(poly)
return ax
def plot_svd(m, xdim = 0, ydim = 1, colors = None, ax = None, title = "SVD plot", pct_var = True):
"""
Compute the SVD of a matrix and plot in 2-d as a scatter plot
:param m: matrix to decompose
:param xdim: vector of U to use as x axis
:param ydim: vector of U to use as y axis
:param colors: optional color mapping for each point
:param ax: optional existing axes
:param title: optional title
:param pct_var: if true returns the % of variance explained by the eigenvalues associated with xdim and ydim
:return: scatter plot and potentially % of variance explained by dimensions used
"""
if xdim < 0 or ydim < 0 or xdim == ydim:
raise ValueError("Must be valid 2-d for plotting")
u, s, v = np.linalg.svd(m)
if colors is None:
cmap = plt.get_cmap('jet')
else:
colors = np.array(colors)
cmap = plt.get_cmap('jet', len(colors))
cmap.set_under('gray')
if ax is None:
ax = plt.subplot()
ax.scatter(x=u[:, 0], y=u[:, 1], c = colors, cmap = cmap, label = "Group %s" )
ax.set_xlabel("U[:][%d]" % xdim)
ax.set_ylabel("U[:][%d]" % ydim)
ax.legend(loc = 'best')
ax.set_title(title)
if pct_var:
return ax, sum(s[[xdim, ydim]]) / sum(s)
else:
return ax
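# --- Hedged usage sketch (added for illustration only; the matrix and labels are random placeholders) ---
def _example_plot_svd():  # hypothetical helper, never called here
    m = np.random.rand(50, 8)                  # 50 observations, 8 features
    labels = np.random.randint(0, 3, size=50)  # arbitrary group ids used for coloring
    ax, pct = plot_svd(m, xdim=0, ydim=1, colors=labels, title="demo SVD plot")
    return pct                                 # share of singular-value mass in dims 0 and 1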
|
py | 1a40159960ecc6f3fba522cccb0cfdb2faffdbeb | import sys
sys.path.insert(0, 'augraphy')
import augraphy
import torchvision.transforms as transforms
import random
import torch
import numpy as np
import logging
import cv2
from albumentations import augmentations
from PIL import Image, ImageFilter
from augmixations.blots import HandWrittenBlot
from warp_mls import WarpMLS
logger = logging.getLogger(__name__)
class Paperize(object):
def __init__(self, process_datasets=None, p=0.5):
self.process_datasets = process_datasets or []
paper_phase = [
augraphy.PaperFactory(texture_path='augraphy/paper_textures/', p=1.),
augraphy.BrightnessTexturize(range=(0.8, 1.), deviation=0.05, p=0.5),
]
post_phase = [
augraphy.BookBinding(radius_range=(1, 10), curve_intensity_range=(0, 20), p=0.25),
augraphy.Brightness(range=(0.5, 1.), p=0.25),
augraphy.Gamma(range=(0.3, 1.8), p=0.25),
augraphy.LightingGradient(p=0.25),
]
self.pipeline = augraphy.AugraphyPipeline(ink_phase=[], paper_phase=paper_phase, post_phase=post_phase)
self.p = p
def __call__(self, inputs):
if not isinstance(inputs, (tuple, list)):
return inputs
image, dataset = inputs
if dataset not in self.process_datasets or random.random() < self.p:
return image
np_image = np.array(image)
np_image = self.mask_background(np_image)
if np_image.shape[0] >= 30 and np_image.shape[1] >= 30:
try:
np_image = self.pipeline.augment(np_image)['output']
except Exception as e:
logger.info(e)
image = Image.fromarray(np_image)
return image
@staticmethod
def mask_background(image):
original_image = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
image = cv2.bitwise_not(image)
kernel = np.ones((15, 15), np.uint8)
image = cv2.dilate(image, kernel, iterations=2)
gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
image = gray_image & image
original_image[np.where(image == 0)] = 0
return original_image
class NumpyAugmentation(object):
def __call__(self, image):
np_image = np.array(image)
np_image = self.forward(np_image)
return Image.fromarray(np_image)
def forward(self, np_image):
raise NotImplementedError
class ResizePad(NumpyAugmentation):
def __init__(self, width, height):
self.width = int(width)
self.height = int(height)
self.ratio = int(width / height)
def forward(self, img):
h, w, _ = img.shape
ratio = w / h
if ratio < self.ratio:
padding = np.zeros((h, self.ratio * h - w, 3), dtype=np.uint8)
img = cv2.hconcat([img, padding])
elif ratio > self.ratio:
padding = np.zeros((w // self.ratio - h, w, 3), dtype=np.uint8)
img = cv2.vconcat([img, padding])
img = cv2.resize(img, (self.width, self.height))
return img.astype(np.uint8)
class WeightedRandomChoice:
def __init__(self, trans, weights=None):
self.trans = trans
if not weights:
self.weights = [1] * len(trans)
else:
assert len(trans) == len(weights)
self.weights = weights
def __call__(self, img):
t = random.choices(self.trans, weights=self.weights, k=1)[0]
try:
tfm_img = t(img)
except Exception as e:
logger.warning('Error during data_aug:'+str(e))
return img
return tfm_img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Dilation(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MaxFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Erosion(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MinFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Underline(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
img_np = np.array(img.convert('L'))
black_pixels = np.where(img_np < 50)
try:
y1 = max(black_pixels[0])
x0 = min(black_pixels[1])
x1 = max(black_pixels[1])
except:
return img
for x in range(x0, x1):
for y in range(y1, y1-3, -1):
try:
img.putpixel((x, y), (0, 0, 0))
except:
continue
return img
class KeepOriginal(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
return img
class ToGray(NumpyAugmentation):
def __init__(self):
self.transform = augmentations.transforms.ToGray(always_apply=True)
def forward(self, image):
augmented = self.transform(image=image)
return augmented['image']
class Distort(NumpyAugmentation):
def __init__(self, segment=3):
self.segment = segment
def forward(self, src):
img_h, img_w = src.shape[:2]
cut = img_w // self.segment
thresh = cut // 3
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, self.segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Stretch(NumpyAugmentation):
def __init__(self, segment=4):
self.segment = segment
def forward(self, src):
img_h, img_w = src.shape[:2]
cut = img_w // self.segment
thresh = cut * 4 // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, self.segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Perspective(NumpyAugmentation):
def forward(self, src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Blot(NumpyAugmentation):
def __init__(self, max_count=2):
def get_params(count):
return {
'incline': (-10, 10),
'intensivity': (0.5, 0.9),
'transparency': (0.05, 0.3),
'count': count,
}
self.blots = [HandWrittenBlot(params=get_params(count=i+1)) for i in range(max_count)]
def forward(self, image):
blot = self.blots[random.randint(0, len(self.blots) - 1)]
return blot(image)
class PaperColor(NumpyAugmentation):
def __init__(self):
post_phase = [
augraphy.BookBinding(radius_range=(1, 10), curve_intensity_range=(0, 20), p=0.25),
augraphy.Brightness(range=(0.5, 1.), p=0.25),
augraphy.Gamma(range=(0.3, 1.8), p=0.25),
augraphy.LightingGradient(p=0.25),
]
self.pipeline = augraphy.AugraphyPipeline(ink_phase=[], paper_phase=[], post_phase=post_phase)
def forward(self, np_image):
if np_image.shape[0] >= 30 and np_image.shape[1] >= 30:
try:
np_image = self.pipeline.augment(np_image)['output']
except Exception as e:
logger.info(e)
return np_image
# 0: InterpolationMode.NEAREST,
# 2: InterpolationMode.BILINEAR,
# 3: InterpolationMode.BICUBIC,
# 4: InterpolationMode.BOX,
# 5: InterpolationMode.HAMMING,
# 1: InterpolationMode.LANCZOS,
def build_data_aug(size, mode, preprocess_datasets, resnet=False, resizepad=True,
use_additional_augs=False):
if resnet:
norm_tfm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
norm_tfm = transforms.Normalize(0.5, 0.5)
if resizepad:
resize_tfm = ResizePad(size[0], size[1])
else:
resize_tfm = transforms.Resize(size, interpolation=3)
if mode == 'train':
augmentations = [
# transforms.RandomHorizontalFlip(p=1),
transforms.RandomRotation(degrees=(-10, 10), expand=True, fill=0),
transforms.GaussianBlur(3),
Dilation(3),
Erosion(3),
Underline(),
KeepOriginal(),
]
if use_additional_augs:
augmentations.extend([
Distort(),
Stretch(),
Perspective(),
Blot(),
PaperColor(),
])
return transforms.Compose([
Paperize(preprocess_datasets),
ToGray(),
WeightedRandomChoice(augmentations),
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
else:
return transforms.Compose([
Paperize(),
ToGray(),
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
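# --- Hedged usage sketch (added for illustration only; the size and dataset name are placeholders) ---
def _example_build_train_transform():  # hypothetical helper, never called here
    tfm = build_data_aug(size=(384, 32), mode='train',
                         preprocess_datasets=['handwritten'],
                         use_additional_augs=True)
    return tfm  # torchvision Compose mapping a PIL image to a normalized tensor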
if __name__ == '__main__':
tfm = ResizePad()
img = Image.open('temp.jpg')
tfm(img).save('temp2.jpg')
|
py | 1a401684cbcd5c87196f9ad2f17777dbf0dafcb1 | """Package tests."""
def test_package_import():
import pvfit.modeling.double_diode # noqa
|