filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---
the-stack_0_1861 | import argparse
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import train_test_split
import utils
from utils import Dataset
from model import LinearNet
import matplotlib.pyplot as plt
import imageio
import numpy as np
def selection(data):
namelist = list()
for x in data.columns.values:
namelist.append(x)
return namelist
def variable_check(data, variable):
if variable not in selection(data):
print('[INFO] Missing variable!')
sys.exit(0)
else:
pass
# index = dict((y, x) for x, y in enumerate(selection(data)))
# if variable is not None:
# try:
# var = index[variable]
# except KeyError:
# print("Variable is empty or not found!")
# sys.exit(0)
# else:
# print(f"Variable '{variable}:{var}' is exist.")
# pass
def gpu_dataset(X, Y):
X_tensor = torch.FloatTensor(X).cuda()
y_tensor = torch.FloatTensor(Y).cuda()
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor,
test_size=0.2,
random_state=0)
x, y = Variable(X_train), Variable(y_train)
return x, y
def cpu_dataset(X, Y):
X_tensor = torch.FloatTensor(X)
y_tensor = torch.FloatTensor(Y)
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor,
test_size=0.2,
random_state=0)
x, y = X_train, y_train
return x, y
def train():
input_dir, var1, var2, adam, device = opt.input, opt.var1, opt.var2, opt.adam, opt.device
data = Dataset(input_dir).data
device = utils.select_device(device, batch_size=opt.batch_size)
for i in (var1, var2):
variable_check(data, i)
use_cuda = torch.cuda.is_available()
X_reshape = data[var1].values.reshape(-1, 1)
y_reshape = data[var2].values.reshape(-1, 1)
if use_cuda:
x, y = gpu_dataset(X_reshape, y_reshape)
else:
x, y = cpu_dataset(X_reshape, y_reshape)
# Initialize model
net = LinearNet(n_feature=x.size(1), n_output=y.size(1)).to(device)
if adam:
# optimizer using Adam
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
else:
# optimizer using SGD
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
loss_func = nn.MSELoss()
batch_size = opt.batch_size
n_epochs = opt.epoch
batch_no = len(x) // batch_size
train_loss = 0
train_loss_min = np.Inf
    # Train on whichever device the tensors were prepared for; the original
    # `if use_cuda:` gate skipped training entirely on CPU-only machines.
    for epoch in range(n_epochs):
        train_loss = 0
        num_right = 0
        for i in range(batch_no):
            start = i * batch_size
            end = start + batch_size
            optimizer.zero_grad()
            # Forward/backward pass on the current mini-batch only.
            prediction = net(x[start:end])
            loss = loss_func(prediction, y[start:end])
            loss.backward()
            optimizer.step()
            # The argmax-based "accuracy" is only meaningful for multi-output targets;
            # it is kept from the original logging but computed on matching devices.
            values, labels = torch.max(prediction, 1)
            num_right = np.sum(labels.cpu().data.numpy() == y[start:end].cpu().data.numpy().flatten())
            train_loss += loss.item() * batch_size
        train_loss = train_loss / len(x)
        if train_loss <= train_loss_min:
            print("Training loss decreased ({:6f} ===> {:6f}). Saving the model...".format(train_loss_min, train_loss))
            torch.save(net.state_dict(), "regression_model.pt")
            train_loss_min = train_loss
        if epoch % 50 == 0:
            print('')
            print("Epoch: {} \tTrain Loss: {} \tTrain Accuracy: {}".format(epoch + 1, train_loss, num_right / len(y[start:end])))
    print('Training Ended!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=10, help='epoch value')
parser.add_argument('--batch-size', type=int, default=8)
parser.add_argument('--input', type=str, default='./example/data.csv', help='*.csv path')
parser.add_argument('--var1', type=str, default='H', help='independent variable')
parser.add_argument('--var2', type=str, default='VUB', help='dependent variable')
parser.add_argument('--adam', action='store_true', default=True, help='use adam optimizer')
parser.add_argument('--device', default='0', help='device id (i.e. 0 or 0,1 or cpu)')
opt = parser.parse_args()
print(opt)
# device = utils.select_device(opt.device, batch_size=opt.batch_size)
# print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
# tb_writer = SummaryWriter(comment=opt.name)
train()
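    # --- Added usage sketch (not part of the original script) ---------------
    # After training, the saved weights could be reloaded for inference roughly
    # as below; the feature value is a placeholder and `LinearNet` is the class
    # imported at the top of this file.
    #
    #   model = LinearNet(n_feature=1, n_output=1)
    #   model.load_state_dict(torch.load("regression_model.pt", map_location="cpu"))
    #   model.eval()
    #   with torch.no_grad():
    #       print(model(torch.FloatTensor([[42.0]])))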
|
the-stack_0_1862 | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from app import config
class TestV2TokenABI:
"""
Test Case for v2.token_abi.~~
"""
    # API under test
apiurl_base = '/v2/ABI'
    # <Normal case 1>
    # Fetch the StraightBond ABI
def test_straightbondabi_normal(self, client, session, shared_contract):
config.BOND_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/StraightBond'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 2>
    # Fetch the Share ABI
def test_shareabi_normal(self, client, session, shared_contract):
config.SHARE_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Share'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 3>
    # Fetch the Membership ABI
def test_membershipabi_normal(self, client, session, shared_contract):
config.MEMBERSHIP_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Membership'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Normal case 4>
    # Fetch the Coupon ABI
def test_couponabi_normal(self, client, session, shared_contract):
config.COUPON_TOKEN_ENABLED = True
apiurl = self.apiurl_base + '/Coupon'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 200
assert resp.json['meta'] == {'code': 200, 'message': 'OK'}
assert resp.json['data'] is not None
    # <Error case 1>
    # Nonexistent ABI
def test_error_1(self, client, session, shared_contract):
apiurl = self.apiurl_base + '/Unknown'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
    # <Error case 2>
    # StraightBond ABI when ENABLED=false
def test_error_2(self, client, session, shared_contract):
config.BOND_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/StraightBond'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/StraightBond'
}
    # <Error case 3>
    # Share ABI when ENABLED=false
def test_error_3(self, client, session, shared_contract):
config.SHARE_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Share'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Share'
}
    # <Error case 4>
    # Membership ABI when ENABLED=false
def test_error_4(self, client, session, shared_contract):
config.MEMBERSHIP_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Membership'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Membership'
}
    # <Error case 5>
    # Coupon ABI when ENABLED=false
def test_error_5(self, client, session, shared_contract):
config.COUPON_TOKEN_ENABLED = False
apiurl = self.apiurl_base + '/Coupon'
query_string = ''
resp = client.simulate_get(apiurl, query_string=query_string)
assert resp.status_code == 404
assert resp.json['meta'] == {
'code': 10,
'message': 'Not Supported',
'description': 'method: GET, url: /v2/ABI/Coupon'
} |
the-stack_0_1866 | """Procedures to build trajectories for algorithms in the HMC family.
To propose a new state, algorithms in the HMC family generally proceed by [1]_:
1. Sampling a trajectory starting from the initial point;
2. Sampling a new state from this sampled trajectory.
Step (1) ensures that the process is reversible and thus that detailed balance
is respected. The traditional implementation of HMC does not sample a
trajectory, but instead takes a fixed number of steps in the same direction and
flips the momentum of the last state.
We distinguish here between two different methods to sample trajectories: static
and dynamic sampling. In the static setting we sample trajectories with a fixed
number of steps, while in the dynamic setting the total number of steps is
determined by a dynamic termination criterion. Traditional HMC falls in the
former category, NUTS in the latter.
There are also two methods to sample proposals from these trajectories. In the
static setting we first build the trajectory and then sample a proposal from
this trajectory. In the progressive setting we update the proposal as the
trajectory is being sampled. While the former is faster, we risk saturating the
memory by keeping states that will subsequently be discarded.
References
----------
.. [1]: Betancourt, Michael. "A conceptual introduction to Hamiltonian Monte Carlo." arXiv preprint arXiv:1701.02434 (2017).
"""
from typing import Callable, NamedTuple, Tuple
import jax
import jax.numpy as jnp
from blackjax.inference.hmc.integrators import IntegratorState
from blackjax.inference.hmc.proposal import (
Proposal,
progressive_biased_sampling,
progressive_uniform_sampling,
proposal_generator,
)
from blackjax.types import PRNGKey, PyTree
class Trajectory(NamedTuple):
leftmost_state: IntegratorState
rightmost_state: IntegratorState
momentum_sum: PyTree
num_states: int
def append_to_trajectory(trajectory: Trajectory, state: IntegratorState) -> Trajectory:
"""Append a state to the (right of the) trajectory to form a new trajectory."""
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, trajectory.momentum_sum, state.momentum
)
return Trajectory(
trajectory.leftmost_state, state, momentum_sum, trajectory.num_states + 1
)
def reorder_trajectories(
direction: int, trajectory: Trajectory, new_trajectory: Trajectory
) -> Tuple[Trajectory, Trajectory]:
"""Order the two trajectories depending on the direction."""
return jax.lax.cond(
direction > 0,
lambda _: (
trajectory,
new_trajectory,
),
lambda _: (
new_trajectory,
trajectory,
),
operand=None,
)
def merge_trajectories(left_trajectory: Trajectory, right_trajectory: Trajectory):
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, left_trajectory.momentum_sum, right_trajectory.momentum_sum
)
return Trajectory(
left_trajectory.leftmost_state,
right_trajectory.rightmost_state,
momentum_sum,
left_trajectory.num_states + right_trajectory.num_states,
)
# -------------------------------------------------------------------
# Integration
#
# Generating samples by choosing a direction and running the integrator
# several times along this direction. Distinct from sampling.
# -------------------------------------------------------------------
def static_integration(
integrator: Callable,
step_size: float,
num_integration_steps: int,
direction: int = 1,
) -> Callable:
"""Generate a trajectory by integrating several times in one direction."""
directed_step_size = direction * step_size
def integrate(initial_state: IntegratorState) -> IntegratorState:
def one_step(state, _):
state = integrator(state, directed_step_size)
return state, state
last_state, _ = jax.lax.scan(
one_step, initial_state, jnp.arange(num_integration_steps)
)
return last_state
return integrate
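# --- Added illustration (not part of the original blackjax module) ----------
# A self-contained sketch of the fixed-length ("static") integration pattern
# implemented by `static_integration` above, written against a toy 1D
# standard-normal target. `_toy_leapfrog_step` and `_demo_static_trajectory`
# are hypothetical names used only to make the idea concrete; real users would
# pass one of blackjax's integrators to `static_integration` instead.
def _toy_leapfrog_step(state, step_size):
    """One leapfrog step for the potential U(q) = q**2 / 2 (so grad U = q)."""
    position, momentum = state
    momentum = momentum - 0.5 * step_size * position
    position = position + step_size * momentum
    momentum = momentum - 0.5 * step_size * position
    return position, momentum


def _demo_static_trajectory(position, momentum, step_size=0.1, num_steps=20):
    """Take a fixed number of leapfrog steps, as traditional HMC does."""

    def one_step(carry, _):
        return _toy_leapfrog_step(carry, step_size), None

    (position, momentum), _ = jax.lax.scan(
        one_step, (position, momentum), jnp.arange(num_steps)
    )
    return position, momentum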
class DynamicIntegrationState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_progressive_integration(
integrator: Callable,
kinetic_energy: Callable,
update_termination_state: Callable,
is_criterion_met: Callable,
divergence_threshold: float,
):
"""Integrate a trajectory and update the proposal sequentially in one direction
until the termination criterion is met.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
update_termination_state
Updates the state of the termination mechanism.
is_criterion_met
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
termination_state,
max_num_steps: int,
step_size,
initial_energy,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal sequentially until the termination criterion is met.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
_, integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
def add_one_state(loop_state):
rng_key, integration_state, _ = loop_state
step, proposal, trajectory, termination_state = integration_state
rng_key, proposal_key = jax.random.split(rng_key)
new_state = integrator(trajectory.rightmost_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, new_state)
# At step 0, we always accept the proposal, since we
# take one step to get the leftmost state of the tree.
(new_trajectory, sampled_proposal) = jax.lax.cond(
step == 0,
lambda _: (
Trajectory(new_state, new_state, new_state.momentum,1),
new_proposal,
),
lambda _: (
append_to_trajectory(trajectory, new_state),
sample_proposal(proposal_key, proposal, new_proposal),
),
operand=None,
)
new_termination_state = update_termination_state(
termination_state, new_trajectory.momentum_sum, new_state.momentum, step
)
has_terminated = is_criterion_met(
new_termination_state, new_trajectory.momentum_sum, new_state.momentum
)
new_integration_state = DynamicIntegrationState(
step + 1,
sampled_proposal,
new_trajectory,
new_termination_state,
)
return (rng_key, new_integration_state, (is_diverging, has_terminated))
proposal_placeholder, _ = generate_proposal(initial_energy, initial_state)
trajectory_placeholder = Trajectory(
initial_state, initial_state, initial_state.momentum, 0
)
integration_state_placeholder = DynamicIntegrationState(
0,
proposal_placeholder,
trajectory_placeholder,
termination_state,
)
_, integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
do_keep_integrating,
add_one_state,
(rng_key, integration_state_placeholder, (False, False)),
)
step, proposal, trajectory, termination_state = integration_state
        # In the while_loop we always extend in the rightmost direction.
new_trajectory = jax.lax.cond(
direction > 0,
lambda _: trajectory,
lambda _: Trajectory(
trajectory.rightmost_state,
trajectory.leftmost_state,
trajectory.momentum_sum,
trajectory.num_states,
),
operand=None,
)
return (
proposal,
new_trajectory,
termination_state,
is_diverging,
has_terminated,
)
return integrate
def dynamic_recursive_integration(
integrator: Callable,
kinetic_energy: Callable,
uturn_check_fn: Callable,
divergence_threshold: float,
use_robust_uturn_check: bool = False,
):
"""Integrate a trajectory and update the proposal recursively in Python
until the termination criterion is met.
This is the implementation of Algorithm 6 from [1] with multinomial sampling.
    The implementation here is mostly for validating the progressive implementation,
    to make sure the two are equivalent. The recursive implementation should not
    be used for actual sampling, as it cannot be jitted and is thus likely to be slow.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
uturn_check_fn
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
use_robust_uturn_check
        Bool indicating whether to perform an additional U-turn check between the two sub-trajectories.
References
----------
.. [1]: Hoffman, Matthew D., and Andrew Gelman. "The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo." J. Mach. Learn. Res. 15.1 (2014): 1593-1623.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def buildtree_integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
tree_depth: int,
step_size,
initial_energy: float,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
        The function `buildtree_integrate` calls itself for tree_depth > 0, thus invoking
the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
if tree_depth == 0:
# Base case - take one leapfrog step in the direction v.
next_state = integrator(initial_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, next_state)
trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
return (
rng_key,
new_proposal,
trajectory,
is_diverging,
False,
)
else:
(
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
initial_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
            # Note that is_diverging and is_turning are updated in place
if ~is_diverging & ~is_turning:
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
rng_key,
new_proposal,
new_trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
start_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
trajectory = merge_trajectories(left_trajectory, right_trajectory)
if ~is_turning:
is_turning = uturn_check_fn(
trajectory.leftmost_state.momentum,
trajectory.rightmost_state.momentum,
trajectory.momentum_sum,
)
if use_robust_uturn_check & (tree_depth - 1 > 0):
momentum_sum_left = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.momentum_sum,
right_trajectory.leftmost_state.momentum,
)
is_turning_left = uturn_check_fn(
left_trajectory.leftmost_state.momentum,
right_trajectory.leftmost_state.momentum,
momentum_sum_left,
)
momentum_sum_right = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.rightmost_state.momentum,
right_trajectory.momentum_sum,
)
is_turning_right = uturn_check_fn(
left_trajectory.rightmost_state.momentum,
right_trajectory.rightmost_state.momentum,
momentum_sum_right,
)
is_turning = is_turning | is_turning_left | is_turning_right
rng_key, proposal_key = jax.random.split(rng_key)
proposal = sample_proposal(proposal_key, proposal, new_proposal)
return (
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
)
return buildtree_integrate
# -------------------------------------------------------------------
# Sampling
#
# Sampling a trajectory by choosing a direction at random and integrating
# the trajectory in this direction. In the simplest case we perform one
# integration step, but can also perform several as is the case in the
# NUTS algorithm.
# -------------------------------------------------------------------
class DynamicExpansionState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_multiplicative_expansion(
trajectory_integrator: Callable,
uturn_check_fn: Callable,
step_size: float,
max_num_expansions: int = 10,
rate: int = 2,
) -> Callable:
"""Sample a trajectory and update the proposal sequentially
until the termination criterion is met.
The trajectory is sampled with the following procedure:
1. Pick a direction at random;
2. Integrate `num_step` steps in this direction;
3. If the integration has stopped prematurely, do not update the proposal;
4. Else if the trajectory is performing a U-turn, return current proposal;
5. Else update proposal, `num_steps = num_steps ** rate` and repeat from (1).
Parameters
----------
trajectory_integrator
A function that runs the symplectic integrators and returns a new proposal
and the integrated trajectory.
uturn_check_fn
Function used to check the U-Turn criterion.
step_size
The step size used by the symplectic integrator.
max_num_expansions
The maximum number of trajectory expansions until the proposal is
returned.
rate
The rate of the geometrical expansion. Typically 2 in NUTS, this is why
the literature often refers to "tree doubling".
"""
proposal_sampler = progressive_biased_sampling
def expand(
rng_key: PRNGKey,
initial_expansion_state: DynamicExpansionState,
initial_energy: float,
):
def do_keep_expanding(loop_state) -> bool:
"""Determine whether we need to keep expanding the trajectory."""
_, expansion_state, (is_diverging, is_turning) = loop_state
return (
(expansion_state.step < max_num_expansions)
& ~is_diverging
& ~is_turning
)
def expand_once(loop_state):
"""Expand the current trajectory.
At each step we draw a direction at random, build a subtrajectory starting
from the leftmost or rightmost point of the current trajectory that is
twice as long as the current trajectory.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
"""
# Q: Should this function be aware of all the elements that need to
# be passed downstream?
rng_key, expansion_state, _ = loop_state
step, proposal, trajectory, termination_state = expansion_state
rng_key, direction_key, trajectory_key, proposal_key = jax.random.split(
rng_key, 4
)
# create new subtrajectory that is twice as long as the current
# trajectory.
direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
new_proposal,
new_trajectory,
termination_state,
is_diverging,
is_turning_subtree,
) = trajectory_integrator(
trajectory_key,
start_state,
direction,
termination_state,
rate ** step,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
# update the proposal
# we reject proposals coming from diverging or turning subtrajectories,
            # but accumulate the average acceptance probability across the entire trajectory
def update_sum_log_p_accept(inputs):
_, proposal, new_proposal = inputs
return Proposal(
proposal.state,
proposal.energy,
proposal.weight,
jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
),
)
sampled_proposal = jax.lax.cond(
is_diverging | is_turning_subtree,
update_sum_log_p_accept,
lambda x: proposal_sampler(*x),
operand=(proposal_key, proposal, new_proposal),
)
is_turning = uturn_check_fn(
merged_trajectory.leftmost_state.momentum,
merged_trajectory.rightmost_state.momentum,
merged_trajectory.momentum_sum,
)
new_state = DynamicExpansionState(
step + 1, sampled_proposal, merged_trajectory, termination_state
)
info = (is_diverging, is_turning_subtree | is_turning)
return (rng_key, new_state, info)
_, expansion_state, (is_diverging, is_turning) = jax.lax.while_loop(
do_keep_expanding,
expand_once,
(rng_key, initial_expansion_state, (False, False)),
)
return expansion_state, (is_diverging, is_turning)
return expand
|
the-stack_0_1867 | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from quantization.quantization_record_base import QuantizationRecordBase
def pad(params,
in_tensors,
qrec: QuantizationRecordBase,
details=None):
del qrec, details
if params.pad_type == "zero":
return [np.pad(in_tensors[0], params.padding.numpy_pad_shape(params.in_dims[0]),
'constant', constant_values=0)]
raise NotImplementedError()
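# --- Added illustration (not part of the original handler) ------------------
# The "zero" branch above is plain `np.pad` with constant zeros. For example,
# a hypothetical pad of one element on each side of both axes of a (2, 2)
# tensor of ones:
#
#   >>> np.pad(np.ones((2, 2)), ((1, 1), (1, 1)), 'constant', constant_values=0)
#   array([[0., 0., 0., 0.],
#          [0., 1., 1., 0.],
#          [0., 1., 1., 0.],
#          [0., 0., 0., 0.]])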
|
the-stack_0_1869 | # function for bubble sort
def Bubble_Sort(items):
    # Repeatedly bubble the largest remaining value to the end of the list.
    for i in range(0, len(items) - 1):
        for j in range(0, len(items) - i - 1):
            # swap adjacent elements that are out of order
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
# function to print list
def Print_list(items):
    for i in range(0, len(items)):
        print(items[i], end=" ")
    print()
numbers = [2, 4, 3, 1, 6, 8, 4]
Bubble_Sort(numbers)
Print_list(numbers)
# Output
# 1 2 3 4 4 6 8
|
the-stack_0_1870 | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='modelzoo://resnet50',
backbone=dict(
type='IPN_kite',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
with_att=False,
style='pytorch'),
neck=dict(
type='kiteFPN',
in_channels=[256, 256, 256, 256, 512, 512, 512, 1024, 1024, 2048],
out_channels=256,
with_att=False,
num_outs=10),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4,4/0.8333333,4/0.66666666,4/0.5,8/0.8333333,8/0.6666666,8/0.5,16/0.666666666,16/0.5,32/0.5],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4,4/0.8333333,4/0.66666666,4/0.5,8/0.8333333,8/0.6666666,8/0.5,16/0.666666666,16/0.5,32/0.5],),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x_kite_woatt'
load_from = None#'./ckp/cascade_rcnn_r50_fpn_1x_20190501-3b6211ab.pth'
resume_from = None
workflow = [('train', 1)]
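# --- Added usage note (not part of the original config) ---------------------
# With an mmdetection (v1.x-style) checkout, a config like this is typically
# launched through the stock training entry points; the config path below is
# an assumption:
#   python tools/train.py configs/cascade_rcnn_r50_fpn_1x_kite_woatt.py
#   ./tools/dist_train.sh configs/cascade_rcnn_r50_fpn_1x_kite_woatt.py 8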
|
the-stack_0_1871 | import os
from PIL import Image, ImageDraw, ImageColor, ImageOps
from skimage.feature import hog
import numpy as np
def sliding_window(image, stepSize, windowSize):
for y in range(0, image.size[1], stepSize):
for x in range(0, image.size[0], stepSize):
# If the current crop would be outside of the image, skip it.
# Else, PIL will add a black part of the image, which will confuse the white percentage threshold and try to classify
# a box which isn't part of the original image.
            if (x + windowSize[0]) > image.size[0] or (y + windowSize[1]) > image.size[1]:
continue
yield (x, y, image.crop([x, y, x + windowSize[1], y + windowSize[0]]))
def draw_red_square(x, y, target_image):
draw = ImageDraw.Draw(target_image)
draw.rectangle((x,y) + (x + 20, y + 20), outline="#ff0000")
return target_image
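# --- Added usage sketch (not part of the original module) -------------------
# How the helpers in this file could be combined: slide a 20x20 window over an
# image, skip crops that are mostly white, and mark the remaining windows with
# a red square. The 0.9 threshold and the function name are placeholders.
def _demo_mark_windows(image_path, white_threshold=0.9):
    image = Image.open(image_path).convert(mode="L")
    marked = image.convert(mode="RGB")
    for x, y, crop in sliding_window(image, stepSize=10, windowSize=(20, 20)):
        if get_percentage_of_white(crop) < white_threshold:
            marked = draw_red_square(x, y, marked)
    return marked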
def create_dump_folder_for_images():
if os.path.exists("./dump"):
return
print('Creating dump directory for output images')
try:
os.mkdir("./dump")
print("Successfully created dump folder")
except OSError:
print("Could not create a dump folder. Please create one in the same path as this file")
def get_image_as_array(filepath, use_hog, expand_inverted):
img = Image.open(filepath)
img = img.convert(mode="L")
    img = img.resize((20, 20))  # resize() returns a new image, so the result must be reassigned
return convert_image_to_array(img, use_hog, expand_inverted)
# General function for converting an image into a list representation.
# Allows for setting invertion of image and HOG features on the list.
# The function flattens the list representation and squashes its values into floats of numbers between 0 and 1.
# It will return an empty array if the image is completely white.
def convert_image_to_array(img, use_hog, expand_inverted):
if expand_inverted:
img = ImageOps.invert(img)
if use_hog:
img = hog(img, orientations=8, pixels_per_cell=(4, 4), cells_per_block=(4, 4), block_norm='L2', feature_vector=True)
list_image = np.array(img, dtype=float).flatten()
if list_image.max() == 0:
return []
return list_image
# Returns the percentage of the image consisting of completely white spots.
# This is used to set a threshold for which windows should be considered.
def get_percentage_of_white(img):
list_image = np.array(img, dtype=float).flatten()
numberOfWhite = np.count_nonzero(list_image == 255.)
return numberOfWhite/400 |
the-stack_0_1873 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
'''Test waiting for db when db is available'''
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
'''Test waiting for db'''
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
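    # --- Added illustration (not part of the original tests) ----------------
    # A sketch of the kind of management command these tests exercise; the
    # module path and log messages are assumptions:
    #
    #   import time
    #   from django.core.management.base import BaseCommand
    #   from django.db import connections
    #   from django.db.utils import OperationalError
    #
    #   class Command(BaseCommand):
    #       """Pause execution until the database becomes available."""
    #       def handle(self, *args, **options):
    #           self.stdout.write('Waiting for database...')
    #           db_conn = None
    #           while not db_conn:
    #               try:
    #                   db_conn = connections['default']
    #               except OperationalError:
    #                   self.stdout.write('Database unavailable, waiting 1 second...')
    #                   time.sleep(1)
    #           self.stdout.write(self.style.SUCCESS('Database available!'))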
|
the-stack_0_1874 | #!/usr/bin/env python3
import subprocess
import re
import sys
RESULT_RE = re.compile(r'(T|F|[^ |])')
BASIC_PROPOSITION_RE = re.compile(r'([A-Za-z]+)')
REPLACEMENTS = {'~': r'\neg', '&': r'\wedge', '|': r'\vee', '<->': r'\leftrightarrow', '->': r'\rightarrow'}
wresult = subprocess.check_output(['hatt', '-e', sys.argv[1]])
result = [line.decode('UTF-8') for line in wresult.splitlines()]
del result[1] #row of --------
header_cols = re.findall(r'([A-Za-z] )+?| (.*)', result.pop(0))
expression = header_cols.pop()[1]
for k, v in REPLACEMENTS.items():
expression = expression.replace(k, v)
propositions = ['$' + x[0].strip() + '$' for x in header_cols]
print(str.format("\\begin{{tabular}}{{|{0}|c|}}", len(propositions) * 'c'))
print(r"\hline")
print(' & '.join(propositions), '& $', expression, r'$ \\')
print(r"\hline")
for line in result:
print(' & '.join(RESULT_RE.findall(line)), r'\\')
print(r"\hline")
print(r"\end{tabular}")
|
the-stack_0_1875 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.NamedUser
import github.Label
import six
class Milestone(github.GithubObject.CompletableGithubObject):
"""
This class represents Milestones. The reference can be found here http://developer.github.com/v3/issues/milestones/
"""
def __repr__(self):
return self.get__repr__({"number": self._number.value, "title": self._title.value})
@property
def closed_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._closed_issues)
return self._closed_issues.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def due_on(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._due_on)
return self._due_on.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def labels_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def open_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def title(self):
"""
:type: string
"""
self._completeIfNotSet(self._title)
return self._title.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:param title: string
:param state: string
:param description: string
:param due_on: date
:rtype: None
"""
assert isinstance(title, (str, six.text_type)), title
assert state is github.GithubObject.NotSet or isinstance(state, (str, six.text_type)), state
assert description is github.GithubObject.NotSet or isinstance(description, (str, six.text_type)), description
assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on
post_parameters = {
"title": title,
}
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if due_on is not github.GithubObject.NotSet:
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_labels(self):
"""
:calls: `GET /repos/:owner/:repo/milestones/:number/labels <http://developer.github.com/v3/issues/labels>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
"""
return github.PaginatedList.PaginatedList(
github.Label.Label,
self._requester,
self.url + "/labels",
None
)
@property
def _identity(self):
return self.number
def _initAttributes(self):
self._closed_issues = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._due_on = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._labels_url = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._open_issues = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "closed_issues" in attributes: # pragma no branch
self._closed_issues = self._makeIntAttribute(attributes["closed_issues"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["creator"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "due_on" in attributes: # pragma no branch
self._due_on = self._makeDatetimeAttribute(attributes["due_on"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
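# --- Added usage sketch (not part of PyGithub itself) -----------------------
# Editing a milestone through the methods defined above; the token, repository
# name and milestone number are placeholders:
#
#   from github import Github
#   gh = Github("<access-token>")
#   repo = gh.get_repo("owner/repository")
#   milestone = repo.get_milestone(1)
#   milestone.edit(milestone.title, state="closed", description="Shipped")
#   for label in milestone.get_labels():
#       print(label.name)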
|
the-stack_0_1877 | ###############################################################################
#
# ChartStock - A class for writing the Excel XLSX Stock charts.
#
# Copyright 2013-2019, John McNamara, [email protected]
#
from . import chart
class ChartStock(chart.Chart):
"""
A class for writing the Excel XLSX Stock charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartStock, self).__init__()
if options is None:
options = {}
self.show_crosses = 0
self.hi_low_lines = {}
self.date_category = True
# Override and reset the default axis values.
self.x_axis['defaults']['num_format'] = 'dd/mm/yyyy'
self.x2_axis['defaults']['num_format'] = 'dd/mm/yyyy'
# Set the available data label positions for this chart type.
self.label_position_default = 'right'
self.label_positions = {
'center': 'ctr',
'right': 'r',
'left': 'l',
'above': 't',
'below': 'b',
# For backward compatibility.
'top': 't',
'bottom': 'b'}
self.set_x_axis({})
self.set_x2_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:stockChart element.
self._write_stock_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_stock_chart(self, args):
# Write the <c:stockChart> element.
# Overridden to add hi_low_lines().
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:stockChart')
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:dropLines element.
self._write_drop_lines()
# Write the c:hiLowLines element.
if args.get('primary_axes'):
self._write_hi_low_lines()
# Write the c:upDownBars element.
self._write_up_down_bars()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:stockChart')
def _modify_series_formatting(self):
# Add default formatting to the series data.
index = 0
for series in self.series:
if index % 4 != 3:
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1}
if series['marker'] is None:
if index % 4 == 2:
series['marker'] = {'type': 'dot', 'size': 3}
else:
series['marker'] = {'type': 'none'}
index += 1
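# --- Added usage sketch (not part of the original module) -------------------
# Stock charts are normally created through the public XlsxWriter API rather
# than by instantiating this class directly; the file name and cell ranges
# below are placeholders, and the date/high/low/close data would have to be
# written to Sheet1 first:
#
#   import xlsxwriter
#   workbook = xlsxwriter.Workbook('stock_chart.xlsx')
#   worksheet = workbook.add_worksheet()
#   chart = workbook.add_chart({'type': 'stock'})
#   for col in ('B', 'C', 'D'):   # high, low and close series
#       chart.add_series({
#           'categories': '=Sheet1!$A$2:$A$6',
#           'values': '=Sheet1!${0}$2:${0}$6'.format(col),
#       })
#   worksheet.insert_chart('F2', chart)
#   workbook.close()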
|
the-stack_0_1878 | from PIL import Image
import os
os.chdir("/Users/Joan/Documents/python/rex1168.github.io")
def change_size(job_name='thumbnails'):
jobs = {"thumbnails":
{'source': './static/thumbnail', 'target': './static/thumbnail-small'},
'course-covers':
{'source': './static/img/course_cover', 'target': './static/img/course_cover-small'}
}
# select job
job = jobs[job_name]
source = job['source']
target = job['target']
basewidth = 300
for root, dirs, filenames in os.walk(source):
path = os.path.join(target, root.split('/')[-1] if root.replace("/", "") != source.replace("/", "") else "")
if not os.path.isdir(path):
os.mkdir(path)
for fn in filenames:
extension = fn.split('.')[-1].lower()
if extension in ['jpg', 'png', 'jpeg']:
save_path = os.path.join(path, fn)
if not os.path.isfile(save_path):
img = Image.open(os.path.join(root, fn))
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
img.save(save_path)
print(save_path)
def reduce_icon(to_height=64):
for name in os.listdir('./static/img/icon'):
if name.split('.')[-1].lower() in ['jpg', 'png']:
im = Image.open("./static/img/icon/%s" % name)
w, h = im.size
h_ = h / to_height
w /= h_
im = im.resize((int(w), to_height), Image.ANTIALIAS)
im.save("./static/img/icon/%s" % name)
def reduce_single(path, to_height=256):
im = Image.open(path)
w, h = im.size
h_ = h / to_height
w /= h_
im = im.resize((int(w), to_height), Image.ANTIALIAS)
im.save(path)
if __name__ == "__main__":
job_name = ['thumbnails', 'course-covers'][1]
change_size(job_name)
# reduce_icon()
# reduce_single(path='static/img/description/more_update.png', to_height=130)
|
the-stack_0_1879 | # """Assignment 03: Using inverse kinematics
# """
import json
import os
from compas_fab.backends import RosClient
from compas_fab.robots import Configuration
from compas.geometry import Frame
from compas.geometry import Point
from compas.geometry import Vector
from compas_fab.utilities import write_data_to_json
# This function defines the inputs of your assignment, you get a compas_fab.robots.Robot and a Frame
# and are expected to return ONE valid configuration to reach that frame
def calculate_ik(robot, frame):
# 1. define a valid start configuration for your frames
start_configuration = robot.zero_configuration()
# 2. use inverse kinematics to find out a valid configuration
configuration = robot.inverse_kinematics(frame, start_configuration)
# print("Found configuration", configuration)
return configuration
def store_configurations(configurations):
# 3. store all found configurations in a JSON file
here = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(here, "json_file.json"))
configuration_json =[]
for configuration in configurations:
configuration_json.append(configuration.data)
write_data_to_json(configuration_json, path)
# pass
# Use the following to test from the command line
# Or copy solution_viewer.ghx next to the folder where you created assignment_03.py to visualize the same in Grasshopper
if __name__ == '__main__':
frame_list = [
Frame(Point(0.084, 0.319, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.152, 0.317, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.220, 0.315, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.288, 0.313, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.357, 0.310, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.425, 0.308, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.493, 0.306, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.561, 0.303, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.629, 0.301, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.698, 0.299, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.766, 0.297, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000))
]
# Loads the robot from ROS
with RosClient('localhost') as client:
robot = client.load_robot()
# And call our assignment functions for each frame in the example
configurations = []
for frame in frame_list:
configuration = calculate_ik(robot, frame)
configurations.append(configuration)
print("Found configuration", configuration)
store_configurations(configurations)
|
the-stack_0_1881 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .inventory_item_properties import InventoryItemProperties
class VirtualMachineInventoryItem(InventoryItemProperties):
"""The VM inventory item.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param managed_resource_id: Gets or sets the tracked resource id
corresponding to the inventory resource.
:type managed_resource_id: str
:param mo_ref_id: Gets or sets the MoRef (Managed Object Reference) ID for
the inventory item.
:type mo_ref_id: str
:param mo_name: Gets or sets the vCenter Managed Object name for the
inventory item.
:type mo_name: str
:ivar provisioning_state: Gets or sets the provisioning state.
:vartype provisioning_state: str
:param inventory_type: Required. Constant filled by server.
:type inventory_type: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'inventory_type': {'required': True},
}
_attribute_map = {
'managed_resource_id': {'key': 'managedResourceId', 'type': 'str'},
'mo_ref_id': {'key': 'moRefId', 'type': 'str'},
'mo_name': {'key': 'moName', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'inventory_type': {'key': 'inventoryType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineInventoryItem, self).__init__(**kwargs)
self.inventory_type = 'VirtualMachine'
|
the-stack_0_1887 | # coding: iso-8859-15
import py
import random
from pypy.objspace.std.listobject import W_ListObject, SizeListStrategy,\
IntegerListStrategy, ObjectListStrategy
from pypy.interpreter.error import OperationError
from rpython.rlib.rarithmetic import is_valid_int
class TestW_ListObject(object):
def test_is_true(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [])
assert self.space.is_true(w_list) == False
w_list = W_ListObject(self.space, [w(5)])
assert self.space.is_true(w_list) == True
w_list = W_ListObject(self.space, [w(5), w(3)])
assert self.space.is_true(w_list) == True
def test_len(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [])
assert self.space.eq_w(self.space.len(w_list), w(0))
w_list = W_ListObject(self.space, [w(5)])
assert self.space.eq_w(self.space.len(w_list), w(1))
w_list = W_ListObject(self.space, [w(5), w(3), w(99)]*111)
assert self.space.eq_w(self.space.len(w_list), w(333))
def test_getitem(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3)])
assert self.space.eq_w(self.space.getitem(w_list, w(0)), w(5))
assert self.space.eq_w(self.space.getitem(w_list, w(1)), w(3))
assert self.space.eq_w(self.space.getitem(w_list, w(-2)), w(5))
assert self.space.eq_w(self.space.getitem(w_list, w(-1)), w(3))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(2))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(42))
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(-3))
def test_getitems(self):
w = self.space.wrap
from pypy.objspace.std.listobject import make_range_list
r = make_range_list(self.space, 1,1,7)
l = [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]
l2 = r.getitems()
for i in range(7):
assert self.space.eq_w(l[i], l2[i])
def test_getitems_fixedsize(self):
w = self.space.wrap
from pypy.objspace.std.listobject import make_range_list
rangelist = make_range_list(self.space, 1,1,7)
emptylist = W_ListObject(self.space, [])
intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)])
strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')])
floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)])
objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)])
emptylist_copy = emptylist.getitems_fixedsize()
assert emptylist_copy == []
rangelist_copy = rangelist.getitems_fixedsize()
intlist_copy = intlist.getitems_fixedsize()
strlist_copy = strlist.getitems_fixedsize()
floatlist_copy = floatlist.getitems_fixedsize()
objlist_copy = objlist.getitems_fixedsize()
for i in range(7):
assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
emptylist_copy = emptylist.getitems_unroll()
assert emptylist_copy == []
rangelist_copy = rangelist.getitems_unroll()
intlist_copy = intlist.getitems_unroll()
strlist_copy = strlist.getitems_unroll()
floatlist_copy = floatlist.getitems_unroll()
objlist_copy = objlist.getitems_unroll()
for i in range(7):
assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
def test_random_getitem(self):
w = self.space.wrap
s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9')
w_list = W_ListObject(self.space, map(w, s))
keys = range(-len(s)-5, len(s)+5)
choices = keys + [None]*12
stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2,
len(s)-1, len(s), len(s)+1,
-len(s)-1, -len(s), -len(s)+1]
for i in range(40):
keys.append(slice(random.choice(choices),
random.choice(choices),
random.choice(stepchoices)))
random.shuffle(keys)
for key in keys:
try:
expected = s[key]
except IndexError:
self.space.raises_w(self.space.w_IndexError,
self.space.getitem, w_list, w(key))
else:
w_result = self.space.getitem(w_list, w(key))
assert self.space.unwrap(w_result) == expected
def test_iter(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3), w(99)])
w_iter = self.space.iter(w_list)
assert self.space.eq_w(self.space.next(w_iter), w(5))
assert self.space.eq_w(self.space.next(w_iter), w(3))
assert self.space.eq_w(self.space.next(w_iter), w(99))
py.test.raises(OperationError, self.space.next, w_iter)
py.test.raises(OperationError, self.space.next, w_iter)
def test_contains(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3), w(99)])
assert self.space.eq_w(self.space.contains(w_list, w(5)),
self.space.w_True)
assert self.space.eq_w(self.space.contains(w_list, w(99)),
self.space.w_True)
assert self.space.eq_w(self.space.contains(w_list, w(11)),
self.space.w_False)
assert self.space.eq_w(self.space.contains(w_list, w_list),
self.space.w_False)
def test_getslice(self):
w = self.space.wrap
def test1(testlist, start, stop, step, expected):
w_slice = self.space.newslice(w(start), w(stop), w(step))
w_list = W_ListObject(self.space, [w(i) for i in testlist])
w_result = self.space.getitem(w_list, w_slice)
assert self.space.unwrap(w_result) == expected
for testlist in [[], [5,3,99]]:
for start in [-2, 0, 1, 10]:
for end in [-1, 2, 999]:
test1(testlist, start, end, 1, testlist[start:end])
test1([5,7,1,4], 3, 1, -2, [4,])
test1([5,7,1,4], 3, 0, -2, [4, 7])
test1([5,7,1,4], 3, -1, -2, [])
test1([5,7,1,4], -2, 11, 2, [1,])
test1([5,7,1,4], -3, 11, 2, [7, 4])
test1([5,7,1,4], -5, 11, 2, [5, 1])
def test_setslice(self):
w = self.space.wrap
def test1(lhslist, start, stop, rhslist, expected):
w_slice = self.space.newslice(w(start), w(stop), w(1))
w_lhslist = W_ListObject(self.space, [w(i) for i in lhslist])
w_rhslist = W_ListObject(self.space, [w(i) for i in rhslist])
self.space.setitem(w_lhslist, w_slice, w_rhslist)
assert self.space.unwrap(w_lhslist) == expected
test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4])
test1([5,7,1,4], 1, 3, [9], [5,9,4])
test1([5,7,1,4], 1, 3, [9,8,6],[5,9,8,6,4])
test1([5,7,1,4], 1, 3, [], [5,4])
test1([5,7,1,4], 2, 2, [9], [5,7,9,1,4])
test1([5,7,1,4], 0, 99,[9,8], [9,8])
def test_add(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(-7)] * 111)
assert self.space.eq_w(self.space.add(w_list1, w_list1),
W_ListObject(self.space, [w(5), w(3), w(99),
w(5), w(3), w(99)]))
assert self.space.eq_w(self.space.add(w_list1, w_list2),
W_ListObject(self.space, [w(5), w(3), w(99)] +
[w(-7)] * 111))
assert self.space.eq_w(self.space.add(w_list1, w_list0), w_list1)
assert self.space.eq_w(self.space.add(w_list0, w_list2), w_list2)
def test_mul(self):
# only testing right mul at the moment
w = self.space.wrap
arg = w(2)
n = 3
w_lis = W_ListObject(self.space, [arg])
w_lis3 = W_ListObject(self.space, [arg]*n)
w_res = self.space.mul(w_lis, w(n))
assert self.space.eq_w(w_lis3, w_res)
# commute
w_res = self.space.mul(w(n), w_lis)
assert self.space.eq_w(w_lis3, w_res)
def test_mul_does_not_clone(self):
# only testing right mul at the moment
w = self.space.wrap
arg = w(2)
w_lis = W_ListObject(self.space, [arg])
w_lis.clone = None
# does not crash
self.space.mul(w_lis, w(5))
def test_setitem(self):
w = self.space.wrap
w_list = W_ListObject(self.space, [w(5), w(3)])
w_exp1 = W_ListObject(self.space, [w(5), w(7)])
w_exp2 = W_ListObject(self.space, [w(8), w(7)])
self.space.setitem(w_list, w(1), w(7))
assert self.space.eq_w(w_exp1, w_list)
self.space.setitem(w_list, w(-2), w(8))
assert self.space.eq_w(w_exp2, w_list)
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(2), w(5))
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(-3), w(5))
def test_random_setitem_delitem(self):
w = self.space.wrap
s = range(39)
w_list = W_ListObject(self.space, map(w, s))
expected = list(s)
keys = range(-len(s)-5, len(s)+5)
choices = keys + [None]*12
stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2,
len(s)-1, len(s), len(s)+1,
-len(s)-1, -len(s), -len(s)+1]
for i in range(50):
keys.append(slice(random.choice(choices),
random.choice(choices),
random.choice(stepchoices)))
random.shuffle(keys)
n = len(s)
for key in keys:
if random.random() < 0.15:
random.shuffle(s)
w_list = W_ListObject(self.space, map(w, s))
expected = list(s)
try:
value = expected[key]
except IndexError:
self.space.raises_w(self.space.w_IndexError,
self.space.setitem, w_list, w(key), w(42))
else:
if is_valid_int(value): # non-slicing
if random.random() < 0.25: # deleting
self.space.delitem(w_list, w(key))
del expected[key]
else:
self.space.setitem(w_list, w(key), w(n))
expected[key] = n
n += 1
else: # slice assignment
mode = random.choice(['samesize', 'resize', 'delete'])
if mode == 'delete':
self.space.delitem(w_list, w(key))
del expected[key]
elif mode == 'samesize':
newvalue = range(n, n+len(value))
self.space.setitem(w_list, w(key), w(newvalue))
expected[key] = newvalue
n += len(newvalue)
elif mode == 'resize' and key.step is None:
newvalue = range(n, n+random.randrange(0, 20))
self.space.setitem(w_list, w(key), w(newvalue))
expected[key] = newvalue
n += len(newvalue)
assert self.space.unwrap(w_list) == expected
def test_eq(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
assert self.space.eq_w(self.space.eq(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.eq(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.eq(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.eq(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.eq(w_list2, w_list3),
self.space.w_False)
def test_ne(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
assert self.space.eq_w(self.space.ne(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.ne(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.ne(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.ne(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.ne(w_list2, w_list3),
self.space.w_True)
def test_lt(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.lt(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.lt(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.lt(w_list2, w_list3),
self.space.w_True)
assert self.space.eq_w(self.space.lt(w_list4, w_list3),
self.space.w_True)
def test_ge(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.ge(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.ge(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.ge(w_list2, w_list3),
self.space.w_False)
assert self.space.eq_w(self.space.ge(w_list4, w_list3),
self.space.w_False)
def test_gt(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.gt(w_list0, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list1, w_list0),
self.space.w_True)
assert self.space.eq_w(self.space.gt(w_list1, w_list1),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list1, w_list2),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list2, w_list3),
self.space.w_False)
assert self.space.eq_w(self.space.gt(w_list4, w_list3),
self.space.w_False)
def test_le(self):
w = self.space.wrap
w_list0 = W_ListObject(self.space, [])
w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)])
w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)])
w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)])
assert self.space.eq_w(self.space.le(w_list0, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list1, w_list0),
self.space.w_False)
assert self.space.eq_w(self.space.le(w_list1, w_list1),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list1, w_list2),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list2, w_list3),
self.space.w_True)
assert self.space.eq_w(self.space.le(w_list4, w_list3),
self.space.w_True)
def test_sizehint(self):
space = self.space
w_l = space.newlist([], sizehint=10)
assert isinstance(w_l.strategy, SizeListStrategy)
space.call_method(w_l, 'append', space.wrap(3))
assert isinstance(w_l.strategy, IntegerListStrategy)
w_l = space.newlist([], sizehint=10)
space.call_method(w_l, 'append', space.w_None)
assert isinstance(w_l.strategy, ObjectListStrategy)
def test_newlist_hint(self):
space = self.space
w_lst = space.newlist_hint(13)
assert isinstance(w_lst.strategy, SizeListStrategy)
assert w_lst.strategy.sizehint == 13
def test_find_fast_on_intlist(self, monkeypatch):
monkeypatch.setattr(self.space, "eq_w", None)
w = self.space.wrap
intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)])
res = intlist.find(w(4), 0, 7)
assert res == 3
res = intlist.find(w(4), 0, 100)
assert res == 3
with py.test.raises(ValueError):
intlist.find(w(4), 4, 7)
with py.test.raises(ValueError):
intlist.find(w(4), 0, 2)
class AppTestListObject(object):
#spaceconfig = {"objspace.std.withliststrategies": True} # it's the default
def setup_class(cls):
import platform
import sys
on_cpython = (cls.runappdirect and
not hasattr(sys, 'pypy_translation_info'))
cls.w_on_cpython = cls.space.wrap(on_cpython)
cls.w_on_arm = cls.space.wrap(platform.machine().startswith('arm'))
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
def test_doc(self):
assert list.__doc__ == "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items"
assert list.__new__.__doc__ == "Create and return a new object. See help(type) for accurate signature."
assert list.__init__.__doc__ == "Initialize self. See help(type(self)) for accurate signature."
def test_getstrategyfromlist_w(self):
l0 = ["a", "2", "a", True]
# this raised TypeError on ListStrategies
l1 = ["a", "2", True, "a"]
l2 = [1, "2", "a", "a"]
assert set(l1) == set(l2)
def test_notequals(self):
assert [1,2,3,4] != [1,2,5,4]
def test_contains(self):
l = []
assert not l.__contains__(2)
l = [1,2,3]
assert l.__contains__(2)
assert not l.__contains__("2")
assert l.__contains__(1.0)
l = ["1","2","3"]
assert l.__contains__("2")
assert not l.__contains__(2)
l = range(4)
assert l.__contains__(2)
assert not l.__contains__("2")
l = [1,2,"3"]
assert l.__contains__(2)
assert not l.__contains__("2")
l = range(2, 20, 3) # = [2, 5, 8, 11, 14, 17]
assert l.__contains__(2)
assert l.__contains__(5)
assert l.__contains__(8)
assert l.__contains__(11)
assert l.__contains__(14)
assert l.__contains__(17)
assert not l.__contains__(3)
assert not l.__contains__(4)
assert not l.__contains__(7)
assert not l.__contains__(13)
assert not l.__contains__(20)
l = range(2, -20, -3) # [2, -1, -4, -7, -10, -13, -16, -19]
assert l.__contains__(2)
assert l.__contains__(-4)
assert l.__contains__(-13)
assert l.__contains__(-16)
assert l.__contains__(-19)
assert not l.__contains__(-17)
assert not l.__contains__(-3)
assert not l.__contains__(-20)
assert not l.__contains__(-21)
logger = []
class Foo(object):
def __init__(self, value, name=None):
self.value = value
self.name = name or value
def __repr__(self):
return '<Foo %s>' % self.name
def __eq__(self, other):
logger.append((self, other))
return self.value == other.value
foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3)
foo42 = Foo(42)
foo_list = [foo1, foo2, foo3]
foo42 in foo_list
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)]
del logger[:]
foo2_bis = Foo(2, '2 bis')
foo2_bis in foo_list
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
def test_call_list(self):
assert list('') == []
assert list('abc') == ['a', 'b', 'c']
assert list((1, 2)) == [1, 2]
l = [1]
assert list(l) is not l
assert list(l) == l
def test_explicit_new_init(self):
l = l0 = list.__new__(list)
l.__init__([1,2])
assert l is l0
assert l == [1,2]
list.__init__(l, [1,2,3])
assert l is l0
assert l == [1,2,3]
list.__init__(l, ['a', 'b', 'c'])
assert l is l0
assert l == ['a', 'b', 'c']
list.__init__(l)
assert l == []
def test_explicit_new_init_more_cases(self):
for assignment in [[], (), [3], ["foo"]]:
l = [1, 2]
l.__init__(assignment)
assert l == list(assignment)
def test_range_init(self):
x = list(range(5,1))
assert x == []
x = list(range(1,10))
x[22:0:-1] == range(1,10)
r = list(range(10, 10))
assert len(r) == 0
assert list(reversed(r)) == []
assert r[:] == []
def test_extend_list(self):
l = l0 = [1]
l.extend([2])
assert l is l0
assert l == [1,2]
l = ['a']
l.extend('b')
assert l == ['a', 'b']
l = ['a']
l.extend([0])
assert l == ['a', 0]
l = list(range(10))
l.extend([10])
assert l == list(range(11))
l = []
m = [1,2,3]
l.extend(m)
m[0] = 5
assert m == [5,2,3]
assert l == [1,2,3]
def test_extend_tuple(self):
l = l0 = [1]
l.extend((2,))
assert l is l0
assert l == [1,2]
l = ['a']
l.extend(('b',))
assert l == ['a', 'b']
def test_extend_iterable(self):
l = l0 = [1]
l.extend(iter([1, 2, 3, 4]))
assert l is l0
assert l == [1, 1, 2, 3, 4]
l = l0 = ['a']
l.extend(iter(['b', 'c', 'd']))
assert l == ['a', 'b', 'c', 'd']
assert l is l0
l = l0 = [1.2]
l.extend(iter([2.3, 3.4, 4.5]))
assert l == [1.2, 2.3, 3.4, 4.5]
assert l is l0
def test_extend_iterable_length_hint_overflow(self):
import sys
class CustomIterable(object):
def __iter__(self):
if False:
yield
def __length_hint__(self):
return sys.maxsize
a = [1, 2, 3, 4]
a.extend(CustomIterable())
assert a == [1, 2, 3, 4]
def test_sort(self):
l = l0 = [1, 5, 3, 0]
l.sort()
assert l is l0
assert l == [0, 1, 3, 5]
l = l0 = []
l.sort()
assert l is l0
assert l == []
l = l0 = [1]
l.sort()
assert l is l0
assert l == [1]
l = ["c", "a", "d", "b"]
l.sort(reverse=True)
assert l == ["d", "c", "b", "a"]
l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5]
l.sort()
assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5]
def test_sort_key(self):
def lower(x): return x.lower()
l = ['a', 'C', 'b']
l.sort(key=lower)
assert l == ['a', 'b', 'C']
l = []
l.sort(key=lower)
assert l == []
l = ['a']
l.sort(key=lower)
assert l == ['a']
r = list(range(10))
r.sort(key=lambda x: -x)
assert r == list(range(9, -1, -1))
def test_sort_reversed(self):
l = list(range(10))
l.sort(reverse=True)
assert l == list(range(9, -1, -1))
l = []
l.sort(reverse=True)
assert l == []
l = [1]
l.sort(reverse=True)
assert l == [1]
raises(TypeError, sorted, [], None, lambda x, y: 0)
def test_sort_cmp_key_reverse(self):
def lower(x): return x.lower()
l = ['a', 'C', 'b']
l.sort(reverse = True, key = lower)
assert l == ['C', 'b', 'a']
def test_sort_simple_string(self):
l = ["a", "d", "c", "b"]
l.sort()
assert l == ["a", "b", "c", "d"]
def test_sort_range(self):
l = list(range(3, 10, 3))
l.sort()
assert l == [3, 6, 9]
l.sort(reverse=True)
assert l == [9, 6, 3]
l.sort(reverse=True)
assert l == [9, 6, 3]
l.sort()
assert l == [3, 6, 9]
def test_getitem(self):
l = [1, 2, 3, 4, 5, 6, 9]
assert l[0] == 1
assert l[-1] == 9
assert l[-2] == 6
raises(IndexError, "l[len(l)]")
raises(IndexError, "l[-len(l)-1]")
l = ['a', 'b', 'c']
assert l[0] == 'a'
assert l[-1] == 'c'
assert l[-2] == 'b'
raises(IndexError, "l[len(l)]")
l = [1.1, 2.2, 3.3]
assert l[0] == 1.1
assert l[-1] == 3.3
assert l[-2] == 2.2
raises(IndexError, "l[len(l)]")
l = []
raises(IndexError, "l[1]")
def test_getitem_range(self):
l = range(5)
raises(IndexError, "l[-6]")
raises(IndexError, "l[5]")
assert l[0] == 0
assert l[-1] == 4
assert l[-2] == 3
assert l[-5] == 0
l = range(1, 5)
raises(IndexError, "l[-5]")
raises(IndexError, "l[4]")
assert l[0] == 1
assert l[-1] == 4
assert l[-2] == 3
assert l[-4] == 1
def test_setitem(self):
l = []
raises(IndexError, "l[1] = 2")
l = [5,3]
l[0] = 2
assert l == [2,3]
l = [5,3]
l[0] = "2"
assert l == ["2",3]
l = list(range(3))
l[0] = 1
assert l == [1,1,2]
def test_delitem(self):
l = [1, 2, 3, 4, 5, 6, 9]
del l[0]
assert l == [2, 3, 4, 5, 6, 9]
del l[-1]
assert l == [2, 3, 4, 5, 6]
del l[-2]
assert l == [2, 3, 4, 6]
raises(IndexError, "del l[len(l)]")
raises(IndexError, "del l[-len(l)-1]")
l = l0 = ['a', 'b', 'c']
del l[0]
assert l == ['b', 'c']
del l[-1]
assert l == ['b']
del l[-1]
assert l == []
assert l is l0
raises(IndexError, "del l[0]")
l = l0 = [1.1, 2.2, 3.3]
del l[0]
assert l == [2.2, 3.3]
del l[-1]
assert l == [2.2]
del l[-1]
assert l == []
assert l is l0
raises(IndexError, "del l[0]")
l = list(range(10))
del l[5]
assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9]
def test_getitem_slice(self):
l = list(range(10))
assert l[::] == l
del l[::2]
assert l == [1,3,5,7,9]
l[-2::-1] = l[:-1]
assert l == [7,5,3,1,9]
del l[-1:2:-1]
assert l == [7,5,3]
del l[:2]
assert l == [3]
assert l[1:] == []
assert l[1::2] == []
assert l[::] == l
assert l[0::-2] == l
assert l[-1::-5] == l
l = ['']
assert l[1:] == []
assert l[1::2] == []
assert l[::] == l
assert l[0::-5] == l
assert l[-1::-5] == l
l.extend(['a', 'b'])
assert l[::-1] == ['b', 'a', '']
l = [1,2,3,4,5]
assert l[1:0:None] == []
assert l[1:0] == []
def test_getslice_invalid(self):
x = [1,2,3,4]
assert x[10:0] == []
assert x[10:0:None] == []
x = list(range(1,5))
assert x[10:0] == []
assert x[10:0:None] == []
assert x[0:22] == [1,2,3,4]
assert x[-1:10] == [4]
assert x[0:22:None] == [1,2,3,4]
assert x[-1:10:None] == [4]
def test_getslice_range_backwards(self):
x = list(range(1,10))
assert x[22:-10] == []
assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1]
assert x[10:3:-1] == [9,8,7,6,5]
assert x[10:3:-2] == [9,7,5]
assert x[1:5:-1] == []
def test_delall(self):
l = l0 = [1,2,3]
del l[:]
assert l is l0
assert l == []
l = ['a', 'b']
del l[:]
assert l == []
l = [1.1, 2.2]
del l[:]
assert l == []
def test_clear(self):
l = l0 = [1,2,3]
l.clear()
assert l is l0
assert l == []
l = ['a', 'b']
l.clear()
assert l == []
l = [1.1, 2.2]
l.clear()
assert l == []
l = []
l.clear()
assert l == []
def test_iadd(self):
l = l0 = [1,2,3]
l += [4,5]
assert l is l0
assert l == [1,2,3,4,5]
l = l0 = [1.1,2.2,3.3]
l += [4.4,5.5]
assert l is l0
assert l == [1.1,2.2,3.3,4.4,5.5]
l = l0 = ['a', 'b', 'c']
l1 = l[:]
l += ['d']
assert l is l0
assert l == ['a', 'b', 'c', 'd']
l1 += [0]
assert l1 == ['a', 'b', 'c', 0]
r1 = r2 = list(range(5))
assert r1 is r2
r1 += [15]
assert r1 is r2
assert r1 == [0, 1, 2, 3, 4, 15]
assert r2 == [0, 1, 2, 3, 4, 15]
def test_iadd_iterable(self):
l = l0 = [1,2,3]
l += iter([4,5])
assert l is l0
assert l == [1,2,3,4,5]
def test_iadd_subclass(self):
class Bar(object):
def __radd__(self, other):
return ('radd', self, other)
bar = Bar()
l1 = [1,2,3]
l1 += bar
assert l1 == ('radd', bar, [1,2,3])
def test_add_lists(self):
l1 = [1,2,3]
l2 = [4,5,6]
l3 = l1 + l2
assert l3 == [1,2,3,4,5,6]
def test_imul(self):
l = l0 = [4,3]
l *= 2
assert l is l0
assert l == [4,3,4,3]
l *= 0
assert l is l0
assert l == []
l = l0 = [4,3]
l *= (-1)
assert l is l0
assert l == []
l = l0 = ['a', 'b']
l *= 2
assert l is l0
assert l == ['a', 'b', 'a', 'b']
l *= 0
assert l is l0
assert l == []
l = ['a']
l *= -5
assert l == []
l = l0 = [1.1, 2.2]
l *= 2
assert l is l0
assert l == [1.1, 2.2, 1.1, 2.2]
l = list(range(2))
l *= 2
assert l == [0, 1, 0, 1]
r1 = r2 = list(range(3))
assert r1 is r2
r1 *= 2
assert r1 is r2
assert r1 == [0, 1, 2, 0, 1, 2]
assert r2 == [0, 1, 2, 0, 1, 2]
def test_mul_errors(self):
try:
[1, 2, 3] * (3,)
except TypeError:
pass
def test_mul___index__(self):
class MyInt(object):
def __init__(self, x):
self.x = x
def __int__(self):
return self.x
class MyIndex(object):
def __init__(self, x):
self.x = x
def __index__(self):
return self.x
assert [0] * MyIndex(3) == [0, 0, 0]
raises(TypeError, "[0]*MyInt(3)")
raises(TypeError, "[0]*MyIndex(MyInt(3))")
def test_index(self):
c = list(range(10))
assert c.index(0) == 0
raises(ValueError, c.index, 10)
c = list('hello world')
assert c.index('l') == 2
raises(ValueError, c.index, '!')
assert c.index('l', 3) == 3
assert c.index('l', 4) == 9
raises(ValueError, c.index, 'l', 10)
assert c.index('l', -5) == 9
assert c.index('l', -25) == 2
assert c.index('o', 1, 5) == 4
raises(ValueError, c.index, 'o', 1, 4)
assert c.index('o', 1, 5-11) == 4
raises(ValueError, c.index, 'o', 1, 4-11)
raises(TypeError, c.index, 'c', 0, 4.3)
raises(TypeError, c.index, 'c', 1.0, 5.6)
c = [0, 2, 4]
assert c.index(0) == 0
raises(ValueError, c.index, 3)
c = [0.0, 2.2, 4.4]
assert c.index(0) == 0.0
e = raises(ValueError, c.index, 3)
import sys
if sys.version_info[:2] == (2, 7): # CPython 2.7, PyPy
assert str(e.value) == '3 is not in list'
def test_index_cpython_bug(self):
if self.on_cpython:
skip("cpython has a bug here")
c = list('hello world')
assert c.index('l', None, None) == 2
assert c.index('l', 3, None) == 3
assert c.index('l', None, 4) == 2
def test_ass_slice(self):
l = list(range(6))
l[1:3] = 'abc'
assert l == [0, 'a', 'b', 'c', 3, 4, 5]
l = []
l[:-3] = []
assert l == []
l = list(range(6))
l[:] = []
assert l == []
l = l0 = ['a', 'b']
l[1:1] = ['ae']
assert l == ['a', 'ae', 'b']
l[1:100] = ['B']
assert l == ['a', 'B']
l[:] = []
assert l == []
assert l is l0
l = []
l2 = range(3)
l.__setitem__(slice(0,3),l2)
assert l == [0,1,2]
def test_assign_extended_slice(self):
l = l0 = ['a', 'b', 'c']
l[::-1] = ['a', 'b', 'c']
assert l == ['c', 'b', 'a']
l[::-2] = [0, 1]
assert l == [1, 'b', 0]
l[-1:5:2] = [2]
assert l == [1, 'b', 2]
l[:-1:2] = [0]
assert l == [0, 'b', 2]
assert l is l0
l = [1,2,3]
raises(ValueError, "l[0:2:2] = [1,2,3,4]")
raises(ValueError, "l[::2] = []")
l = list(range(6))
l[::3] = ('a', 'b')
assert l == ['a', 1, 2, 'b', 4, 5]
l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
l[::3] = ('a', 'b')
assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5]
l_int = [5]; l_int.pop() # IntListStrategy
l_empty = [] # EmptyListStrategy
raises(ValueError, "l_int[::-1] = [42]")
raises(ValueError, "l_int[::7] = [42]")
raises(ValueError, "l_empty[::-1] = [42]")
raises(ValueError, "l_empty[::7] = [42]")
l_int[::1] = [42]; assert l_int == [42]
l_empty[::1] = [42]; assert l_empty == [42]
def test_setslice_with_self(self):
l = [1,2,3,4]
l[:] = l
assert l == [1,2,3,4]
l = [1,2,3,4]
l[0:2] = l
assert l == [1,2,3,4,3,4]
l = [1,2,3,4]
l[0:2] = l
assert l == [1,2,3,4,3,4]
l = [1,2,3,4,5,6,7,8,9,10]
raises(ValueError, "l[5::-1] = l")
l = [1,2,3,4,5,6,7,8,9,10]
raises(ValueError, "l[::2] = l")
l = [1,2,3,4,5,6,7,8,9,10]
l[5:] = l
assert l == [1,2,3,4,5,1,2,3,4,5,6,7,8,9,10]
l = [1,2,3,4,5,6]
l[::-1] = l
assert l == [6,5,4,3,2,1]
def test_setitem_slice_performance(self):
# because of a complexity bug, this used to take forever on a
# translated pypy. On CPython2.6 -A, it takes around 5 seconds.
if self.on_arm:
skip("consumes too much memory for most ARM machines")
if self.runappdirect:
count = 16*1024*1024
else:
count = 1024
b = [None] * count
for i in range(count):
b[i:i+1] = ['y']
assert b == ['y'] * count
def test_recursive_repr(self):
l = []
assert repr(l) == '[]'
l.append(l)
assert repr(l) == '[[...]]'
def test_copy(self):
# test that empty list copies the empty list
l = []
c = l.copy()
assert c == []
# test that the items of a list are the same
l = list(range(3))
c = l.copy()
assert l == c
# test that it's indeed a copy and not a reference
l = ['a', 'b']
c = l.copy()
c.append('i')
assert l == ['a', 'b']
assert c == l + ['i']
# test that it's a shallow, not a deep copy
l = [1, 2, [3, 4], 5]
c = l.copy()
assert l == c
assert c[3] == l[3]
raises(TypeError, l.copy, None)
def test_append(self):
l = []
l.append('X')
assert l == ['X']
l.append('Y')
l.append('Z')
assert l == ['X', 'Y', 'Z']
l = []
l.append(0)
assert l == [0]
for x in range(1, 5):
l.append(x)
assert l == list(range(5))
l = [1,2,3]
l.append("a")
assert l == [1,2,3,"a"]
l = [1.1, 2.2, 3.3]
l.append(4.4)
assert l == [1.1, 2.2, 3.3, 4.4]
l = list(range(4))
l.append(4)
assert l == list(range(5))
l = list(range(5))
l.append(26)
assert l == [0,1,2,3,4,26]
l = list(range(5))
l.append("a")
assert l == [0,1,2,3,4,"a"]
l = list(range(5))
l.append(5)
assert l == [0,1,2,3,4,5]
def test_count(self):
c = list('hello')
assert c.count('l') == 2
assert c.count('h') == 1
assert c.count('w') == 0
def test_insert(self):
c = list('hello world')
c.insert(0, 'X')
assert c[:4] == ['X', 'h', 'e', 'l']
c.insert(2, 'Y')
c.insert(-2, 'Z')
assert ''.join(c) == 'XhYello worZld'
ls = [1, 2, 3, 4, 5, 6, 7]
for i in range(5):
ls.insert(0, i)
assert len(ls) == 12
l = []
l.insert(4,2)
assert l == [2]
l = [1,2,3]
l.insert(0,"a")
assert l == ["a", 1, 2, 3]
l = list(range(3))
l.insert(1,5)
assert l == [0,5,1,2]
def test_pop(self):
c = list('hello world')
s = ''
for i in range(11):
s += c.pop()
assert s == 'dlrow olleh'
raises(IndexError, c.pop)
assert len(c) == 0
l = list(range(10))
l.pop()
assert l == list(range(9))
assert l.pop(0) == 0
l = [1.1, 2.2, 3.3]
l.pop()
assert l == [1.1, 2.2]
l = []
raises(IndexError, l.pop, 0)
def test_pop_custom_int(self):
class A(object):
def __init__(self, x):
self.x = x
def __int__(self):
return self.x
l = list(range(10))
x = l.pop(A(-1))
assert x == 9
assert l == list(range(9))
raises(TypeError, list(range(10)).pop, 1.0)
def test_pop_negative(self):
l1 = [1,2,3,4]
l2 = ["1", "2", "3", "4"]
l3 = list(range(5))
l4 = [1, 2, 3, "4"]
l5 = [1.1, 2.2, 3.3, 4.4]
raises(IndexError, l1.pop, -5)
raises(IndexError, l2.pop, -5)
raises(IndexError, l3.pop, -6)
raises(IndexError, l4.pop, -5)
raises(IndexError, l5.pop, -5)
assert l1.pop(-2) == 3
assert l2.pop(-2) == "3"
assert l3.pop(-2) == 3
assert l4.pop(-2) == 3
assert l5.pop(-2) == 3.3
def test_remove(self):
c = list('hello world')
c.remove('l')
assert ''.join(c) == 'helo world'
c.remove('l')
assert ''.join(c) == 'heo world'
c.remove('l')
assert ''.join(c) == 'heo word'
raises(ValueError, c.remove, 'l')
assert ''.join(c) == 'heo word'
l = list(range(5))
l.remove(2)
assert l == [0, 1, 3, 4]
l = [0, 3, 5]
raises(ValueError, c.remove, 2)
l = [0.0, 1.1, 2.2, 3.3, 4.4]
l.remove(2.2)
assert l == [0.0, 1.1, 3.3, 4.4]
l = [0.0, 3.3, 5.5]
raises(ValueError, c.remove, 2)
e = raises(ValueError, c.remove, 2.2)
if not self.on_cpython:
assert str(e.value) == 'list.remove(): 2.2 is not in list'
def test_reverse(self):
c = list('hello world')
c.reverse()
assert ''.join(c) == 'dlrow olleh'
l = list(range(3))
l.reverse()
assert l == [2,1,0]
r = list(range(3))
r[0] = 1
assert r == [1, 1, 2]
r.reverse()
assert r == [2, 1, 1]
def test_reversed(self):
assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h']
assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h']
def test_mutate_while_remove(self):
class Mean(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
if self.i == 9:
del l[self.i - 1]
return True
else:
return False
l = [Mean(i) for i in range(10)]
# does not crash
l.remove(None)
class Mean2(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
l.append(self.i)
return False
l = [Mean2(i) for i in range(10)]
# does not crash
l.remove(5)
assert l[10:] == [0, 1, 2, 3, 4, 6, 7, 8, 9]
def test_mutate_while_contains(self):
class Mean(object):
def __init__(self, i):
self.i = i
def __eq__(self, other):
if self.i == 9 == other:
del l[0]
return True
else:
return False
l = [Mean(i) for i in range(10)]
assert l.__contains__(9)
assert not l.__contains__(2)
def test_mutate_while_extend(self):
# this used to segfault pypy-c (with py.test -A)
import sys
if hasattr(sys, 'pypy_translation_info'):
if sys.pypy_translation_info['translation.gc'] == 'boehm':
skip("not reliable on top of Boehm")
class A(object):
def __del__(self):
print('del')
del lst[:]
for i in range(10):
keepalive = []
lst = list(str(i)) * 100
A()
while lst:
keepalive.append(lst[:])
def test_unicode(self):
s = "\ufffd\ufffd\ufffd"
assert s.encode("ascii", "replace") == b"???"
assert s.encode("ascii", "ignore") == b""
l1 = [s.encode("ascii", "replace")]
assert l1[0] == b"???"
l2 = [s.encode("ascii", "ignore")]
assert l2[0] == b""
l3 = [s]
assert l3[0].encode("ascii", "replace") == b"???"
def test_list_from_set(self):
l = ['a']
l.__init__(set('b'))
assert l == ['b']
def test_list_from_generator(self):
l = ['a']
g = (i*i for i in range(5))
l.__init__(g)
assert l == [0, 1, 4, 9, 16]
l.__init__(g)
assert l == []
assert list(g) == []
def test_list_from_bytes(self):
b = list(b'abc')
assert b == [97, 98, 99]
def test_uses_custom_iterator(self):
# obscure corner case: space.listview*() must not shortcut subclasses
# of dicts, because the OrderedDict in the stdlib relies on this.
# we extend the use case to lists and sets, i.e. all types that have
        # strategies, to avoid surprises depending on the strategy.
class X: pass
for base, arg in [
(list, []), (list, [5]), (list, ['x']), (list, [X]), (list, ['x']),
(set, []), (set, [5]), (set, ['x']), (set, [X]), (set, ['x']),
(dict, []), (dict, [(5,6)]), (dict, [('x',7)]), (dict, [(X,8)]),
(dict, [('x', 7)]),
]:
print(base, arg)
class SubClass(base):
def __iter__(self):
return iter("foobar")
sub = SubClass(arg)
assert list(sub) == ['f', 'o', 'o', 'b', 'a', 'r']
l = []
l.extend(sub)
assert l == ['f', 'o', 'o', 'b', 'a', 'r']
# test another list strategy
l = ['Z']
l.extend(sub)
assert l == ['Z', 'f', 'o', 'o', 'b', 'a', 'r']
class Sub2(base):
pass
assert list(Sub2(arg)) == list(base(arg))
s = set()
s.update(Sub2(arg))
assert s == set(base(arg))
def test_comparison(self):
assert ([] < []) is False
assert ([] <= []) is True
assert ([] == []) is True
assert ([] != []) is False
assert ([] > []) is False
assert ([] >= []) is True
assert ([5] < []) is False
assert ([5] <= []) is False
assert ([5] == []) is False
assert ([5] != []) is True
assert ([5] > []) is True
assert ([5] >= []) is True
assert ([] < [5]) is True
assert ([] <= [5]) is True
assert ([] == [5]) is False
assert ([] != [5]) is True
assert ([] > [5]) is False
assert ([] >= [5]) is False
assert ([4] < [5]) is True
assert ([4] <= [5]) is True
assert ([4] == [5]) is False
assert ([4] != [5]) is True
assert ([4] > [5]) is False
assert ([4] >= [5]) is False
assert ([5] < [5]) is False
assert ([5] <= [5]) is True
assert ([5] == [5]) is True
assert ([5] != [5]) is False
assert ([5] > [5]) is False
assert ([5] >= [5]) is True
assert ([6] < [5]) is False
assert ([6] <= [5]) is False
assert ([6] == [5]) is False
assert ([6] != [5]) is True
assert ([6] > [5]) is True
assert ([6] >= [5]) is True
N = float('nan')
assert ([N] < [5]) is False
assert ([N] <= [5]) is False
assert ([N] == [5]) is False
assert ([N] != [5]) is True
assert ([N] > [5]) is False
assert ([N] >= [5]) is False
assert ([5] < [N]) is False
assert ([5] <= [N]) is False
assert ([5] == [N]) is False
assert ([5] != [N]) is True
assert ([5] > [N]) is False
assert ([5] >= [N]) is False
def test_resizelist_hint(self):
if self.on_cpython:
skip('pypy-only test')
import __pypy__
l2 = []
__pypy__.resizelist_hint(l2, 100)
l1 = [1, 2, 3]
l1[:] = l2
assert len(l1) == 0
def test_use_method_for_wrong_object(self):
if self.on_cpython:
skip('pypy-only test')
raises(TypeError, list.append, 1, 2)
def test_ne_NotImplemented(self):
class NonList(object):
pass
non_list = NonList()
assert [] != non_list
def test_extend_from_empty_list_with_subclasses(self):
# some of these tests used to fail by ignoring the
# custom __iter__() --- but only if the list has so
# far the empty strategy, as opposed to .extend()ing
# a non-empty list.
class T(tuple):
def __iter__(self):
yield "ok"
assert list(T([5, 6])) == ["ok"]
#
class L(list):
def __iter__(self):
yield "ok"
assert list(L([5, 6])) == ["ok"]
assert list(L([5.2, 6.3])) == ["ok"]
#
class S(bytes):
def __iter__(self):
yield "ok"
assert list(S(b"don't see me")) == ["ok"]
#
class U(str):
def __iter__(self):
yield "ok"
assert list(U("don't see me")) == ["ok"]
#
class S(bytes):
def __getitem__(self, index):
never_called
assert list(S(b"abc")) == list(b"abc") # __getitem__ ignored
#
class U(str):
def __getitem__(self, index):
never_called
assert list(U("abc")) == list("abc") # __getitem__ ignored
def test_extend_from_nonempty_list_with_subclasses(self):
l = ["hi!"]
class T(tuple):
def __iter__(self):
yield "okT"
l.extend(T([5, 6]))
#
class L(list):
def __iter__(self):
yield "okL"
l.extend(L([5, 6]))
l.extend(L([5.2, 6.3]))
#
class S(bytes):
def __iter__(self):
yield "okS"
l.extend(S(b"don't see me"))
#
class U(str):
def __iter__(self):
yield "okU"
l.extend(U("don't see me"))
#
assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"]
#
class S(bytes):
def __getitem__(self, index):
never_called
l = []
l.extend(S(b"abc"))
assert l == list(b"abc") # __getitem__ ignored
#
class U(str):
def __getitem__(self, index):
never_called
l = []
l.extend(U("abc"))
assert l == list("abc") # __getitem__ ignored
def test_issue1266(self):
l = list(range(1))
l.pop()
# would previously crash
l.append(1)
assert l == [1]
l = list(range(1))
l.pop()
# would previously crash
l.reverse()
assert l == []
def test_issue1266_ovf(self):
import sys
l = list(range(0, sys.maxsize, sys.maxsize))
l.append(sys.maxsize)
# -2 would be next in the range sequence if overflow were
# allowed
l.append(-2)
assert l == [0, sys.maxsize, -2]
assert -2 in l
l = list(range(-sys.maxsize, sys.maxsize, sys.maxsize // 10))
item11 = l[11]
assert l[::11] == [-sys.maxsize, item11]
assert item11 in l[::11]
def test_bug_list_of_nans(self):
N = float('nan')
L1 = [N, 'foo'] # general object strategy
assert N in L1
assert L1.index(N) == 0
assert L1 == [N, 'foo']
        # our float list strategy needs to consider NaNs to be equal!
L2 = [N, 0.0] # float strategy
assert N in L2
assert L2.index(N) == 0
assert L2.index(-0.0) == 1
assert L2 == [N, -0.0]
# same with the int-or-float list strategy
L3 = [N, 0.0, -0.0, 0]
assert N in L3
assert L3.index(N) == 0
for i in [1, 2, 3]:
assert L3[i] == 0
assert L3[i] == 0.0
assert L3[i] == -0.0
assert L3.index(0, i) == i
assert L3.index(0.0, i) == i
assert L3.index(-0.0, i) == i
class AppTestWithoutStrategies:
spaceconfig = {"objspace.std.withliststrategies": False}
def test_no_shared_empty_list(self):
l = []
copy = l[:]
copy.append({})
assert copy == [{}]
notshared = l[:]
assert notshared == []
class AppTestListFastSubscr:
spaceconfig = {"objspace.std.optimized_list_getitem": True}
def test_getitem(self):
import operator
l = [0, 1, 2, 3, 4]
for i in range(5):
assert l[i] == i
assert l[3:] == [3, 4]
raises(TypeError, operator.getitem, l, "str")
|
the-stack_0_1888 | import torch
from torch.autograd import Variable
import time
import sys
from utils import *
def val_epoch(epoch, data_loader, model, criterion, opt, logger):
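    """Run one validation pass over data_loader.
    Averages loss and precision with AverageMeter, logs them through `logger`,
    and returns (average loss, average prec@1) for the epoch.
    """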
print('validation at epoch {}'.format(epoch))
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not opt.no_cuda:
targets = targets.cuda()
with torch.no_grad():
inputs = Variable(inputs)
targets = Variable(targets)
outputs = model(inputs)
loss = criterion(outputs, targets)
prec1, prec5 = calculate_accuracy(outputs.data, targets.data, topk=(1,1))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
losses.update(loss.data, inputs.size(0))
batch_time.update(time.time() - end_time)
end_time = time.time()
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.5f} ({batch_time.avg:.5f})\t'
'Data {data_time.val:.5f} ({data_time.avg:.5f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.5f} ({top1.avg:.5f})\t'
'Prec@5 {top5.val:.5f} ({top5.avg:.5f})'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
top5=top5))
logger.log({'epoch': epoch,
'loss': losses.avg.item(),
'prec1': top1.avg.item(),
'prec5': top5.avg.item()})
return losses.avg.item(), top1.avg.item() |
the-stack_0_1892 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow testing subclass to automate numerical testing.
Reference tests determine when behavior deviates from some "gold standard," and
are useful for determining when layer definitions have changed without
performing full regression testing, which is generally prohibitive. This class
handles the symbolic graph comparison as well as loading weights to avoid
relying on random number generation, which can change.
The tests performed by this class are:
1) Compare a generated graph against a reference graph. Differences are not
necessarily fatal.
2) Attempt to load known weights for the graph. If this step succeeds but
changes are present in the graph, a warning is issued but does not raise
an exception.
3) Perform a calculation and compare the result to a reference value.
This class also provides a method to generate reference data.
Note:
The test class is responsible for fixing the random seed during graph
definition. A convenience method name_to_seed() is provided to make this
process easier.
The test class should also define a .regenerate() class method which (usually)
just calls the op definition function with test=False for all relevant tests.
A concise example of this class in action is provided in:
official/utils/testing/reference_data_test.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import hashlib
import json
import os
import shutil
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class BaseTest(tf.test.TestCase):
"""TestCase subclass for performing reference data tests."""
def regenerate(self):
"""Subclasses should override this function to generate a new reference."""
raise NotImplementedError
@property
def test_name(self):
"""Subclass should define its own name."""
raise NotImplementedError
@property
def data_root(self):
"""Use the subclass directory rather than the parent directory.
Returns:
The path prefix for reference data.
"""
return os.path.join(os.path.split(
os.path.abspath(__file__))[0], "reference_data", self.test_name)
ckpt_prefix = "model.ckpt"
@staticmethod
def name_to_seed(name):
"""Convert a string into a 32 bit integer.
This function allows test cases to easily generate random fixed seeds by
hashing the name of the test. The hash string is in hex rather than base 10
which is why there is a 16 in the int call, and the modulo projects the
seed from a 128 bit int to 32 bits for readability.
Args:
name: A string containing the name of a test.
Returns:
A pseudo-random 32 bit integer derived from name.
"""
seed = hashlib.md5(name.encode("utf-8")).hexdigest()
return int(seed, 16) % (2**32 - 1)
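  # Illustrative note: md5 is deterministic, so e.g. name_to_seed("dense_layer")
  # returns the same 32-bit value on every run and machine, which keeps seeded
  # graph construction reproducible.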
@staticmethod
def common_tensor_properties(input_array):
"""Convenience function for matrix testing.
In tests we wish to determine whether a result has changed. However storing
an entire n-dimensional array is impractical. A better approach is to
calculate several values from that array and test that those derived values
are unchanged. The properties themselves are arbitrary and should be chosen
to be good proxies for a full equality test.
Args:
input_array: A numpy array from which key values are extracted.
Returns:
A list of values derived from the input_array for equality tests.
"""
output = list(input_array.shape)
flat_array = input_array.flatten()
output.extend([float(i) for i in
[flat_array[0], flat_array[-1], np.sum(flat_array)]])
return output
def default_correctness_function(self, *args):
"""Returns a vector with the concatenation of common properties.
This function simply calls common_tensor_properties() for every element.
It is useful as it allows one to easily construct tests of layers without
having to worry about the details of result checking.
Args:
*args: A list of numpy arrays corresponding to tensors which have been
evaluated.
Returns:
A list of values containing properties for every element in args.
"""
output = []
for arg in args:
output.extend(self.common_tensor_properties(arg))
return output
def _construct_and_save_reference_files(
self, name, graph, ops_to_eval, correctness_function):
"""Save reference data files.
Constructs a serialized graph_def, layer weights, and computation results.
It then saves them to files which are read at test time.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
data_dir = os.path.join(self.data_root, name)
# Make sure there is a clean space for results.
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
os.makedirs(data_dir)
# Serialize graph for comparison.
graph_bytes = graph.as_graph_def().SerializeToString()
expected_file = os.path.join(data_dir, "expected_graph")
with tf.gfile.Open(expected_file, "wb") as f:
f.write(graph_bytes)
with graph.as_default():
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with self.test_session(graph=graph) as sess:
sess.run(init)
saver.save(sess=sess, save_path=os.path.join(
data_dir, self.ckpt_prefix))
# These files are not needed for this test.
os.remove(os.path.join(data_dir, "checkpoint"))
os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))
# ops are evaluated even if there is no correctness function to ensure
# that they can be evaluated.
eval_results = [op.eval() for op in ops_to_eval]
if correctness_function is not None:
results = correctness_function(*eval_results)
with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
json.dump(results, f)
with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
json.dump([tf.VERSION, tf.GIT_VERSION], f)
def _evaluate_test_case(self, name, graph, ops_to_eval, correctness_function):
"""Determine if a graph agrees with the reference data.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
data_dir = os.path.join(self.data_root, name)
# Serialize graph for comparison.
graph_bytes = graph.as_graph_def().SerializeToString()
expected_file = os.path.join(data_dir, "expected_graph")
with tf.gfile.Open(expected_file, "rb") as f:
expected_graph_bytes = f.read()
# The serialization is non-deterministic byte-for-byte. Instead there is
# a utility which evaluates the semantics of the two graphs to test for
# equality. This has the added benefit of providing some information on
# what changed.
    # Note: The summary only shows the first difference detected. It is not
# an exhaustive summary of differences.
differences = pywrap_tensorflow.EqualGraphDefWrapper(
graph_bytes, expected_graph_bytes).decode("utf-8")
with graph.as_default():
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "r") as f:
tf_version_reference, tf_git_version_reference = json.load(
f) # pylint: disable=unpacking-non-sequence
tf_version_comparison = ""
if tf.GIT_VERSION != tf_git_version_reference:
tf_version_comparison = (
"Test was built using: {} (git = {})\n"
"Local TensorFlow version: {} (git = {})"
.format(tf_version_reference, tf_git_version_reference,
tf.VERSION, tf.GIT_VERSION)
)
with self.test_session(graph=graph) as sess:
sess.run(init)
try:
saver.restore(sess=sess, save_path=os.path.join(
data_dir, self.ckpt_prefix))
if differences:
tf.logging.warn(
"The provided graph is different than expected:\n {}\n"
"However the weights were still able to be loaded.\n{}".format(
differences, tf_version_comparison)
)
except: # pylint: disable=bare-except
raise self.failureException(
"Weight load failed. Graph comparison:\n {}{}"
.format(differences, tf_version_comparison))
eval_results = [op.eval() for op in ops_to_eval]
if correctness_function is not None:
results = correctness_function(*eval_results)
with tf.gfile.Open(os.path.join(data_dir, "results.json"), "r") as f:
expected_results = json.load(f)
self.assertAllClose(results, expected_results)
def _save_or_test_ops(self, name, graph, ops_to_eval=None, test=True,
correctness_function=None):
"""Utility function to automate repeated work of graph checking and saving.
The philosophy of this function is that the user need only define ops on
a graph and specify which results should be validated. The actual work of
managing snapshots and calculating results should be automated away.
Args:
name: String defining the run. This will be used to define folder names
and will be used for random seed construction.
graph: The graph in which the test is conducted.
ops_to_eval: Ops which the user wishes to be evaluated under a controlled
session.
test: Boolean. If True this function will test graph correctness, load
weights, and compute numerical values. If False the necessary test data
will be generated and saved.
correctness_function: This function accepts the evaluated results of
ops_to_eval, and returns a list of values. This list must be JSON
serializable; in particular it is up to the user to convert numpy
dtypes into builtin dtypes.
"""
ops_to_eval = ops_to_eval or []
if test:
try:
self._evaluate_test_case(
name=name, graph=graph, ops_to_eval=ops_to_eval,
correctness_function=correctness_function
)
except:
tf.logging.error("Failed unittest {}".format(name))
raise
else:
self._construct_and_save_reference_files(
name=name, graph=graph, ops_to_eval=ops_to_eval,
correctness_function=correctness_function
)
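# Illustrative usage sketch (not part of the original module): a subclass
# defines test_name, builds a seeded graph, and routes both the unit test and
# regenerate() through _save_or_test_ops(). The layer, shapes and names below
# are invented for illustration, and the class is kept commented out so it is
# not collected as a real test without reference data on disk.
#
# class ExampleDenseReferenceTest(BaseTest):
#
#   @property
#   def test_name(self):
#     return "example_dense"
#
#   def _dense_layer_case(self, test=True):
#     name = "dense_layer"
#     g = tf.Graph()
#     with g.as_default():
#       tf.set_random_seed(self.name_to_seed(name))
#       x = tf.random_uniform((4, 8))
#       y = tf.layers.dense(x, units=3)
#     self._save_or_test_ops(
#         name=name, graph=g, ops_to_eval=[y], test=test,
#         correctness_function=self.default_correctness_function)
#
#   def test_dense_layer(self):
#     self._dense_layer_case(test=True)
#
#   def regenerate(self):
#     self._dense_layer_case(test=False)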
class ReferenceDataActionParser(argparse.ArgumentParser):
"""Minimal arg parser so that test regeneration can be called from the CLI."""
def __init__(self):
super(ReferenceDataActionParser, self).__init__()
self.add_argument(
"--regenerate", "-regen",
action="store_true",
help="Enable this flag to regenerate test data. If not set unit tests"
"will be run."
)
def main(argv, test_class):
"""Simple switch function to allow test regeneration from the CLI."""
flags = ReferenceDataActionParser().parse_args(argv[1:])
if flags.regenerate:
if sys.version_info[0] == 2:
raise NameError("\nPython2 unittest does not support being run as a "
"standalone class.\nAs a result tests must be "
"regenerated using Python3.\n"
"Tests can be run under 2 or 3.")
test_class().regenerate()
else:
tf.test.main()
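# Typical entry point in a concrete test module (illustrative; MyReferenceTest
# is an assumed subclass name, not defined here):
#
#   if __name__ == "__main__":
#       main(argv=sys.argv, test_class=MyReferenceTest)
#
# Passing --regenerate rewrites the stored graph/weights/results instead of
# checking against them.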
|
the-stack_0_1894 | import logging
import math
import time
from asyncio import Lock
from random import choice, randrange
from secrets import randbits
from typing import Dict, List, Optional, Set, Tuple
from hddcoin.types.peer_info import PeerInfo, TimestampedPeerInfo
from hddcoin.util.hash import std_hash
from hddcoin.util.ints import uint16, uint64
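# Table parameters ported from Bitcoin Core's addrman: how entries from a single
# source group are spread over buckets, the bucket counts and sizes of the
# "tried" and "new" tables, and the staleness/retry limits used by
# ExtendedPeerInfo.is_terrible() below.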
TRIED_BUCKETS_PER_GROUP = 8
NEW_BUCKETS_PER_SOURCE_GROUP = 64
TRIED_BUCKET_COUNT = 256
NEW_BUCKET_COUNT = 1024
BUCKET_SIZE = 64
TRIED_COLLISION_SIZE = 10
NEW_BUCKETS_PER_ADDRESS = 8
LOG_TRIED_BUCKET_COUNT = 3
LOG_NEW_BUCKET_COUNT = 10
LOG_BUCKET_SIZE = 6
HORIZON_DAYS = 30
MAX_RETRIES = 3
MIN_FAIL_DAYS = 7
MAX_FAILURES = 10
log = logging.getLogger(__name__)
# This is a Python port from 'CAddrInfo' class from Bitcoin core code.
class ExtendedPeerInfo:
def __init__(
self,
addr: TimestampedPeerInfo,
src_peer: Optional[PeerInfo],
):
self.peer_info: PeerInfo = PeerInfo(
addr.host,
addr.port,
)
self.timestamp: int = addr.timestamp
self.src: Optional[PeerInfo] = src_peer
if src_peer is None:
self.src = self.peer_info
self.random_pos: Optional[int] = None
self.is_tried: bool = False
self.ref_count: int = 0
self.last_success: int = 0
self.last_try: int = 0
self.num_attempts: int = 0
self.last_count_attempt: int = 0
def to_string(self) -> str:
assert self.src is not None
out = (
self.peer_info.host
+ " "
+ str(int(self.peer_info.port))
+ " "
+ str(int(self.timestamp))
+ " "
+ self.src.host
+ " "
+ str(int(self.src.port))
)
return out
@classmethod
def from_string(cls, peer_str: str):
blobs = peer_str.split(" ")
assert len(blobs) == 5
peer_info = TimestampedPeerInfo(blobs[0], uint16(int(blobs[1])), uint64(int(blobs[2])))
src_peer = PeerInfo(blobs[3], uint16(int(blobs[4])))
return cls(peer_info, src_peer)
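    # Bucket selection below mirrors Bitcoin's addrman: each bucket index is a
    # hash of the table's secret key together with the address and its network
    # group, so remote peers cannot steer which bucket an address lands in.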
def get_tried_bucket(self, key: int) -> int:
hash1 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_key())[:8]),
byteorder="big",
)
hash1 = hash1 % TRIED_BUCKETS_PER_GROUP
hash2 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + bytes([hash1]))[:8]),
byteorder="big",
)
return hash2 % TRIED_BUCKET_COUNT
def get_new_bucket(self, key: int, src_peer: Optional[PeerInfo] = None) -> int:
if src_peer is None:
src_peer = self.src
assert src_peer is not None
hash1 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + src_peer.get_group())[:8]),
byteorder="big",
)
hash1 = hash1 % NEW_BUCKETS_PER_SOURCE_GROUP
hash2 = int.from_bytes(
bytes(std_hash(key.to_bytes(32, byteorder="big") + src_peer.get_group() + bytes([hash1]))[:8]),
byteorder="big",
)
return hash2 % NEW_BUCKET_COUNT
def get_bucket_position(self, key: int, is_new: bool, nBucket: int) -> int:
ch = "N" if is_new else "K"
hash1 = int.from_bytes(
bytes(
std_hash(
key.to_bytes(32, byteorder="big")
+ ch.encode()
+ nBucket.to_bytes(3, byteorder="big")
+ self.peer_info.get_key()
)[:8]
),
byteorder="big",
)
return hash1 % BUCKET_SIZE
def is_terrible(self, now: Optional[int] = None) -> bool:
if now is None:
now = int(math.floor(time.time()))
# never remove things tried in the last minute
if self.last_try > 0 and self.last_try >= now - 60:
return False
# came in a flying DeLorean
if self.timestamp > now + 10 * 60:
return True
# not seen in recent history
if self.timestamp == 0 or now - self.timestamp > HORIZON_DAYS * 24 * 60 * 60:
return True
# tried N times and never a success
if self.last_success == 0 and self.num_attempts >= MAX_RETRIES:
return True
# N successive failures in the last week
if now - self.last_success > MIN_FAIL_DAYS * 24 * 60 * 60 and self.num_attempts >= MAX_FAILURES:
return True
return False
def get_selection_chance(self, now: Optional[int] = None):
if now is None:
now = int(math.floor(time.time()))
chance = 1.0
since_last_try = max(now - self.last_try, 0)
# deprioritize very recent attempts away
if since_last_try < 60 * 10:
chance *= 0.01
# deprioritize 66% after each failed attempt,
# but at most 1/28th to avoid the search taking forever or overly penalizing outages.
chance *= pow(0.66, min(self.num_attempts, 8))
return chance
# This is a Python port from 'CAddrMan' class from Bitcoin core code.
class AddressManager:
id_count: int
key: int
random_pos: List[int]
tried_matrix: List[List[int]]
new_matrix: List[List[int]]
tried_count: int
new_count: int
map_addr: Dict[str, int]
map_info: Dict[int, ExtendedPeerInfo]
last_good: int
tried_collisions: List[int]
used_new_matrix_positions: Set[Tuple[int, int]]
used_tried_matrix_positions: Set[Tuple[int, int]]
allow_private_subnets: bool
def __init__(self) -> None:
self.clear()
self.lock: Lock = Lock()
def clear(self) -> None:
self.id_count = 0
self.key = randbits(256)
self.random_pos = []
self.tried_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(TRIED_BUCKET_COUNT)]
self.new_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(NEW_BUCKET_COUNT)]
self.tried_count = 0
self.new_count = 0
self.map_addr = {}
self.map_info = {}
self.last_good = 1
self.tried_collisions = []
self.used_new_matrix_positions = set()
self.used_tried_matrix_positions = set()
self.allow_private_subnets = False
def make_private_subnets_valid(self) -> None:
self.allow_private_subnets = True
# Use only this method for modifying new matrix.
def _set_new_matrix(self, row: int, col: int, value: int) -> None:
self.new_matrix[row][col] = value
if value == -1:
if (row, col) in self.used_new_matrix_positions:
self.used_new_matrix_positions.remove((row, col))
else:
if (row, col) not in self.used_new_matrix_positions:
self.used_new_matrix_positions.add((row, col))
# Use only this method for modifying tried matrix.
def _set_tried_matrix(self, row: int, col: int, value: int) -> None:
self.tried_matrix[row][col] = value
if value == -1:
if (row, col) in self.used_tried_matrix_positions:
self.used_tried_matrix_positions.remove((row, col))
else:
if (row, col) not in self.used_tried_matrix_positions:
self.used_tried_matrix_positions.add((row, col))
def load_used_table_positions(self) -> None:
self.used_new_matrix_positions = set()
self.used_tried_matrix_positions = set()
for bucket in range(NEW_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.new_matrix[bucket][pos] != -1:
self.used_new_matrix_positions.add((bucket, pos))
for bucket in range(TRIED_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.tried_matrix[bucket][pos] != -1:
self.used_tried_matrix_positions.add((bucket, pos))
def create_(self, addr: TimestampedPeerInfo, addr_src: Optional[PeerInfo]) -> Tuple[ExtendedPeerInfo, int]:
self.id_count += 1
node_id = self.id_count
self.map_info[node_id] = ExtendedPeerInfo(addr, addr_src)
self.map_addr[addr.host] = node_id
self.map_info[node_id].random_pos = len(self.random_pos)
self.random_pos.append(node_id)
return (self.map_info[node_id], node_id)
def find_(self, addr: PeerInfo) -> Tuple[Optional[ExtendedPeerInfo], Optional[int]]:
if addr.host not in self.map_addr:
return (None, None)
node_id = self.map_addr[addr.host]
if node_id not in self.map_info:
return (None, node_id)
return (self.map_info[node_id], node_id)
def swap_random_(self, rand_pos_1: int, rand_pos_2: int) -> None:
if rand_pos_1 == rand_pos_2:
return None
assert rand_pos_1 < len(self.random_pos) and rand_pos_2 < len(self.random_pos)
node_id_1 = self.random_pos[rand_pos_1]
node_id_2 = self.random_pos[rand_pos_2]
self.map_info[node_id_1].random_pos = rand_pos_2
self.map_info[node_id_2].random_pos = rand_pos_1
self.random_pos[rand_pos_1] = node_id_2
self.random_pos[rand_pos_2] = node_id_1
def make_tried_(self, info: ExtendedPeerInfo, node_id: int) -> None:
for bucket in range(NEW_BUCKET_COUNT):
pos = info.get_bucket_position(self.key, True, bucket)
if self.new_matrix[bucket][pos] == node_id:
self._set_new_matrix(bucket, pos, -1)
info.ref_count -= 1
assert info.ref_count == 0
self.new_count -= 1
cur_bucket = info.get_tried_bucket(self.key)
cur_bucket_pos = info.get_bucket_position(self.key, False, cur_bucket)
if self.tried_matrix[cur_bucket][cur_bucket_pos] != -1:
# Evict the old node from the tried table.
node_id_evict = self.tried_matrix[cur_bucket][cur_bucket_pos]
assert node_id_evict in self.map_info
old_info = self.map_info[node_id_evict]
old_info.is_tried = False
self._set_tried_matrix(cur_bucket, cur_bucket_pos, -1)
self.tried_count -= 1
# Find its position into new table.
new_bucket = old_info.get_new_bucket(self.key)
new_bucket_pos = old_info.get_bucket_position(self.key, True, new_bucket)
self.clear_new_(new_bucket, new_bucket_pos)
old_info.ref_count = 1
self._set_new_matrix(new_bucket, new_bucket_pos, node_id_evict)
self.new_count += 1
self._set_tried_matrix(cur_bucket, cur_bucket_pos, node_id)
self.tried_count += 1
info.is_tried = True
def clear_new_(self, bucket: int, pos: int) -> None:
if self.new_matrix[bucket][pos] != -1:
delete_id = self.new_matrix[bucket][pos]
delete_info = self.map_info[delete_id]
assert delete_info.ref_count > 0
delete_info.ref_count -= 1
self._set_new_matrix(bucket, pos, -1)
if delete_info.ref_count == 0:
self.delete_new_entry_(delete_id)
def mark_good_(self, addr: PeerInfo, test_before_evict: bool, timestamp: int) -> None:
self.last_good = timestamp
(info, node_id) = self.find_(addr)
if not addr.is_valid(self.allow_private_subnets):
return None
if info is None:
return None
if node_id is None:
return None
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
# update info
info.last_success = timestamp
info.last_try = timestamp
info.num_attempts = 0
# timestamp is not updated here, to avoid leaking information about
# currently-connected peers.
# if it is already in the tried set, don't do anything else
if info.is_tried:
return None
# find a bucket it is in now
bucket_rand = randrange(NEW_BUCKET_COUNT)
new_bucket = -1
for n in range(NEW_BUCKET_COUNT):
cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT
cur_new_bucket_pos = info.get_bucket_position(self.key, True, cur_new_bucket)
if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id:
new_bucket = cur_new_bucket
break
        # If no bucket is found, something bad happened; give up.
if new_bucket == -1:
return None
# NOTE(Florin): Double check this. It's not used anywhere else.
# which tried bucket to move the entry to
tried_bucket = info.get_tried_bucket(self.key)
tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket)
# Will moving this address into tried evict another entry?
if test_before_evict and self.tried_matrix[tried_bucket][tried_bucket_pos] != -1:
if len(self.tried_collisions) < TRIED_COLLISION_SIZE:
if node_id not in self.tried_collisions:
self.tried_collisions.append(node_id)
else:
self.make_tried_(info, node_id)
def delete_new_entry_(self, node_id: int) -> None:
info = self.map_info[node_id]
if info is None or info.random_pos is None:
return None
self.swap_random_(info.random_pos, len(self.random_pos) - 1)
self.random_pos = self.random_pos[:-1]
del self.map_addr[info.peer_info.host]
del self.map_info[node_id]
self.new_count -= 1
def add_to_new_table_(self, addr: TimestampedPeerInfo, source: Optional[PeerInfo], penalty: int) -> bool:
is_unique = False
peer_info = PeerInfo(
addr.host,
addr.port,
)
if not peer_info.is_valid(self.allow_private_subnets):
return False
(info, node_id) = self.find_(peer_info)
if info is not None and info.peer_info.host == addr.host and info.peer_info.port == addr.port:
penalty = 0
if info is not None:
# periodically update timestamp
currently_online = time.time() - addr.timestamp < 24 * 60 * 60
update_interval = 60 * 60 if currently_online else 24 * 60 * 60
if addr.timestamp > 0 and (
info.timestamp > 0 or info.timestamp < addr.timestamp - update_interval - penalty
):
info.timestamp = max(0, addr.timestamp - penalty)
# do not update if no new information is present
if addr.timestamp == 0 or (info.timestamp > 0 and addr.timestamp <= info.timestamp):
return False
# do not update if the entry was already in the "tried" table
if info.is_tried:
return False
# do not update if the max reference count is reached
if info.ref_count == NEW_BUCKETS_PER_ADDRESS:
return False
# stochastic test: previous ref_count == N: 2^N times harder to increase it
factor = 1 << info.ref_count
if factor > 1 and randrange(factor) != 0:
return False
else:
(info, node_id) = self.create_(addr, source)
info.timestamp = max(0, info.timestamp - penalty)
self.new_count += 1
is_unique = True
new_bucket = info.get_new_bucket(self.key, source)
new_bucket_pos = info.get_bucket_position(self.key, True, new_bucket)
if self.new_matrix[new_bucket][new_bucket_pos] != node_id:
add_to_new = self.new_matrix[new_bucket][new_bucket_pos] == -1
if not add_to_new:
info_existing = self.map_info[self.new_matrix[new_bucket][new_bucket_pos]]
if info_existing.is_terrible() or (info_existing.ref_count > 1 and info.ref_count == 0):
add_to_new = True
if add_to_new:
self.clear_new_(new_bucket, new_bucket_pos)
info.ref_count += 1
if node_id is not None:
self._set_new_matrix(new_bucket, new_bucket_pos, node_id)
else:
if info.ref_count == 0:
if node_id is not None:
self.delete_new_entry_(node_id)
return is_unique
def attempt_(self, addr: PeerInfo, count_failures: bool, timestamp: int) -> None:
info, _ = self.find_(addr)
if info is None:
return None
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
info.last_try = timestamp
if count_failures and info.last_count_attempt < self.last_good:
info.last_count_attempt = timestamp
info.num_attempts += 1
def select_peer_(self, new_only: bool) -> Optional[ExtendedPeerInfo]:
if len(self.random_pos) == 0:
return None
if new_only and self.new_count == 0:
return None
# Use a 50% chance for choosing between tried and new table entries.
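        # In both branches an entry is picked at random and accepted with probability roughly
        # min(1, chance * get_selection_chance()): randbits(30) is uniform in [0, 2**30), so the
        # comparison against chance * selection_chance * (1 << 30) implements the acceptance test.
        # Every rejection multiplies chance by 1.2, so some entry is eventually returned.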
if not new_only and self.tried_count > 0 and (self.new_count == 0 or randrange(2) == 0):
chance = 1.0
start = time.time()
cached_tried_matrix_positions: List[Tuple[int, int]] = []
if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE):
cached_tried_matrix_positions = list(self.used_tried_matrix_positions)
while True:
if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE):
if len(self.used_tried_matrix_positions) == 0:
log.error(f"Empty tried table, but tried_count shows {self.tried_count}.")
return None
                    # The table is sparse; randomly pick from the cached positions list.
index = randrange(len(cached_tried_matrix_positions))
tried_bucket, tried_bucket_pos = cached_tried_matrix_positions[index]
else:
                    # The table is dense; randomly probing positions is faster than building the positions list.
tried_bucket = randrange(TRIED_BUCKET_COUNT)
tried_bucket_pos = randrange(BUCKET_SIZE)
while self.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
tried_bucket = (tried_bucket + randbits(LOG_TRIED_BUCKET_COUNT)) % TRIED_BUCKET_COUNT
tried_bucket_pos = (tried_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE
node_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
assert node_id != -1
info = self.map_info[node_id]
if randbits(30) < (chance * info.get_selection_chance() * (1 << 30)):
end = time.time()
log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in tried table.")
return info
chance *= 1.2
else:
chance = 1.0
start = time.time()
cached_new_matrix_positions: List[Tuple[int, int]] = []
if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE):
cached_new_matrix_positions = list(self.used_new_matrix_positions)
while True:
if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE):
if len(self.used_new_matrix_positions) == 0:
log.error(f"Empty new table, but new_count shows {self.new_count}.")
return None
index = randrange(len(cached_new_matrix_positions))
new_bucket, new_bucket_pos = cached_new_matrix_positions[index]
else:
new_bucket = randrange(NEW_BUCKET_COUNT)
new_bucket_pos = randrange(BUCKET_SIZE)
while self.new_matrix[new_bucket][new_bucket_pos] == -1:
new_bucket = (new_bucket + randbits(LOG_NEW_BUCKET_COUNT)) % NEW_BUCKET_COUNT
new_bucket_pos = (new_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE
node_id = self.new_matrix[new_bucket][new_bucket_pos]
assert node_id != -1
info = self.map_info[node_id]
if randbits(30) < chance * info.get_selection_chance() * (1 << 30):
end = time.time()
log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in new table.")
return info
chance *= 1.2
def resolve_tried_collisions_(self) -> None:
for node_id in self.tried_collisions[:]:
resolved = False
if node_id not in self.map_info:
resolved = True
else:
info = self.map_info[node_id]
peer = info.peer_info
tried_bucket = info.get_tried_bucket(self.key)
tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket)
if self.tried_matrix[tried_bucket][tried_bucket_pos] != -1:
old_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
old_info = self.map_info[old_id]
if time.time() - old_info.last_success < 4 * 60 * 60:
resolved = True
elif time.time() - old_info.last_try < 4 * 60 * 60:
if time.time() - old_info.last_try > 60:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
elif time.time() - info.last_success > 40 * 60:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
else:
self.mark_good_(peer, False, math.floor(time.time()))
resolved = True
if resolved:
self.tried_collisions.remove(node_id)
def select_tried_collision_(self) -> Optional[ExtendedPeerInfo]:
if len(self.tried_collisions) == 0:
return None
new_id = choice(self.tried_collisions)
if new_id not in self.map_info:
self.tried_collisions.remove(new_id)
return None
new_info = self.map_info[new_id]
tried_bucket = new_info.get_tried_bucket(self.key)
tried_bucket_pos = new_info.get_bucket_position(self.key, False, tried_bucket)
old_id = self.tried_matrix[tried_bucket][tried_bucket_pos]
return self.map_info[old_id]
def get_peers_(self) -> List[TimestampedPeerInfo]:
addr: List[TimestampedPeerInfo] = []
num_nodes = math.ceil(23 * len(self.random_pos) / 100)
if num_nodes > 1000:
num_nodes = 1000
for n in range(len(self.random_pos)):
if len(addr) >= num_nodes:
return addr
rand_pos = randrange(len(self.random_pos) - n) + n
self.swap_random_(n, rand_pos)
info = self.map_info[self.random_pos[n]]
if not info.peer_info.is_valid(self.allow_private_subnets):
continue
if not info.is_terrible():
cur_peer_info = TimestampedPeerInfo(
info.peer_info.host,
uint16(info.peer_info.port),
uint64(info.timestamp),
)
addr.append(cur_peer_info)
return addr
def cleanup(self, max_timestamp_difference: int, max_consecutive_failures: int):
now = int(math.floor(time.time()))
for bucket in range(NEW_BUCKET_COUNT):
for pos in range(BUCKET_SIZE):
if self.new_matrix[bucket][pos] != -1:
node_id = self.new_matrix[bucket][pos]
cur_info = self.map_info[node_id]
if (
cur_info.timestamp < now - max_timestamp_difference
and cur_info.num_attempts >= max_consecutive_failures
):
self.clear_new_(bucket, pos)
def connect_(self, addr: PeerInfo, timestamp: int):
info, _ = self.find_(addr)
if info is None:
return None
# check whether we are talking about the exact same peer
if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
return None
update_interval = 20 * 60
if timestamp - info.timestamp > update_interval:
info.timestamp = timestamp
async def size(self) -> int:
async with self.lock:
return len(self.random_pos)
async def add_to_new_table(
self,
addresses: List[TimestampedPeerInfo],
source: Optional[PeerInfo] = None,
penalty: int = 0,
) -> bool:
is_added = False
async with self.lock:
for addr in addresses:
cur_peer_added = self.add_to_new_table_(addr, source, penalty)
is_added = is_added or cur_peer_added
return is_added
    # Mark an entry as accessible.
async def mark_good(
self,
addr: PeerInfo,
test_before_evict: bool = True,
timestamp: int = -1,
):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
self.mark_good_(addr, test_before_evict, timestamp)
# Mark an entry as connection attempted to.
async def attempt(
self,
addr: PeerInfo,
count_failures: bool,
timestamp: int = -1,
):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
self.attempt_(addr, count_failures, timestamp)
# See if any to-be-evicted tried table entries have been tested and if so resolve the collisions.
async def resolve_tried_collisions(self):
async with self.lock:
self.resolve_tried_collisions_()
# Randomly select an address in tried that another address is attempting to evict.
async def select_tried_collision(self) -> Optional[ExtendedPeerInfo]:
async with self.lock:
return self.select_tried_collision_()
# Choose an address to connect to.
async def select_peer(self, new_only: bool = False) -> Optional[ExtendedPeerInfo]:
async with self.lock:
return self.select_peer_(new_only)
# Return a bunch of addresses, selected at random.
async def get_peers(self) -> List[TimestampedPeerInfo]:
async with self.lock:
return self.get_peers_()
async def connect(self, addr: PeerInfo, timestamp: int = -1):
if timestamp == -1:
timestamp = math.floor(time.time())
async with self.lock:
return self.connect_(addr, timestamp)
|
the-stack_0_1895 | # -*- coding: utf-8 -*-
"""
All spiders should yield data shaped according to the Open Civic Data
specification (http://docs.opencivicdata.org/en/latest/data/event.html).
"""
from datetime import datetime
from pytz import timezone
from legistar.events import LegistarEventsScraper
from documenters_aggregator.spider import Spider
class Cook_boardSpider(Spider):
name = 'cook_board'
long_name = 'Cook County Board of Commissioners'
allowed_domains = ['cook-county.legistar.com']
event_timezone = 'America/Chicago'
start_urls = ['https://www.cook-county.legistar.com'] # use LegistarEventsScraper instead
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the `Open Civic Data
event standard <http://docs.opencivicdata.org/en/latest/data/event.html>`.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
events = self._make_legistar_call()
return self._parse_events(events)
def _make_legistar_call(self, since=None):
les = LegistarEventsScraper(jurisdiction=None, datadir=None)
les.EVENTSPAGE = 'https://cook-county.legistar.com/Calendar.aspx'
les.BASE_URL = 'https://cook-county.legistar.com'
if not since:
since = datetime.today().year
return les.events(since=since)
def _parse_events(self, events):
for item, _ in events:
start_time = self._parse_start(item)
data = {
'_type': 'event',
'name': self._parse_name(item),
'description': self._parse_description(item),
'classification': self._parse_classification(item),
'start_time': start_time,
'end_time': self._parse_end(item),
'all_day': self._parse_all_day(item),
'timezone': self.event_timezone,
'location': self._parse_location(item),
'sources': self._parse_sources(item)
}
data['status'] = self._parse_status(item, data['start_time'])
data['id'] = self._generate_id(data, start_time)
yield data
def _parse_classification(self, item):
"""
Parse or generate classification (e.g. town hall).
"""
return 'Not classified'
def _parse_status(self, item, start_time):
"""
passed = meeting already started
tentative = no agenda posted
confirmed = agenda posted
"""
if datetime.now().replace(tzinfo=timezone(self.event_timezone)) > start_time:
return 'passed'
if 'url' in item['Agenda']:
return 'confirmed'
return 'tentative'
def _parse_location(self, item):
"""
        Parse or generate location. URL, latitude and longitude are all
optional and may be more trouble than they're worth to collect.
"""
return {
'url': None,
'address': item.get('Meeting Location', None),
'name': None,
'coordinates': {
'latitude': None,
'longitude': None,
},
}
def _parse_all_day(self, item):
"""
Parse or generate all-day status. Defaults to false.
"""
return False
def _parse_name(self, item):
"""
Parse or generate event name.
"""
return item['Name']['label']
def _parse_description(self, item):
"""
        Parse or generate event description.
"""
agenda = item['Agenda']
try:
return agenda['url']
        except (KeyError, TypeError):
return agenda
def _parse_start(self, item):
"""
Parse start date and time.
"""
time = item.get('Meeting Time', None)
date = item.get('Meeting Date', None)
if date and time:
time_string = '{0} {1}'.format(date, time)
naive = datetime.strptime(time_string, '%m/%d/%Y %I:%M %p')
return self._naive_datetime_to_tz(naive, self.event_timezone)
return None
def _parse_end(self, item):
"""
Parse end date and time.
"""
return None
def _parse_sources(self, item):
"""
Parse sources.
"""
try:
url = item['Name']['url']
        except (KeyError, TypeError):
url = 'https://cook-county.legistar.com/Calendar.aspx'
return [{'url': url, 'note': ''}]
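# Illustrative usage note (assuming the standard Scrapy command line is used for this project):
#   scrapy crawl cook_board -o cook_board.json
# runs this spider and writes the yielded Open Civic Data event dicts to cook_board.json.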
|
the-stack_0_1898 | import json
import numpy as np
import os.path, datetime, subprocess
#from astropy.io import fits as pyfits
#from time import sleep
#from scipy.ndimage import gaussian_filter#, rotate
#from scipy.interpolate import interp1d
#from scipy.optimize import curve_fit
from .tools import *
from .phi_fits import *
from .phi_gen import *
from .phi_reg import *
from .phi_rte import *
#from .phi_utils import newton,azimutal_average,limb_darkening,genera_2d,find_string
from .phifdt_pipe_modules import phi_correct_dark,phi_correct_prefilter,phi_apply_demodulation,\
crosstalk_ItoQUV,cross_talk_QUV,crosstalk_ItoQUV2d,phi_correct_ghost,phi_correct_fringes,\
generate_level2
import SPGPylibs.GENtools.plot_lib as plib
import SPGPylibs.GENtools.cog as cog
#global variables
PLT_RNG = 5
def phifdt_pipe(json_input = None,
data_f: str = None, dark_f: str = None, flat_f: str = None,
input_data_dir: str = './', output_dir:str = './',
instrument: str = 'FDT40',
flat_c:bool = True, dark_c:bool = True, ItoQUV:bool = False, VtoQU:bool = False, ind_wave:bool = False, #correction options
hough_params:list = [250, 800, 100],
norm_f:bool = False, flat_scaling:float = 1., flat_index:list = None, #flatfield options
prefilter:bool = True, prefilter_fits:str = '0000990710_noMeta.fits',
realign:bool = False, verbose:bool = True, shrink_mask:int = 2, correct_fringes:str = False,
correct_ghost:bool = False, putmediantozero:bool = True,
rte = False, debug:bool = False, nlevel:float = 0.3, center_method:str = 'circlefit',
loopthis = 0, #developing purpose
do2d = 0, outfile = None #not in use
) -> int:
'''
Parameters
----------
:param str data_f:
Input parameters
----------
json_input = json input (for convenience). All parameters are then described there).
data_f = data_f : string
Fits file of the raw FDT data (for path use input_data_dir keyword)
dark_f = dark_f : string
Fits file of a Valid dark file (processed dark) (including path, if necessary)
flat_f = flat_f : string
Fits file of a Valid FDT flatfield (including path, if necessary)
input_data_dir: directory where input data is located. Default is local directory
output_dir: output directory. If default, takes local './'
IMPORTANT: dark_f, flat_f, and prefilter file must be provided with the FULL PATH.
the data has to be provided as a list of files (fits) and the directory via "input_data_dir = "
The output directories (Depending on RTE on or off) are
A) directory + Level 2: reduced raw data L2+ilam plus RTE output (so far, BLOS, VLOS and SO1: continuum)
B) directory + Level 2 + png: png figures of the RTE output (so far, BLOS and VLOS)
B) directory + Level 2 + npz: NPZ (python) reduced raw data L2
** OPTIONAL ARGUMENTS **
instrument = 'FDT40' : select the instrument and PMP temperature (for demod)
-> implemented cases: -- 'FDT40','FDT45' --
flat_c = True : default is to apply flat field correction to the data
dark_c = True : default is to apply dark field correction to the data
norm_f = False : To normalize flats internally to the mean value of 5% of the disk (central) intensity
flat_scaling = 1.0 : flat scaling (flat = flat / flat_scaling)
flat_index = None : in case you want a particular flat to be applied at another wave, e.g.,
flat_index = [5,1,2,3,4,0] exchange the first and last wave flats
This is for testing stuff, mainly.
    prefilter = True : To correct for the prefilter
prefilter_fits = '../RSW1/0000990710_noMeta.fits' : User should provide prefilter data fits file location
realign = False : bool
Realign all images before demodulating using FFT
ind_wave = False : bool
Correct crosstalk from I to QUV for individual wavelengths
    verbose: True prints a lot of stuff (and plots)
shrink_mask = 2: 'Number of pixels to contract the sun mask for output of RTE'
correct_fringes = False: Fringe correction
'manual': first FM version. Freq, mask applied to all images with fixed frequencies
'auto' : calculate fringes freq. automatically (in development).
    correct_ghost = False: Correct ghost images
    putmediantozero = True: puts median value to zero before RTE
rte = False: Run RTE if rte == 'RTE' or rte == 'CE' or rte == 'CE+RTE':
'RTE': RTE only
        'CE+RTE': RTE with classical estimates
'CE': Only classical estimates
'cog': Only Center of gravity (To be implemented)
    ItoQUV= False: apply crosstalk correction from Stokes I to Stokes Q, U, and V.
    VtoQU= False: apply crosstalk correction from Stokes V to Stokes Q and U.
nlevel = 0.3: Noise level above which to evaluate cross_talk_VQU (To be implemented)
center_method = ['circlefit','hough']
Default is 'circlefit'. If set to 'hough' uses the given find_center parameters
If find_center is set to None then uses header information, in any case
hough_params = [250, 800, 100]; inner_radius = 250, outer_radius = 600, steps = 100 : initial values for finding sun center
verbose: increase the verbosity (many plots here) - default False
Returns
-------
0 if fail, 1 any other case
Raises
------
References
----------
Examples
--------
>>> import SPGPylibs as spg
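    A minimal call might look like this (illustrative sketch only; the file names are
    placeholders and phifdt_pipe is assumed to be exposed at the package level):
    >>> spg.phifdt_pipe(data_f = 'mydata_fdt_ilam.fits', input_data_dir = './', output_dir = './',
    ...                 dark_f = './dark.fits', flat_f = './flat.fits', instrument = 'FDT40', rte = 'RTE')
    or, with all options collected in a json file (same keys as the keyword arguments):
    >>> spg.phifdt_pipe(json_input = './config.json')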
Notes
-----
    This program is not optimized for speed. It assumes that the input data has 6 wavelengths.
    C-MILOS must be compiled in each specific machine (C)
    The software updates some of the information in the fits keywords:
TODO:
# data_f -> input data (single file for FDT - ADD MULTIPLE FILES!!!! )
keyword to provide fixed cross-talk coefficients
keyword to provide fixed data normalization (based on a first obs)
    # pending to add class style (for module development)
FDT pipeline steps:
1- Read data
2- Check dimensions (open to bugs since only few data were tested)
3- Read flats
4- Read and correct dark field (taking into account the scaling)
5- Find center of the Sun in the data for masking and ghost correction
6- get wavelength sampling from header
7- move the continuum to the blue (if needed) in flat and data
8- interpolate flats (Not implemented yet - in case of deviations in voltage)
    9- Correct flat-field
    10- Correct prefilter - needs prefilter data file!
    11- Correct ghost (complex step) [NEEDED in FM - high priority]
    12- realign data before demodulation [NEEDED in FM - low priority]
    13- Demodulate data using appropriate dem matrix
    14- Normalize to average continuum [NEEDED in FM - low priority - determine the Icont automatically]
    15- correct cross-talk from I to QUV [NEEDED in FM - evaluation of these automatically - mid priority]
    16- correct cross-talk from V to QU (interactive) [NEEDED in FM - evaluation of these automatically - mid priority]
    17- correct cross-talk from I to QUV in 2D (do not use this, your PC will have a hangover)
    18- Fringes correction [NEEDED in FM -TBD]
    19- median to zero [NEEDED in FM]
    20- save
    21- RTE (RTE or CE or CE+RTE)
    22- plots
Keywords in the header (modified or added) within this program:
CAL_DARK = 26181001 / Onboard calibrated for dark field ! Dark correction ( DID/file of dark if True)
CAL_FLAT = 26181101 / Onboard calibrated for gain table ! Dark correction ( DID/file of flat if True)
CAL_PRE = Prefilter / Prefilter correction ( DID/file of flat if True)
CAL_GHST= Prefilter / Ghost correction ( name+version of py module if True )
CAL_REAL= Prefilter / Prealigment of images before demodulation ( name+version of py module if True )
    CAL_IPOL= 990510 / Onboard calibrated for instrumental polarization ! demodulation ( DID of demod matrix if True ) - demod matrix may be 4x4 or 2048x2048x4x4
CAL_CRT0= float / cross-talk from I to Q (slope value, wrt normalized data in python)
CAL_CRT1= float / cross-talk from I to Q (off-set value, wrt normalized data in python)
CAL_CRT2= float / cross-talk from I to U (slope value, wrt normalized data in python)
CAL_CRT3= float / cross-talk from I to U (off-set value, wrt normalized data in python)
CAL_CRT4= float / cross-talk from I to V (slope value, wrt normalized data in python)
CAL_CRT5= float / cross-talk from I to V (off-set value, wrt normalized data in python)
CAL_CRT6= float / cross-talk from V to Q (slope value, wrt normalized data in python)
CAL_CRT7= float / cross-talk from V to Q (off-set value, wrt normalized data in python)
CAL_CRT8= float / cross-talk from V to U (slope value, wrt normalized data in python)
CAL_CRT9= float / cross-talk from V to U (off-set value, wrt normalized data in python)
    CAL_NORM= 990510 / Normalization constant (PROC_Ic)
CAL_FRIN= 990510 / Fringe correction ( name+version of py module if True ) TBD (posibly we need the freqs.)
* CAL_PSF= 990510 / Onboard calibrated for instrumental PSF ! TBD
CAL_RTE= 990510 / ok
* CAL_SCIP= 'None' / Onboard scientific data analysis
* RTE_ITER= 4294967295 / Number RTE inversion iterations
(*) are not touched in this software.
Keywords CRPIX1 and CRPIX2 are updated following the new center calculation within the pipeline. Old values are stored in the history.
Keywords CRVAL1 and CRVAL2 are NOT updated but should be SET to zero!!!!
'''
version = 'V1.0 July 2021'
version = 'V1.0 13th September 2021'
version = 'V1.0 3th November 2021'
    #added json configuration and modified all keyword names to be consistent with HRT pipe
version_cmilos = 'CMILOS v0.91 (July - 2021)'
printc('--------------------------------------------------------------',bcolors.OKGREEN)
    printc('PHI FDT data reduction software (for developing purposes only) ',bcolors.OKGREEN)
printc(' version: '+ version,bcolors.OKGREEN)
printc(' version_cmilos: '+ version_cmilos,bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
if json_input:
# =========================================================================== #
# READING CONFIG FILE AND PRINTING
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(' Reading config json file '+json_input,bcolors.OKGREEN)
with open(json_input) as j:
CONFIG = json.load(j)
verbose = CONFIG['verbose']
input_data_dir = CONFIG['input_data_dir']
data_f = CONFIG['data_f']
shrink_mask = CONFIG['shrink_mask']
center_method = CONFIG['center_method']
hough_params = CONFIG['hough_params']
instrument = CONFIG['instrument']
flat_f = CONFIG['flat_f']
dark_f = CONFIG['dark_f']
dark_c = CONFIG['dark_c']
flat_c = CONFIG['flat_c']
flat_index = CONFIG['flat_index']
norm_f = CONFIG['norm_f']
flat_scaling = CONFIG['flat_scaling']
prefilter_fits = CONFIG['prefilter_fits']
prefilter = CONFIG['prefilter']
output_dir = CONFIG['output_dir']
rte = CONFIG['rte']
correct_fringes = CONFIG['correct_fringes']
correct_ghost = CONFIG['correct_ghost']
putmediantozero = CONFIG['putmediantozero']
debug = CONFIG['debug']
loopthis = CONFIG['loopthis']
ItoQUV = CONFIG['ItoQUV']
VtoQU = CONFIG['VtoQU']
realign = CONFIG['realign']
ind_wave = CONFIG['ind_wave']
nlevel = CONFIG['nlevel']
import pprint
# Prints the nicely formatted dictionary
pprint.pprint(CONFIG)#, sort_dicts=False)
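        # An illustrative minimal config json for this branch (values shown are placeholders that
        # mirror the keyword defaults; every key read above must be present, since CONFIG is
        # indexed directly and a missing key raises KeyError):
        # {
        #   "input_data_dir": "./", "data_f": "mydata_fdt_ilam.fits",
        #   "dark_f": "./dark.fits", "flat_f": "./flat.fits", "instrument": "FDT40",
        #   "dark_c": true, "flat_c": true, "norm_f": false, "flat_scaling": 1.0, "flat_index": null,
        #   "prefilter": true, "prefilter_fits": "0000990710_noMeta.fits",
        #   "shrink_mask": 2, "center_method": "circlefit", "hough_params": [250, 800, 100],
        #   "output_dir": "./", "rte": false, "correct_fringes": false, "correct_ghost": false,
        #   "putmediantozero": true, "debug": false, "loopthis": 0, "ItoQUV": false, "VtoQU": false,
        #   "realign": false, "ind_wave": false, "nlevel": 0.3, "verbose": true
        # }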
#-----------------
# READ DATA
#-----------------
data_filename = input_data_dir + data_f
    if os.path.isfile(data_filename):
        print("File exists")
    else:
        print("File does not exist")
try:
data, header = fits_get(data_filename)
printc('-->>>>>>> Reading Data file: '+data_filename,color=bcolors.OKGREEN)
#
# PXBEG1 = 385 ; First read-out pixel in dimension 1
# PXEND1 = 1664 ; Last read-out pixel in dimension 1
# PXBEG2 = 385 ; First read-out pixel in dimension 2
# PXEND2 = 1664 ; Last read-out pixel in dimension 2
DID = header['PHIDATID']
ACC = header['ACCACCUM']
printc('-->>>>>>> data DID '+DID,color=bcolors.OKGREEN)
printc(' DATA IS DIVIDED by 256. ',color=bcolors.OKGREEN)
printc('-->>>>>>> Reshaping data to [wave,Stokes,y-dim,x-dim] ',color=bcolors.OKGREEN)
zd,yd,xd = data.shape
data = np.reshape(data,(zd//4,4,yd, xd))
data = data / 256. #from fix to 32
data = np.ascontiguousarray(data)
#/ PHI_FITS_FPA_settings
# FPIMGCMD= 8 / FPA image command
# FPA_SROW= 0 / FPA start row setting FPA_EROW= 1022 / FPA end row setting
# FPA_NIMG= 20 / FPA number of images set FPEXPSTC= 1592452786 / [s] FPA exposure start time coarse
# FPEXPSTF= 699245 / [us] FPA exposure start time fine
# INTTIME = 0.01 / [s] Exposure time of single readout
# TELAPSE = 58.1974400877953 / [s]
# Elapsed time between start and end of obser
# NSUMEXP = 480 / Number of detector readouts
# XPOSURE = 4.8 / [s] Total effective exposure time
# ACCLENGT= 4194304 / ACCU number of pixel set
# ACCNROWS= 6 / ACCU number of rows set
# ACCROWIT= 1 / ACCU number of row iterations set
# ACCNCOLS= 4 / ACCU number of columns set
# ACCCOLIT= 1 / ACCU number of column iterations set
# ACCACCUM= 20 / ACCU number of accumulations set
# ACCADDR = 0 / ACCU readout address (start)
except Exception:
printc("ERROR, Unable to open fits file: {}",data_filename,color=bcolors.FAIL)
return 0
header['history'] = ' Data processed with phifdt_pipe.py '+ version
header['history'] = ' and time '+ str(datetime.datetime.now())
header['history'] = ' Parameters normalize_flat: '+ str(norm_f)
header['history'] = ' Parameters flat_scaling: '+ str(flat_scaling)
header['history'] = ' Parameters shrink_mask: '+ str(shrink_mask)
header['history'] = ' Parameters center_method: '+ str(center_method)
header['history'] = ' Parameters Hough: '+ str(hough_params)
if verbose:
plib.show_one(data[0,0,:,:],vmin=0,xlabel='pixel',ylabel='pixel',title='Data first image raw (1 of 24)',cbarlabel='DN',save=None,cmap='gray')
# * CAL_RTE= 990510 / ok
# * CAL_SCIP= 'None' / Onboard scientific data analysis
# * RTE_ITER= 4294967295 / Number RTE inversion iterations
# * PHIDATID= '142010402' / PHI dataset Id
#-----------------
# TAKE DATA DIMENSIONS AND SCALING
#-----------------
PXBEG1 = int(header['PXBEG1']) - 1
PXEND1 = int(header['PXEND1']) - 1
PXBEG2 = int(header['PXBEG2']) - 1
PXEND2 = int(header['PXEND2']) - 1
printc('Dimensions: ',PXBEG1, PXEND1, PXBEG2, PXEND2,color=bcolors.OKGREEN)
if xd != (PXEND1 - PXBEG1 + 1) or yd != (PXEND2 - PXBEG2 + 1):
printc('ERROR, Keyword dimensions and data array dimensions dont match ',color=bcolors.FAIL)
return 0
if xd < 2047:
printc(' data cropped to: [',PXBEG1,',',PXEND1,'],[',PXBEG2,',',PXEND2,']',color=bcolors.WARNING)
data_scale = fits_get(data_filename,scaling = True)
#-----------------
# READ FLAT FIELDS
#-----------------
if flat_c:
printc('-->>>>>>> Reading flat file'+flat_f,color=bcolors.OKGREEN)
printc(' Assumes they are already normalized to ONE ',color=bcolors.OKGREEN)
printc(' input should be [wave X Stokes,y-dim,x-dim].',color=bcolors.OKGREEN)
try:
dummy,flat_header = fits_get(flat_f)
fz_d,fy_d,fx_d = dummy.shape
flat = np.zeros([24,2048,2048]).astype(np.float32)
PXBEG1_f = int(flat_header['PXBEG1']) - 1
PXEND1_f = int(flat_header['PXEND1']) - 1
PXBEG2_f = int(flat_header['PXBEG2']) - 1
PXEND2_f = int(flat_header['PXEND2']) - 1
if fx_d < 2047:
printc(' input flat was cropped to: [',PXBEG1_f,',',PXEND1_f,'],[',PXBEG2_f,',',PXEND2_f,']',color=bcolors.WARNING)
flat[:,PXBEG1_f:PXEND1_f+1,PXBEG2_f:PXEND2_f+1] = dummy
del dummy
printc('-->>>>>>> Reshaping Flat to [wave,Stokes,y-dim,x-dim] ',color=bcolors.OKGREEN)
fz,fy,fx = flat.shape
flat = np.reshape(flat,(fz//4,4,fy,fx))
except Exception:
printc("ERROR, Unable to open flats file: {}",flat_f,color=bcolors.FAIL)
return 0
if verbose:
plib.show_one(flat[0,0,:,:],xlabel='pixel',ylabel='pixel',title='Flat first image raw (1 of 24)',cbarlabel='Any (as input)',save=None,cmap='gray')
else:
printc('-->>>>>>> No flats mode ',color=bcolors.WARNING)
#-----------------
# READ AND CORRECT DARK FIELD
#-----------------
if dark_c:
data,header = phi_correct_dark(dark_f,data,header,data_scale,verbose = verbose)
else:
printc('-->>>>>>> No darks mode ',color=bcolors.WARNING)
#-----------------
# FIND DATA CENTER
#-----------------
printc('-->>>>>>> finding the center of the solar disk (needed for masking) ',color=bcolors.OKGREEN)
try:
if center_method == 'Hough':
inner_radius,outer_radius,steps = hough_params
c, radius,threshold = find_circle_hough(data[0,0,:,:],inner_radius,outer_radius,steps,threshold = 0.01,normalize=False,verbose=False)
#c = np.roll(c,1)
cx = c[0]
cy = c[1]
#TBE PUT IN CORRECT UNITS
elif center_method == 'circlefit':
            cy,cx,radius=find_center(data[0,0,:,:]) # NOTE: returns cy, cx (in that order)
            c = np.array([int(cx),int(cy)]) # The vector is [0,1,2,...] == [x,y,z,...] == [cx,cy,cz,...], but the latter comes reversed
radius = int(radius)
elif center_method == None:
#get from header
cx = header['CRPIX1']
cy = header['CRPIX2']
            c = np.array([int(cx),int(cy)]) # The vector is [0,1,2,...] == [x,y,z,...] == [cx,cy,cz,...], but the latter comes reversed
radius = header['RSUN_ARC']/header['CDELT1']
else:
raise ValueError("ERROR in center determination method - check input 'circlefit','Hough',null/None")
except ValueError as err:
print(err.args)
return 0
    # Update header with new centers
if center_method == 'Hough' or center_method == 'circlefit':
        printc(' Update header with new center:',color=bcolors.OKBLUE)
printc(' OLD center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
        header['history'] = ' CRPIX 1 and CRPIX2 updated from ' + str(header['CRPIX1'])+ ' and ' + str(header['CRPIX2'])
header['CRPIX1'] = (round(cx, 2))
header['CRPIX2'] = (round(cy, 2))
printc(' NEW center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
printc('ATTENTION: Keywords CRVAL1 and CRVAL2 are NOT updated but should be SET to zero!!!!',color=bcolors.FAIL)
else:
printc(' Using header image center:',color=bcolors.OKBLUE)
printc(' at: CRPIX1[x]=',header['CRPIX1'],' CRPIX2[y]=',header['CRPIX2'],' radius=',radius,color=bcolors.OKBLUE)
    # NOTE:
    # find_circle_hough returns c with c[0] = x and c[1] = y !!!!!!!!!!!!!!
    # This comes from the KLL code, where it is defined that way (reversed) in the votes() routine
#-----------------
# TAKE ONLY DISK WITH MARGIN
#-----------------
printc('-->>>>>>> Creating a mask for RTE with ',shrink_mask,' px margin')
size_of_mask = radius - shrink_mask
rx = [int(c[0]-size_of_mask),int(c[0]+size_of_mask)]
ry = [int(c[1]-size_of_mask),int(c[1]+size_of_mask)]
mask,coords = generate_circular_mask([xd-1,yd-1],size_of_mask,size_of_mask)
mask = shift(mask, shift=(c[0]-xd//2,c[1]-yd//2), fill_value=0)
printc(' RX = ', rx, 'RY = ', ry, color=bcolors.WARNING)
#-----------------
# GET INFO ABOUT VOLTAGES/WAVELENGTHS, determine continuum and new flat
#-----------------
printc('-->>>>>>> Obtaining voltages from data ',color=bcolors.OKGREEN)
wave_axis,voltagesData,tunning_constant,cpos,ref_wavelength = fits_get_sampling(data_filename)
printc(' Data FG voltages: ',voltagesData,color=bcolors.OKBLUE)
printc(' Continuum position at wave: ', cpos,color=bcolors.OKBLUE)
printc(' Data ref_wavelength [mA]: ',ref_wavelength,color=bcolors.OKBLUE)
printc(' Data wave axis [mA]: ',wave_axis,color=bcolors.OKBLUE)
printc(' Data wave axis - axis[0] [mA]: ',wave_axis - wave_axis[0],color=bcolors.OKBLUE)
dummy_1 = (voltagesData-np.roll(voltagesData,-1))*(tunning_constant*1000)
dummy_2 = np.sort(np.abs(dummy_1))
sampling = np.mean(dummy_2[0:-2])
printc(' Data average sampling [mA]: ',sampling,' using tunning constant: ',(tunning_constant*1000),color=bcolors.OKBLUE)
#-----------------
# ROLL DATA IF CONTINUUM IS IN DIFFERENT POSITION
# Probably before demodulation and flat is better!!!!
#-----------------
if cpos != 0:
datar = np.copy(data)
voltagesDatar = np.copy(voltagesData)
wave_axisr = np.copy(wave_axis)
if voltagesData[cpos] < voltagesData[0]:
#continuum is on the right but it is in the blue!!!!
printc(' Rolling data to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the BLUE, stored in the RED, but we want it in the BLUE *',color=bcolors.WARNING)
for i in range(zd//4):
#print((i+1)%(zd//4),i%(zd//4),i,(zd//4))
datar[(i+1)%(zd//4),:,:,:] = data[i%(zd//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesDatar[(i+1)%(zd//4)] = voltagesData[i%(zd//4)] # np.roll(data, 4, axis=0)
wave_axisr[(i+1)%(zd//4)] = wave_axis[i%(zd//4)] # np.roll(data, 4, axis=0)
if voltagesData[cpos] > voltagesData[0]:
            #continuum is on the right and it is in the red!!!!
printc(' Rolling data to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the RED but we want it in the BLUE',color=bcolors.WARNING)
printc(' * Notice that this is necessary for the FLAT correction',color=bcolors.FAIL)
printc(' * but the wavelength axis has changed the continuum point *',color=bcolors.FAIL)
for i in range(zd//4):
#print((i+1)%(zd//4),i%(zd//4),i,(zd//4))
datar[(i+1)%(zd//4),:,:,:] = data[i%(zd//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesDatar[(i+1)%(zd//4)] = voltagesData[i%(zd//4)] # np.roll(data, 4, axis=0)
wave_axisr[(i+1)%(zd//4)] = wave_axis[i%(zd//4)] # np.roll(data, 4, axis=0)
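        # (The index gymnastics above amount to a one-step circular shift along the wavelength
        #  axis, i.e. datar = np.roll(data, 1, axis=0), applied consistently to the data, the
        #  voltages and the wavelength axis.)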
data = np.copy(datar)
voltagesData = np.copy(voltagesDatar)
wave_axis = np.copy(wave_axisr)
del datar
del voltagesDatar
del wave_axisr
cpos = 0
printc(' New FG voltages: ',voltagesData,color=bcolors.OKBLUE)
printc(' NEW continuum position at wave: ', cpos,color=bcolors.OKBLUE)
printc(' NEW data wave axis [mA]: ',wave_axis,color=bcolors.OKBLUE)
if flat_c:
printc('-->>>>>>> Obtaining voltages from flats ',color=bcolors.OKGREEN)
#ff = '../Nov-2020-STP122/solo_L0_phi-fdt-flat_0645767986_V202012091123I_0066181100.fits'
wave_axis_f,voltagesFlat,tunning_constant_f,cpos_f,ref_wavelength_f = fits_get_sampling(flat_f)
printc(' FLAT FG voltages: ',voltagesFlat,color=bcolors.OKBLUE)
printc(' FLAT Continuum position at wave: ', cpos_f,color=bcolors.OKBLUE)
printc(' FLAT wave axis [mA]: ',wave_axis_f,color=bcolors.OKBLUE)
printc(' FLAT ref_wavelength [mA]: ',ref_wavelength_f,color=bcolors.OKBLUE)
printc(' FLAT wave axis [mA]: ',wave_axis_f,color=bcolors.OKBLUE)
printc(' FLAT wave axis - ref_wavelength [mA]: ',wave_axis_f - ref_wavelength_f,color=bcolors.OKBLUE)
dummy_1 = (voltagesFlat-np.roll(voltagesFlat,-1))*(tunning_constant*1000)
dummy_2 = np.sort(np.abs(dummy_1))
sampling_f = np.mean(dummy_2[0:-2])
printc(' FLAT average sampling [mA]: ',sampling_f,color=bcolors.OKBLUE)
# printc('-->>>>>>> Reshaping flat to [wave,Stokes,y-dim,x-dim]',color=bcolors.OKGREEN)
# flat = np.reshape(flat,(fz//4,4,fy, fx))
#-----------------
# ROLL FLAT IF CONTINUUM IS IN DIFFERENT POSITION
# Probably before demodulation and flat is better!!!!
#-----------------
if flat_c:
if cpos_f != 0:
flatr = np.copy(flat)
voltagesFlatr = np.copy(voltagesFlat)
wave_axis_fr = np.copy(wave_axis_f)
if voltagesFlat[cpos_f] < voltagesFlat[0]:
#continuum is on the right but it is in the blue!!!!
printc(' Rolling flat to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the BLUE, stored in the RED, but we want it in the BLUE *',color=bcolors.WARNING)
for i in range(fz//4):
#print((i+1)%(fz//4),i%(fz//4),i)
flatr[(i+1)%(fz//4),:,:,:] = flat[i%(fz//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesFlatr[(i+1)%6] = voltagesFlat[i%(fz//4)] # np.roll(data, 4, axis=0)
wave_axis_fr[(i+1)%(zd//4)] = wave_axis_f[i%(zd//4)] # np.roll(data, 4, axis=0)
if voltagesFlat[cpos_f] > voltagesFlat[0]:
                #continuum is on the right and it is in the red!!!!
printc(' Rolling flat to move continuum from right (red) to left (blue)',color=bcolors.WARNING)
printc(' * the continuum was TAKEN in the RED but we want it in the BLUE',color=bcolors.WARNING)
printc(' * Notice that this is necessary for the FLAT correction',color=bcolors.FAIL)
printc(' * but the wavelength axis has changed the continuum point *',color=bcolors.FAIL)
for i in range(fz//4):
#print((i+1)%(fz//4),i%(fz//4),i)
flatr[(i+1)%(fz//4),:,:,:] = flat[i%(fz//4),:,:,:] # np.roll(data, 4, axis=0)
voltagesFlatr[(i+1)%6] = voltagesFlat[i%(fz//4)] # np.roll(data, 4, axis=0)
wave_axis_fr[(i+1)%(zd//4)] = wave_axis_f[i%(zd//4)] # np.roll(data, 4, axis=0)
flat = np.copy(flatr)
voltagesFlat = np.copy(voltagesFlatr)
wave_axis_f = np.copy(wave_axis_fr)
del flatr
del voltagesFlatr
del wave_axis_fr
cpos_f = 0
printc(' New Flat FG voltages: ',voltagesFlat,color=bcolors.OKBLUE)
printc(' NEW Flat continuum position at wave: ', cpos_f,color=bcolors.OKBLUE)
printc(' NEW Flat data wave axis [mA]: ',wave_axis_f,color=bcolors.OKBLUE)
# TODO: INTERPOLATE THE FLAT TO MATCH THE WAVELENG (CAVITY)
# from scipy.interpolate import RegularGridInterpolator
# x = np.linspace(0,2047,2048).astype(int)
# y = np.linspace(0,2047,2048).astype(int)
# z = np.array([-300.,-140.,-70.,0.,70.,140.,300.]) #ojo con el -300
# zn = np.array([-175.,-140.,-105.,-70.,-35.,0.,35.,70.,105.,140.,175.,300.])
# flat_rsw1 = np.concatenate(((flat_rsw1[5,:,:,:])[np.newaxis,:,:,:],flat_rsw1))
# fn = RegularGridInterpolator((z,y,x), flat_rsw1[:,0,:,:])
# pts = np.array([-40,10,10])
# print(fn(pts))
# pts = np.meshgrid(-40.,y,x)
# pts = np.array([m.flatten() for m in pts])
# flat_n = fn(pts.T)
# result = flat_n.reshape((2048,2048))
# plt.imshow(result,vmin=0.9,vmax=1.1)
# flat_n = np.zeros((12,4,2048,2048))
# for i in range(4):
# fn = RegularGridInterpolator((z,y,x), flat_rsw1[:,i,:,:],bounds_error=False)
# for j in range(12):
# print(i,zn[j])
# pts_list = np.meshgrid(zn[j],y,x)
# pts = np.array([m.flatten() for m in pts_list])
# flat_n[j,i,:,:] = fn(pts.T).reshape((2048,2048))
#-----------------
# APPLY FLAT CORRECTION
# TODO: TAKE THE REAL FLAT
#-----------------
factor = 0.05
rrx = [int(c[0]-radius*factor),int(c[0]+radius*factor)]
rry = [int(c[1]-radius*factor),int(c[1]+radius*factor)]
if flat_c:
printc('-->>>>>>> Correcting Flatfield',color=bcolors.OKGREEN)
try:
if (len(flat_index)) == 6:
print(' Changing flat index to ',flat_index)
except:
flat_index = [0,1,2,3,4,5]
for p in range(4):
for l in range(int(zd//4)):
print(' ... pol: ',p,' wave: ',l,' index: ',flat_index[l])
dummy_flat = (flat[flat_index[l],p,PXBEG2:PXEND2+1,PXBEG1:PXEND1+1]/float(flat_scaling))
if norm_f:
print(' normalizing flats using region x = [',rrx[0],':',rrx[1],'] y = ]',rry[0],':',rry[1],']')
mm = np.mean(dummy_flat[rry[0]:rry[1],rrx[0]:rrx[1]])
dummy_flat = dummy_flat / mm
data[l,p,:,:] = data[l,p,:,:]/dummy_flat
del dummy_flat
# locations = find_string(flat_f,'_')
# try:
# DID_flat = flat_f[locations[-1]+1:locations[-1]+10]
# print('DID: ',np.float(DID_flat))
# except:
# DID_flat = flat_f[:-4]
# printc("Unable to get DID from: {}",flat_f,color=bcolors.WARNING)
# locations = find_string(dark_f,'/')
# DID_flat = flat_f[locations[-1]+1:]
# printc('DID: ',DID_flat,' -->> WILL NOT BE A NUMBER',color=bcolors.WARNING)
DID_flat = flat_header['PHIDATID']
if 'CAL_FLAT' in header: # Check for existence
header['CAL_FLAT'] = DID_flat
else:
header.set('CAL_FLAT', DID_flat, 'Onboard calibrated for gain table',after='CAL_DARK')
if verbose:
plib.show_one(data[cpos,0,:,:],vmax=None,vmin=0,xlabel='pixel',ylabel='pixel',title='Data / flat at continuum',cbarlabel='DN',save=None,cmap='gray')
#-----------------
# CORRECT PREFILTER
#-----------------
if prefilter:
data,header = phi_correct_prefilter(prefilter_fits,header,data,voltagesData,verbose = verbose)
#-----------------
# GHOST CORRECTION
#-----------------
if correct_ghost:
data,header = phi_correct_ghost(data,header,radius,verbose = True)
#-----------------
# REALIGN DATA BEFORE DEMODULATION
#-----------------
if realign:
printc('-->>>>>>> Realigning data... ',color=bcolors.OKGREEN)
for i in range(zd//4):
s_x,s_y,_ = PHI_shifts_FFT(data[i,:,:,:],prec=500,verbose=verbose,norma=False)
for j in range(4):
data[i,j,:,:] = shift_subp(data[i,j,:,:], shift=[s_x[j],s_y[j]]) #estra y,z asi que esta al reves FFT
if 'CAL_REAL' in header: # Check for existence
header['CAL_REAL'] = 'FFT'
else:
header.set('CAL_REAL', 'FFT', 'Realigment of data (phifdt_pipe_modules.py)',after='CAL_DARK')
#-----------------
# APPLY DEMODULATION
#-----------------
printc('-->>>>>>> Demodulating data... ',color=bcolors.OKGREEN)
if debug:
datan = np.copy(data)
ds = np.copy(data)
demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266],\
[-0.660351, 0.314981, 0.650029, -0.299685],\
[ 0.421242, 0.336994, -0.183068, -0.576202],\
[-0.351933, 0.459820, -0.582167, 0.455458]])
for i in range(zd//4):
for l in range(xd):
for m in range(yd):
datan[i,:,m,l] = np.matmul(demodM, ds[i,:,m,l] )
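        # A vectorized equivalent of the explicit per-pixel loop above (which is kept only for
        # debugging clarity) would be a single tensor contraction, e.g.:
        #   datan = np.einsum('ij,wjyx->wiyx', demodM, ds)
        # i.e. apply the 4x4 demodulation matrix to the Stokes axis of every (wave, y, x) pixel.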
plib.show_four_row(datan[3,0,:,:],datan[3,1,:,:],datan[3,2,:,:],datan[3,3,:,:],svmin=[0,-0.2,-0.2,-1.],svmax=[100,0.1,0.1,0.1])
data, header = phi_apply_demodulation(data,instrument,header=header)
if verbose == 1:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom = 3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
# with pyfits.open(data_filename) as hdu_list:
# hdu_list[0].data = data
# hdu_list[0].header = header
# hdu_list.writeto('dummy.fits', clobber=True)
#-----------------
# APPLY NORMALIZATION
#-----------------
printc('-->>>>>>> Applying normalization --',color=bcolors.OKGREEN)
nrm = np.mean(data[cpos,0,rry[0]:rry[1],rrx[0]:rrx[1]])
print(' Norma is: ',nrm,' evaluated in x = [',rrx[0],':',rrx[1],'] y = [',rry[0],':',rry[1],']')
data = data/nrm
if 'CAL_NORM' in header: # Check for existence
header['CAL_NORM'] = np.round(nrm,6)
else:
header.set('CAL_NORM', np.round(nrm,6), 'Normalization constant PROC_Ic',after='CAL_DARK')
if debug:
datan = datan/nrm
if debug:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:])
plib.show_four_row(datan[3,0,:,:],datan[3,1,:,:],datan[3,2,:,:],datan[3,3,:,:])
if verbose == 1:
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom = 3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
#-----------------
# GHOST CORRECTION AFTER DEMODULATION
#-----------------
# if correct_ghost:
# data,header = phi_correct_ghost_dm(data,header,radius,verbose = verbose)
#-----------------
# CROSS-TALK CALCULATION
#-----------------
if ItoQUV or putmediantozero:
factor_media = 0.8 # 80% of the disk
rrx_m = [int(c[0]-radius*factor_media),int(c[0]+radius*factor_media)]
rry_m = [int(c[1]-radius*factor_media),int(c[1]+radius*factor_media)]
maski,coords = generate_circular_mask([xd-1,yd-1],radius*factor_media,radius*factor_media)
maski = shift(maski, shift=(c[0]-xd//2,c[1]-yd//2), fill_value=0).astype(int)
if ItoQUV:
printc('-->>>>>>> Cross-talk correction from Stokes I to Stokes Q,U,V --',color=bcolors.OKGREEN)
printc(' Using ',factor_media*100,'% of the disk ',color=bcolors.OKGREEN)
printc(' Crosstalk evaluated in x = [',rrx_m[0],':',rrx_m[1],'] y = [',rry_m[0],':',rry_m[1],']',' using ',factor_media*100,"% of the disk",color=bcolors.OKBLUE)
if ind_wave:
for i in range(zd//4):
printc(' Individual wavelengths....',color=bcolors.OKBLUE)
broadcastd = data[i,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]]
data_dummy = data[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]]*0. + broadcastd[np.newaxis,:,:,:]
cQ,cU,cV = crosstalk_ItoQUV(data_dummy[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]],npoints=10000,verbose=verbose)
#-----------------
# CROSS-TALK CORRECTION
#-----------------
printc(' Applying cross-talk correction...',color=bcolors.OKGREEN)
data[i,1,:,:] = data[i,1,:,:] - cQ[0]*data[i,0,:,:] - cQ[1]
data[i,2,:,:] = data[i,2,:,:] - cU[0]*data[i,0,:,:] - cU[1]
data[i,3,:,:] = data[i,3,:,:] - cV[0]*data[i,0,:,:] - cV[1]
if verbose:
plt.hist(data[0,1,maski > 0].flatten(), bins='auto')
plt.title('Stokes Q')
plt.hist(data[0,2,maski > 0].flatten(), bins='auto')
plt.title('Stokes U')
plt.hist(data[0,3,maski > 0].flatten(), bins='auto')
plt.title('Stokes V')
plt.show()
else:
cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry_m[0]:rry_m[1],rrx_m[0]:rrx_m[1]],verbose=verbose,npoints=10000)
# rrx = [int(c[1]),int(c[1]+r*factor)]
# rry = [int(c[0]-r*factor),int(c[0])]
# cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],verbose=verbose,npoints=10000)
# rrx = [int(c[1]-r*factor),int(c[1])]
# rry = [int(c[0]),int(c[0]+r*factor)]
# cQ,cU,cV = crosstalk_ItoQUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],verbose=verbose,npoints=10000)
# return
#-----------------
# CROSS-TALK CORRECTION
#-----------------
printc(' Applying cross-talk correction...',color=bcolors.OKGREEN)
data[:,1,:,:] = data[:,1,:,:] - cQ[0]*data[:,0,:,:] - cQ[1]
data[:,2,:,:] = data[:,2,:,:] - cU[0]*data[:,0,:,:] - cU[1]
data[:,3,:,:] = data[:,3,:,:] - cV[0]*data[:,0,:,:] - cV[1]
if verbose:
plib.show_hist(data[0,1, maski > 0].flatten(), bins='auto',title=' ',leave = 'open',color='green')
plib.show_hist(data[0,2, maski > 0].flatten(), bins='auto',title=' ',leave = 'open',color='red')
plib.show_hist(data[0,3, maski > 0].flatten(), bins='auto',title='Stokes Q/U/V - no zero',color='blue')
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - corr','Q - corr','U - corr','V - corr'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
# PLT_RNG = 2
# plib.show_four_row(data[1,0,:,:],data[1,1,:,:],data[1,2,:,:],data[1,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t1_'+str(loopthis)+'.png')
# plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t3_'+str(loopthis)+'.png')
# plib.show_four_row(data[5,0,:,:],data[5,1,:,:],data[5,2,:,:],data[5,3,:,:],title=['I','Q','U','V'],svmin=[0.1,-0.002,-0.002,-0.003],svmax=[1.1,0.002,0.002,0.003])#,save='t5_'+str(loopthis)+'.png')
# np.save('data_dummy',data)
if 'CAL_CRT0' in header: # Check for existence
header['CAL_CRT0'] = np.round(cQ[0]*100,3)
else:
header.set('CAL_CRT0', np.round(cQ[0]*100,3), 'cross-talk I to Q (slope in %), wrt CAL_NROM ',after='CAL_DARK')
if 'CAL_CRT1' in header: # Check for existence
header['CAL_CRT1'] = np.round(cQ[1]*100,3)
else:
header.set('CAL_CRT1', np.round(cQ[1]*100,3), 'cross-talk I to Q (off-set in %), wrt CAL_NROM ',after='CAL_CRT0')
if 'CAL_CRT2' in header: # Check for existence
header['CAL_CRT2'] = np.round(cU[0]*100,3)
else:
header.set('CAL_CRT2', np.round(cU[0]*100,3), 'cross-talk I to U (slope in %) alue, wrt CAL_NROM ',after='CAL_CRT1')
if 'CAL_CRT3' in header: # Check for existence
header['CAL_CRT3'] = np.round(cU[1]*100,3)
else:
header.set('CAL_CRT3', np.round(cU[1]*100,3), 'cross-talk I to U (off-set in %), wrt CAL_NROM ',after='CAL_CRT2')
if 'CAL_CRT4' in header: # Check for existence
header['CAL_CRT4'] = np.round(cV[0]*100,3)
else:
header.set('CAL_CRT4', np.round(cV[0]*100,3), 'cross-talk I to V (slope in %), wrt CAL_NROM',after='CAL_CRT3')
if 'CAL_CRT5' in header: # Check for existence
header['CAL_CRT5'] = np.round(cV[1]*100,3)
else:
header.set('CAL_CRT5', np.round(cV[1]*100,3), 'cross-talk I to V (off-set in %), wrt CAL_NROM ',after='CAL_CRT4')
#-----------------
# CROSS-TALK CALCULATION FROM V TO QU (Interactive)
#-----------------
if VtoQU:
printc('-->>>>>>> Cross-talk correction from Stokes V to Stokes Q,U ',color=bcolors.OKGREEN)
factor = 0.3 # 30% of the disk
rrx = [int(c[0]-radius*factor),int(c[0]+radius*factor)]
rry = [int(c[1]-radius*factor),int(c[1]+radius*factor)]
print(' Cross-talk evaluated in x = [',rrx[0],':',rrx[1],'] y = [',rry[0],':',rry[1],']',' using ',factor*100,"% of the disk")
cVQ,cVU = cross_talk_QUV(data[:,:,rry[0]:rry[1],rrx[0]:rrx[1]],nran = 2000,nlevel=nlevel,block=False)
option = input('Do you want to apply the correction (y/n) [n]: ')
if option == 'y':
datao = np.copy(data)
print('Applying V to QU cross-talk correction...')
datao[:,2,:,:] = data[:,2,:,:] - cVQ[0]*data[:,3,:,:] - cVQ[1]
datao[:,3,:,:] = data[:,3,:,:] - cVU[0]*data[:,3,:,:] - cVU[1]
plib.show_two(data[3,1,ry[0]:ry[1],rx[0]:rx[1]],datao[3,1,ry[0]:ry[1],rx[0]:rx[1]],block=False,title=['Stokes Q','Stokes Q corrected'],zoom=3)
plib.show_two(data[3,2,ry[0]:ry[1],rx[0]:rx[1]],datao[3,2,ry[0]:ry[1],rx[0]:rx[1]],block=False,title=['Stokes U','Stokes U corrected'],zoom=3)
            option2 = input('Do you want to continue (y/n) [n]: ')
if option2 == 'y':
data = np.copy(datao)
del datao
plt.close()
if 'CAL_CRT6' in header: # Check for existence
header['CAL_CRT6'] = np.round(cVQ[0]*100,3)
else:
header.set('CAL_CRT6', np.round(cVQ[0]*100,3), 'cross-talk V to Q (slope in %), wrt CAL_NROM ',after='CAL_CRT5')
if 'CAL_CRT7' in header: # Check for existence
header['CAL_CRT7'] = np.round(cVQ[1]*100,3)
else:
header.set('CAL_CRT7', np.round(cVQ[1]*100,3), 'cross-talk V to Q (off-set in %), wrt CAL_NROM ',after='CAL_CRT6')
if 'CAL_CRT8' in header: # Check for existence
header['CAL_CRT8'] = np.round(cVU[0]*100,3)
else:
header.set('CAL_CRT8', np.round(cVU[0]*100,3), 'cross-talk V to U (slope in %), wrt CAL_NROM ',after='CAL_CRT7')
if 'CAL_CRT9' in header: # Check for existence
header['CAL_CRT9'] = np.round(cVU[1]*100,3)
else:
header.set('CAL_CRT9', np.round(cVU[1]*100,3), 'cross-talk V to U (off-set in %), wrt CAL_NROM ',after='CAL_CRT8')
#-----------------
# CROSS-TALK CALCULATION FROM I TO QUV (2D)
#-----------------
if do2d >= 2:
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
printc('-- IN 2-Dimensions --')
printc('-- Cross-talk correction from Stokes I to Stokes Q,U,V --')
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
size = do2d
cV0,cV1 = crosstalk_ItoQUV2d(data[:,:,ry[0]:ry[1],rx[0]:rx[1]],size=size)
nsize = size-1
dim = ry[1]-ry[0]-nsize
cV0 = cV0.reshape(dim,dim)
cV1 = cV1.reshape(dim,dim)
plib.show_one(cV0,vmin=-0.005,vmax=0.005)
data[:,3,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] = \
data[:,3,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] -\
cV0*data[:,0,ry[0]+nsize//2:ry[1]-nsize//2-1,rx[0]+nsize//2:rx[1]-nsize//2-1] #- 0.95*cV1
#-----------------
# FRINGING -
#-----------------
if correct_fringes == 'auto' or correct_fringes == 'manual':
if verbose:
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - before fringe','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
data, header = phi_correct_fringes(data,header,option=correct_fringes,verbose=verbose)
if verbose:
plib.show_four_row(data[2,0,:,:],data[2,1,:,:],data[2,2,:,:],data[2,3,:,:],title=['I - after fringe','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
elif correct_fringes == False:
pass
else:
        printc('Error in option fringe correction. Options are "manual", "auto" or false. Given: ',color=bcolors.WARNING)
print(correct_fringes)
#-----------------
# MEDIAN TO CERO
#-----------------
if putmediantozero:
printc('-->>>>>>> Putting median to zero ',color=bcolors.OKGREEN)
        printc(' Median evaluated in x = [',rrx_m[0],':',rrx_m[1],'] y = [',rry_m[0],':',rry_m[1],']',' using ',factor_media*100,"% of the disk",color=bcolors.OKBLUE)
for i in range(zd//4):
PQ = np.median( data[i,1, maski > 0])#,axis=(1,2))
PU = np.median( data[i,2, maski > 0])#,axis=(1,2))
PV = np.median( data[i,3, maski > 0])#,axis=(1,2))
# PQ = np.median(data[:,1,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
# PU = np.median(data[:,2,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
# PV = np.median(data[:,3,rry[0]:rry[1],rrx[0]:rrx[1]],axis=(1,2))
data[i,1,:,:] = data[i,1,:,:] - PQ#[:,np.newaxis,np.newaxis]
data[i,2,:,:] = data[i,2,:,:] - PU#[:,np.newaxis,np.newaxis]
data[i,3,:,:] = data[i,3,:,:] - PV#[:,np.newaxis,np.newaxis]
printc(PQ,PU,PV)
if verbose == 1:
plib.show_hist(data[0,1, maski > 0].flatten(), bins='auto',title=' ',leave='open',color='green')
plib.show_hist(data[0,2, maski > 0].flatten(), bins='auto',title=' ',leave='open',color='red')
plib.show_hist(data[0,3, maski > 0].flatten(), bins='auto',title='Stokes Q/U/V - zero',color='blue')
plib.show_four_row(data[3,0,:,:],data[3,1,:,:],data[3,2,:,:],data[3,3,:,:],title=['I','Q','U','V'],zoom=3,svmin=[0,-0.004,-0.004,-0.004],svmax=[1.2,0.004,0.004,0.004])
header['history'] = ' Parameters putmediantozero [%]: '+ str(np.round(PQ*100,6))+ ' '+ str(np.round(PU*100,6))+ ' '+ str(np.round(PV*100,6))
#-----------------
#CHECK FOR INFs
#-----------------
data[np.isinf(data)] = 0
data[np.isnan(data)] = 0
#-----------------
# SAVE DATA TODO: CMILOS FORMAT AND FITS
#-----------------
#check if npz,pngs and level2 exist
dirs = ['npz','pngs','level2']
for checkit in dirs:
check_dir = os.path.isdir(output_dir+checkit)
if not check_dir:
os.makedirs(output_dir+checkit)
print("created folder : ", output_dir+checkit)
else:
print(output_dir+checkit, "folder already exists.")
printc('---------------------------------------------------------',color=bcolors.OKGREEN)
if outfile == None:
#basically replace L1 by L1.5
try:
outfile_L2 = set_level(data_f,'L1','L2')
outfile_L2 = set_level(outfile_L2,'ilam','stokes')
except:
outfile_L2 = set_level(data_f,'L0','L2')
outfile_L2 = set_level(outfile_L2,'ilam','stokes')
printc(' Saving data to: ',output_dir+'level2/'+outfile_L2)
# hdu = pyfits.PrimaryHDU(data)
# hdul = pyfits.HDUList([hdu])
# hdul.writeto(outfile, overwrite=True)
with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = data
# header = hdu_list[0].header
hdu_list[0].header = header
# Add a new key to the header
# header.insert(20, ('NEWKEY', 'OMIT', 'test'))
#header.set('NEWKEY','50.5')
hdu_list.writeto(output_dir+'level2/'+outfile_L2, clobber=True)
# hdu_list.writeto(directory+outfile+'_L1.fits', clobber=True)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = mask
# hdu_list.writeto(directory+outfile+'_red-mask.fits', clobber=True)
#-----------------
# INVERSION OF DATA WITH CMILOS
#-----------------
if rte == 'RTE' or rte == 'CE' or rte == 'CE+RTE':
printc('---------------------RUNNING CMILOS --------------------------',color=bcolors.OKGREEN)
rte_invs = np.zeros((12,yd,xd)).astype(float)
rte_invs[:,ry[0]:ry[1],rx[0]:rx[1]] = generate_level2(data[:,:,ry[0]:ry[1],rx[0]:rx[1]],wave_axis,rte)
rte_invs_noth = np.copy(rte_invs)
umbral = 3.
noise_in_V = np.mean(data[0,3,rry[0]:rry[1],rrx[0]:rrx[1]])
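# Zero the inverted field strength, inclination and azimuth wherever the peak |V| over the scan falls below umbral times the mean Stokes V noise level.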
low_values_flags = np.max(np.abs(data[:,3,:,:]),axis=0) < noise_in_V*umbral # Where values are low
rte_invs[2,low_values_flags] = 0
rte_invs[3,low_values_flags] = 0
rte_invs[4,low_values_flags] = 0
for i in range(12):
rte_invs[i,:,:] = rte_invs[i,:,:] * mask
#save plots!!!!
if verbose:
plib.show_four_row(rte_invs_noth[2,:,:],rte_invs_noth[3,:,:],rte_invs_noth[4,:,:],rte_invs_noth[8,:,:],svmin=[0,0,0,-6.],svmax=[1200,180,180,+6.],title=['Field strength [Gauss]','Field inclination [degree]','Field azimuth [degree]','LoS velocity [km/s]'],xlabel='Pixel',ylabel='Pixel')#,save=outfile+'_VLoS.png')
plib.show_four_row(rte_invs[2,:,:],rte_invs[3,:,:],rte_invs[4,:,:],rte_invs[8,:,:],svmin=[0,0,0,-6.],svmax=[1200,180,180,+6.],title=['Field strength [Gauss]','Field inclination [degree]','Field azimuth [degree]','LoS velocity [km/s]'],xlabel='Pixel',ylabel='Pixel')#,save=outfile+'BLoS.png')
rte_invs_noth[8,:,:] = rte_invs_noth[8,:,:] - np.mean(rte_invs_noth[8,rry[0]:rry[1],rrx[0]:rrx[1]])
rte_invs[8,:,:] = rte_invs[8,:,:] - np.mean(rte_invs[8,rry[0]:rry[1],rrx[0]:rrx[1]])
#np.savez_compressed(output_dir+'npz/'+outfile_L2+'_RTE', rte_invs=rte_invs, rte_invs_noth=rte_invs_noth,mask=mask)
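# Line-of-sight field: field strength times the cosine of the inclination, restricted to the disc mask.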
b_los = rte_invs_noth[2,:,:]*np.cos(rte_invs_noth[3,:,:]*np.pi/180.)*mask
# b_los = np.zeros((2048,2048))
# b_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1] = b_los_cropped
v_los = rte_invs_noth[8,:,:] * mask
# v_los = np.zeros((2048,2048))
# v_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1] = v_los_cropped
if verbose:
plib.show_one(v_los,vmin=-2.5,vmax=2.5,title='LoS velocity')
plib.show_one(b_los,vmin=-30,vmax=30,title='LoS magnetic field')
printc(' ---- >>>>> Updating L2 header.... ',color=bcolors.OKGREEN)
header['history'] = ' RTE CMILOS INVERTER: '+ rte
header['history'] = ' CMILOS VER: '+ version_cmilos
if 'RTE_ITER' in header: # Check for existence
header['RTE_ITER'] = str(15)
else:
header.set('RTE_ITER', str(15), 'Number RTE inversion iterations',after='CAL_SCIP')
printc(' ---- >>>>> Saving L2 data.... ',color=bcolors.OKGREEN)
with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs_noth[2,:,:] * mask
# header = hdu_list[0].header
hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','bmag')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs_noth[3,:,:] * mask
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','binc')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs[4,:,:] * mask
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','bazi')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = b_los
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','blos')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = v_los
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','vlos')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
# with pyfits.open(data_filename) as hdu_list:
hdu_list[0].data = rte_invs[9,:,:]+rte_invs[10,:,:]
# hdu_list[0].header = header
writeto = set_level(outfile_L2,'stokes','icnt')
hdu_list.writeto(output_dir+'level2/'+writeto, clobber=True)
printc(' ---- >>>>> Saving plots.... ',color=bcolors.OKGREEN)
#-----------------
# PLOTS VLOS
#-----------------
Zm = np.ma.masked_where(mask == 1, mask)
plt.figure(figsize=(10, 10))
ax = plt.gca()
plt.title('PHI-FDT LoS velocity',size=20)
# Hide grid lines
ax.grid(False)
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
# im = ax.imshow(np.fliplr(rotate(v_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1], 52, reshape=False)), cmap='bwr',vmin=-3.,vmax=3.)
im = ax.imshow(v_los, cmap='bwr',vmin=-3.,vmax=3.)
divider = plib.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plib.plt.colorbar(im, cax=cax)
cbar.set_label('[km/s]')
cbar.ax.tick_params(labelsize=16)
#ax.imshow(Zm, cmap='gray')
writeto = set_level(outfile_L2,'stokes','vlos')
writeto = set_level(writeto,'.fits','.png')
plt.savefig(output_dir+'pngs/'+writeto,dpi=300)
plt.close()
#-----------------
# PLOTS BLOS
#-----------------
plt.figure(figsize=(10, 10))
ax = plt.gca()
plt.title('PHI-FDT Magnetogram',size=20)
# Hide grid lines
ax.grid(False)
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
#im = ax.imshow(np.fliplr(rotate(b_los[PXBEG2:PXEND2+1,PXBEG1:PXEND1+1], 52, reshape=False)), cmap='gray',vmin=-100,vmax=100)
im = ax.imshow(b_los, cmap='gray',vmin=-100,vmax=100)
divider = plib.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plib.plt.colorbar(im, cax=cax)
# cbar.set_label('Stokes V amplitude [%]')
cbar.set_label('LoS magnetic field [Mx/cm$^2$]')
cbar.ax.tick_params(labelsize=16)
#ax.imshow(Zm, cmap='gray')
writeto = set_level(outfile_L2,'stokes','blos')
writeto = set_level(writeto,'.fits','.png')
plt.savefig(output_dir+'pngs/'+writeto,dpi=300)
plt.close()
printc('--------------------- END ----------------------------',color=bcolors.FAIL)
return wave_axis
# if rte == 'cog':
# printc('---------------------RUNNING COG --------------------------',color=bcolors.OKGREEN)
# wavelength = 6173.3356
# v_los,b_los = cog(data,wavelength,wave_axis,lande_factor=3,cpos = cpos)
# #-----------------
# # MASK DATA AND SAVE
# #-----------------
# v_los = v_los * mask
# b_los = b_los * mask
# plib.show_one(v_los,vmin=-1.5,vmax=1.5)
# plib.show_one(b_los,vmin=-150,vmax=150)
# if verbose == 1:
# plib.show_one(v_los,vmin=-2.5,vmax=2.5)
# plib.show_one(b_los,vmin=-150,vmax=150)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = v_los
# hdu_list[0].header = header
# writeto = set_level(outfile_L2,'ilam','vlos-cog')
# writeto = set_level(writeto,'.fits','.png')
# plt.savefig(directory+writeto,dpi=300)
# with pyfits.open(data_f) as hdu_list:
# hdu_list[0].data = b_los
# hdu_list[0].header = header
# writeto = set_level(outfile_L2,'ilam','blos-cog')
# writeto = set_level(writeto,'.fits','.png')
# plt.savefig(directory+'pngs/'+writeto,dpi=300)
# return
#-----------------
# CORRECT CAVITY
#-----------------
# try:
# if cavity == 0:
# cavity=np.zeros_like(vl)
# print("zerooooooooooo")
# except:
# # cavity = cavity[ry[0]:ry[1],rx[0]:rx[1]]
# pass
# factor = 0.5
# rrx = [int(c[0]-r*factor),int(c[0]+r*factor)]
# rry = [int(c[1]-r*factor),int(c[1]+r*factor)]
# print(rrx,rry,' check these for los vel calib')
# off = np.mean(vl[rry[0]:rry[1],rrx[0]:rrx[1]])
# vl = vl - off #- cavity
# print('velocity offset ',off)
# # cavity,h = phi.fits_read('HRT_cavity_map_IP5.fits')
# # cavity = cavity * 0.3513e-3/6173.*300000. #A/V
# # cavity = cavity - np.median(cavity[800:1200,800:1200])
# return vl
|
the-stack_0_1901 | import ast
import copy
from vyper.exceptions import (
ParserException,
InvalidLiteralException,
StructureException,
TypeMismatchException,
FunctionDeclarationException,
EventDeclarationException
)
from vyper.signatures.function_signature import (
FunctionSignature,
VariableRecord,
)
from vyper.signatures.event_signature import (
EventSignature,
)
from vyper.parser.stmt import Stmt
from vyper.parser.expr import Expr
from vyper.parser.context import Context, Constancy
from vyper.parser.global_context import GlobalContext
from vyper.parser.lll_node import LLLnode
from vyper.parser.pre_parser import pre_parse
from vyper.parser.parser_utils import (
make_setter,
base_type_conversion,
byte_array_to_num,
decorate_ast,
getpos,
make_byte_array_copier,
resolve_negative_literals,
unwrap_location,
)
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
)
from vyper.types import (
get_size_of_type,
is_base_type,
ceil32,
)
from vyper.utils import (
MemoryPositions,
LOADED_LIMIT_MAP,
string_to_bytes,
)
from vyper.utils import (
bytes_to_int,
calc_mem_gas,
)
if not hasattr(ast, 'AnnAssign'):
raise Exception("Requires python 3.6 or higher for annotation support")
# Converts code to parse tree
def parse_to_ast(code):
class_names, code = pre_parse(code)
if '\x00' in code:
raise ParserException('No null bytes (\\x00) allowed in the source code.')
o = ast.parse(code) # python ast
decorate_ast(o, code, class_names) # decorated python ast
o = resolve_negative_literals(o)
return o.body
# Header code
initializer_list = ['seq', ['mstore', 28, ['calldataload', 0]]]
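# Storing the first calldata word at byte offset 28 right-aligns the 4-byte method id, so the dispatcher's later ['mload', 0] reads it as a plain integer.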
# Store limit constants at fixed addresses in memory.
initializer_list += [['mstore', pos, limit_size] for pos, limit_size in LOADED_LIMIT_MAP.items()]
initializer_lll = LLLnode.from_list(initializer_list, typ=None)
# Is a function the initializer?
def is_initializer(code):
return code.name == '__init__'
# Is a function the default function?
def is_default_func(code):
return code.name == '__default__'
# Generate default argument function signatures.
def generate_default_arg_sigs(code, contracts, global_ctx):
# generate all sigs, and attach.
total_default_args = len(code.args.defaults)
if total_default_args == 0:
return [
FunctionSignature.from_definition(
code,
sigs=contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
]
base_args = code.args.args[:-total_default_args]
default_args = code.args.args[-total_default_args:]
# Generate a list of default function combinations.
row = [False] * (total_default_args)
table = [row.copy()]
for i in range(total_default_args):
row[i] = True
table.append(row.copy())
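# e.g. with two default arguments the rows are [False, False], [True, False] and [True, True]: one signature per allowed number of supplied defaults.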
default_sig_strs = []
sig_fun_defs = []
for truth_row in table:
new_code = copy.deepcopy(code)
new_code.args.args = copy.deepcopy(base_args)
new_code.args.default = []
# Add necessary default args.
for idx, val in enumerate(truth_row):
if val is True:
new_code.args.args.append(default_args[idx])
sig = FunctionSignature.from_definition(
new_code,
sigs=contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
default_sig_strs.append(sig.sig)
sig_fun_defs.append(sig)
return sig_fun_defs
# Get ABI signature
def mk_full_signature(code, sig_formatter=None, interface_codes=None):
if sig_formatter is None:
# Use default JSON style output.
sig_formatter = lambda sig, custom_units_descriptions: sig.to_abi_dict(custom_units_descriptions)
o = []
global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)
# Produce event signatures.
for code in global_ctx._events:
sig = EventSignature.from_declaration(code, global_ctx)
o.append(sig_formatter(sig, global_ctx._custom_units_descriptions))
# Produce function signatures.
for code in global_ctx._defs:
sig = FunctionSignature.from_definition(code,
sigs=global_ctx._contracts,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
if not sig.private:
default_sigs = generate_default_arg_sigs(code, global_ctx._contracts, global_ctx)
for s in default_sigs:
o.append(sig_formatter(s, global_ctx._custom_units_descriptions))
return o
def mk_method_identifiers(code, interface_codes=None):
o = {}
global_ctx = GlobalContext.get_global_context(parse_to_ast(code), interface_codes=interface_codes)
for code in global_ctx._defs:
sig = FunctionSignature.from_definition(code, sigs=global_ctx._contracts, custom_units=global_ctx._custom_units, constants=global_ctx._constants)
if not sig.private:
default_sigs = generate_default_arg_sigs(code, global_ctx._contracts, global_ctx)
for s in default_sigs:
o[s.sig] = hex(s.method_id)
return o
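# Each entry maps a public function signature string to the hex form of its 4-byte method id (the first four bytes of the keccak-256 hash of the signature).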
def parse_events(sigs, global_ctx):
for event in global_ctx._events:
sigs[event.target.id] = EventSignature.from_declaration(event, global_ctx)
return sigs
def parse_external_contracts(external_contracts, _contracts, _structs, _constants):
for _contractname in _contracts:
_contract_defs = _contracts[_contractname]
_defnames = [_def.name for _def in _contract_defs]
contract = {}
if len(set(_defnames)) < len(_contract_defs):
raise FunctionDeclarationException("Duplicate function name: %s" % [name for name in _defnames if _defnames.count(name) > 1][0])
for _def in _contract_defs:
constant = False
# test for valid call type keyword.
if len(_def.body) == 1 and \
isinstance(_def.body[0], ast.Expr) and \
isinstance(_def.body[0].value, ast.Name) and \
_def.body[0].value.id in ('modifying', 'constant'):
constant = True if _def.body[0].value.id == 'constant' else False
else:
raise StructureException('constant or modifying call type must be specified', _def)
# Recognizes already-defined structs
sig = FunctionSignature.from_definition(_def, contract_def=True, constant=constant, custom_structs=_structs, constants=_constants)
contract[sig.name] = sig
external_contracts[_contractname] = contract
return external_contracts
def parse_other_functions(o, otherfuncs, sigs, external_contracts, origcode, global_ctx, default_function, runtime_only):
sub = ['seq', initializer_lll]
add_gas = initializer_lll.gas
for _def in otherfuncs:
sub.append(parse_func(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)) # noqa E999
sub[-1].total_gas += add_gas
add_gas += 30
for sig in generate_default_arg_sigs(_def, external_contracts, global_ctx):
sig.gas = sub[-1].total_gas
sigs[sig.sig] = sig
# Add fallback function
if default_function:
default_func = parse_func(default_function[0], {**{'self': sigs}, **external_contracts}, origcode, global_ctx)
sub.append(default_func)
else:
sub.append(LLLnode.from_list(['revert', 0, 0], typ=None, annotation='Default function'))
if runtime_only:
return sub
else:
o.append(['return', 0, ['lll', sub, 0]])
return o
# Main python parse tree => LLL method
def parse_tree_to_lll(code, origcode, runtime_only=False, interface_codes=None):
global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)
_names_def = [_def.name for _def in global_ctx._defs]
# Checks for duplicate function names
if len(set(_names_def)) < len(_names_def):
raise FunctionDeclarationException("Duplicate function name: %s" % [name for name in _names_def if _names_def.count(name) > 1][0])
_names_events = [_event.target.id for _event in global_ctx._events]
# Checks for duplicate event names
if len(set(_names_events)) < len(_names_events):
raise EventDeclarationException("Duplicate event name: %s" % [name for name in _names_events if _names_events.count(name) > 1][0])
# Initialization function
initfunc = [_def for _def in global_ctx._defs if is_initializer(_def)]
# Default function
defaultfunc = [_def for _def in global_ctx._defs if is_default_func(_def)]
# Regular functions
otherfuncs = [_def for _def in global_ctx._defs if not is_initializer(_def) and not is_default_func(_def)]
sigs = {}
external_contracts = {}
# Create the main statement
o = ['seq']
if global_ctx._events:
sigs = parse_events(sigs, global_ctx)
if global_ctx._contracts:
external_contracts = parse_external_contracts(external_contracts, global_ctx._contracts, global_ctx._structs, global_ctx._constants)
# If there is an init func...
if initfunc:
o.append(['seq', initializer_lll])
o.append(parse_func(initfunc[0], {**{'self': sigs}, **external_contracts}, origcode, global_ctx))
# If there are regular functions...
if otherfuncs or defaultfunc:
o = parse_other_functions(
o, otherfuncs, sigs, external_contracts, origcode, global_ctx, defaultfunc, runtime_only
)
# Check interface.
if global_ctx._interface:
funcs_left = global_ctx._interface.copy()
for sig, func_sig in sigs.items():
if isinstance(func_sig, FunctionSignature):
if sig in funcs_left and not func_sig.private:
del funcs_left[sig]
if isinstance(func_sig, EventSignature) and func_sig.sig in funcs_left:
del funcs_left[func_sig.sig]
if funcs_left:
error_message = 'Contract does not comply to supplied Interface(s).\n'
missing_functions = [sig_name for sig_name, func_sig in funcs_left.items() if isinstance(func_sig, FunctionSignature)]
missing_events = [sig_name for sig_name, func_sig in funcs_left.items() if isinstance(func_sig, EventSignature)]
if missing_functions:
error_message += 'Missing interface functions:\n\t{}'.format('\n\t'.join(missing_functions))
if missing_events:
error_message += 'Missing interface events:\n\t{}'.format('\n\t'.join(missing_events))
raise StructureException(error_message)
return LLLnode.from_list(o, typ=None)
# Checks that an input matches its type
def make_clamper(datapos, mempos, typ, is_init=False):
if not is_init:
data_decl = ['calldataload', ['add', 4, datapos]]
copier = lambda pos, sz: ['calldatacopy', mempos, ['add', 4, pos], sz]
else:
data_decl = ['codeload', ['add', '~codelen', datapos]]
copier = lambda pos, sz: ['codecopy', mempos, ['add', '~codelen', pos], sz]
# Numbers: make sure they're in range
if is_base_type(typ, 'int128'):
return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINNUM], data_decl, ['mload', MemoryPositions.MAXNUM]],
typ=typ, annotation='checking int128 input')
# Booleans: make sure they're zero or one
elif is_base_type(typ, 'bool'):
return LLLnode.from_list(['uclamplt', data_decl, 2], typ=typ, annotation='checking bool input')
# Addresses: make sure they're in range
elif is_base_type(typ, 'address'):
return LLLnode.from_list(['uclamplt', data_decl, ['mload', MemoryPositions.ADDRSIZE]], typ=typ, annotation='checking address input')
# Bytes: make sure they have the right size
elif isinstance(typ, ByteArrayLike):
return LLLnode.from_list(['seq',
copier(data_decl, 32 + typ.maxlen),
['assert', ['le', ['calldataload', ['add', 4, data_decl]], typ.maxlen]]],
typ=None, annotation='checking bytearray input')
# Lists: recurse
elif isinstance(typ, ListType):
o = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
o.append(make_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
return LLLnode.from_list(['seq'] + o, typ=None, annotation='checking list input')
# Otherwise don't make any checks
else:
return LLLnode.from_list('pass')
def get_sig_statements(sig, pos):
method_id_node = LLLnode.from_list(sig.method_id, pos=pos, annotation='%s' % sig.sig)
if sig.private:
sig_compare = 0
private_label = LLLnode.from_list(
['label', 'priv_{}'.format(sig.method_id)],
pos=pos, annotation='%s' % sig.sig
)
else:
sig_compare = ['eq', ['mload', 0], method_id_node]
private_label = ['pass']
return sig_compare, private_label
def get_arg_copier(sig, total_size, memory_dest, offset=4):
# Copy arguments.
# For private function, MSTORE arguments and callback pointer from the stack.
if sig.private:
copier = ['seq']
for pos in range(0, total_size, 32):
copier.append(['mstore', memory_dest + pos, 'pass'])
else:
copier = ['calldatacopy', memory_dest, offset, total_size]
return copier
def make_unpacker(ident, i_placeholder, begin_pos):
start_label = 'dyn_unpack_start_' + ident
end_label = 'dyn_unpack_end_' + ident
return ['seq_unchecked',
['mstore', begin_pos, 'pass'], # get len
['mstore', i_placeholder, 0],
['label', start_label],
['if', ['ge', ['mload', i_placeholder], ['ceil32', ['mload', begin_pos]]], ['goto', end_label]], # break
['mstore', ['add', ['add', begin_pos, 32], ['mload', i_placeholder]], 'pass'], # pop into correct memory slot.
['mstore', i_placeholder, ['add', 32, ['mload', i_placeholder]]], # increment i
['goto', start_label],
['label', end_label]]
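# The unpacker pops the byte array length into begin_pos, then pops ceil32(length) bytes off the stack, one 32-byte word per iteration, into the memory slots following it.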
# Parses a function declaration
def parse_func(code, sigs, origcode, global_ctx, _vars=None):
if _vars is None:
_vars = {}
sig = FunctionSignature.from_definition(
code,
sigs=sigs,
custom_units=global_ctx._custom_units,
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
# Get base args for function.
total_default_args = len(code.args.defaults)
base_args = sig.args[:-total_default_args] if total_default_args > 0 else sig.args
default_args = code.args.args[-total_default_args:]
default_values = dict(zip([arg.arg for arg in default_args], code.args.defaults))
# __init__ function may not have defaults.
if sig.name == '__init__' and total_default_args > 0:
raise FunctionDeclarationException("__init__ function may not have default parameters.")
# Check for duplicate variables with globals
for arg in sig.args:
if arg.name in global_ctx._globals:
raise FunctionDeclarationException("Variable name duplicated between function arguments and globals: " + arg.name)
# Create a local (per function) context.
context = Context(
vars=_vars,
global_ctx=global_ctx,
sigs=sigs,
return_type=sig.output_type,
constancy=Constancy.Constant if sig.const else Constancy.Mutable,
is_payable=sig.payable,
origcode=origcode,
is_private=sig.private,
method_id=sig.method_id
)
# Copy calldata to memory for fixed-size arguments
max_copy_size = sum([32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32 for arg in sig.args])
base_copy_size = sum([32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32 for arg in base_args])
context.next_mem += max_copy_size
clampers = []
# Create callback_ptr, this stores a destination in the bytecode for a private
# function to jump to after a function has executed.
_post_callback_ptr = "{}_{}_post_callback_ptr".format(sig.name, sig.method_id)
if sig.private:
context.callback_ptr = context.new_placeholder(typ=BaseType('uint256'))
clampers.append(
LLLnode.from_list(['mstore', context.callback_ptr, 'pass'], annotation='pop callback pointer')
)
if total_default_args > 0:
clampers.append(['label', _post_callback_ptr])
# private functions without return types need to jump back to
# the calling function, as there is no return statement to handle the
# jump.
stop_func = [['stop']]
if sig.output_type is None and sig.private:
stop_func = [['jump', ['mload', context.callback_ptr]]]
if not len(base_args):
copier = 'pass'
elif sig.name == '__init__':
copier = ['codecopy', MemoryPositions.RESERVED_MEMORY, '~codelen', base_copy_size]
else:
copier = get_arg_copier(
sig=sig,
total_size=base_copy_size,
memory_dest=MemoryPositions.RESERVED_MEMORY
)
clampers.append(copier)
# Add asserts for payable and internal
# private never gets payable check.
if not sig.payable and not sig.private:
clampers.append(['assert', ['iszero', 'callvalue']])
# Fill variable positions
for i, arg in enumerate(sig.args):
if i < len(base_args) and not sig.private:
clampers.append(make_clamper(arg.pos, context.next_mem, arg.typ, sig.name == '__init__'))
if isinstance(arg.typ, ByteArrayLike):
context.vars[arg.name] = VariableRecord(arg.name, context.next_mem, arg.typ, False)
context.next_mem += 32 * get_size_of_type(arg.typ)
else:
context.vars[arg.name] = VariableRecord(arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False)
# Private function copiers. No clamping for private functions.
dyn_variable_names = [a.name for a in base_args if isinstance(a.typ, ByteArrayLike)]
if sig.private and dyn_variable_names:
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
unpackers = []
for idx, var_name in enumerate(dyn_variable_names):
var = context.vars[var_name]
ident = "_load_args_%d_dynarg%d" % (sig.method_id, idx)
o = make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var.pos)
unpackers.append(o)
if not unpackers:
unpackers = ['pass']
clampers.append(LLLnode.from_list(
['seq_unchecked'] + unpackers + [0], # [0] to complete full overarching 'seq' statement, see private_label.
typ=None, annotation='dynamic unpacker', pos=getpos(code))
)
# Create "clampers" (input well-formedness checkers)
# Return function body
if sig.name == '__init__':
o = LLLnode.from_list(['seq'] + clampers + [parse_body(code.body, context)], pos=getpos(code))
elif is_default_func(sig):
if len(sig.args) > 0:
raise FunctionDeclarationException('Default function may not receive any arguments.', code)
if sig.private:
raise FunctionDeclarationException('Default function may only be public.', code)
o = LLLnode.from_list(['seq'] + clampers + [parse_body(code.body, context)], pos=getpos(code))
else:
if total_default_args > 0: # Function with default parameters.
function_routine = "{}_{}".format(sig.name, sig.method_id)
default_sigs = generate_default_arg_sigs(code, sigs, global_ctx)
sig_chain = ['seq']
for default_sig in default_sigs:
sig_compare, private_label = get_sig_statements(default_sig, getpos(code))
# Populate unset default variables
populate_arg_count = len(sig.args) - len(default_sig.args)
set_defaults = []
if populate_arg_count > 0:
current_sig_arg_names = {x.name for x in default_sig.args}
missing_arg_names = [arg.arg for arg in default_args if arg.arg not in current_sig_arg_names]
for arg_name in missing_arg_names:
value = Expr(default_values[arg_name], context).lll_node
var = context.vars[arg_name]
left = LLLnode.from_list(var.pos, typ=var.typ, location='memory',
pos=getpos(code), mutable=var.mutable)
set_defaults.append(make_setter(left, value, 'memory', pos=getpos(code)))
current_sig_arg_names = {x.name for x in default_sig.args}
base_arg_names = {arg.name for arg in base_args}
if sig.private:
# Load all variables in default section, if private,
# because the stack is a linear pipe.
copier_arg_count = len(default_sig.args)
copier_arg_names = current_sig_arg_names
else:
copier_arg_count = len(default_sig.args) - len(base_args)
copier_arg_names = current_sig_arg_names - base_arg_names
# Order copier_arg_names, this is very important.
copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]
# Variables to be populated from calldata/stack.
default_copiers = []
if copier_arg_count > 0:
# Get map of variables in calldata, with their offsets
offset = 4
calldata_offset_map = {}
for arg in default_sig.args:
calldata_offset_map[arg.name] = offset
offset += 32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
# Copy set default parameters from calldata
dynamics = []
for arg_name in copier_arg_names:
var = context.vars[arg_name]
calldata_offset = calldata_offset_map[arg_name]
if sig.private:
_offset = calldata_offset
if isinstance(var.typ, ByteArrayLike):
_size = 32
dynamics.append(var.pos)
else:
_size = var.size * 32
default_copiers.append(get_arg_copier(sig=sig, memory_dest=var.pos, total_size=_size, offset=_offset))
else:
# Add clampers.
default_copiers.append(make_clamper(calldata_offset - 4, var.pos, var.typ))
# Add copying code.
if isinstance(var.typ, ByteArrayLike):
_offset = ['add', 4, ['calldataload', calldata_offset]]
else:
_offset = calldata_offset
default_copiers.append(get_arg_copier(sig=sig, memory_dest=var.pos, total_size=var.size * 32, offset=_offset))
# Unpack byte array if necessary.
if dynamics:
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
for idx, var_pos in enumerate(dynamics):
ident = 'unpack_default_sig_dyn_%d_arg%d' % (default_sig.method_id, idx)
default_copiers.append(
make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var_pos)
)
default_copiers.append(0) # for over arching seq, POP
sig_chain.append([
'if', sig_compare,
['seq',
private_label,
LLLnode.from_list(['mstore', context.callback_ptr, 'pass'], annotation='pop callback pointer', pos=getpos(code)) if sig.private else ['pass'],
['seq'] + set_defaults if set_defaults else ['pass'],
['seq_unchecked'] + default_copiers if default_copiers else ['pass'],
['goto', _post_callback_ptr if sig.private else function_routine]]
])
# With private functions all variable loading occurs in the default
# function sub routine.
if sig.private:
_clampers = [['label', _post_callback_ptr]]
else:
_clampers = clampers
# Function with default parameters.
o = LLLnode.from_list(
['seq',
sig_chain,
['if', 0, # can only be jumped into
['seq',
['label', function_routine] if not sig.private else ['pass'],
['seq'] + _clampers + [parse_body(c, context) for c in code.body] + stop_func]]], typ=None, pos=getpos(code))
else:
# Function without default parameters.
sig_compare, private_label = get_sig_statements(sig, getpos(code))
o = LLLnode.from_list(
['if',
sig_compare,
['seq'] + [private_label] + clampers + [parse_body(c, context) for c in code.body] + stop_func], typ=None, pos=getpos(code))
# Check for at least one return statement if necessary.
if context.return_type and context.function_return_count == 0:
raise FunctionDeclarationException(
"Missing return statement in function '%s' " % sig.name, code
)
o.context = context
o.total_gas = o.gas + calc_mem_gas(o.context.next_mem)
o.func_name = sig.name
return o
# Parse a piece of code
def parse_body(code, context):
if not isinstance(code, list):
return parse_stmt(code, context)
o = []
for stmt in code:
lll = parse_stmt(stmt, context)
o.append(lll)
return LLLnode.from_list(['seq'] + o, pos=getpos(code[0]) if code else None)
# Parse an expression
def parse_expr(expr, context):
return Expr(expr, context).lll_node
# Parse a statement (usually one line of code but not always)
def parse_stmt(stmt, context):
return Stmt(stmt, context).lll_node
def pack_logging_topics(event_id, args, expected_topics, context, pos):
topics = [event_id]
code_pos = pos
for pos, expected_topic in enumerate(expected_topics):
expected_type = expected_topic.typ
arg = args[pos]
value = parse_expr(arg, context)
arg_type = value.typ
if isinstance(arg_type, ByteArrayLike) and isinstance(expected_type, ByteArrayLike):
if arg_type.maxlen > expected_type.maxlen:
raise TypeMismatchException("Topic input bytes are too big: %r %r" % (arg_type, expected_type), code_pos)
if isinstance(arg, ast.Str):
bytez, bytez_length = string_to_bytes(arg.s)
if len(bytez) > 32:
raise InvalidLiteralException("Can only log a maximum of 32 bytes at a time.", code_pos)
topics.append(bytes_to_int(bytez + b'\x00' * (32 - bytez_length)))
else:
if value.location == "memory":
size = ['mload', value]
elif value.location == "storage":
size = ['sload', ['sha3_32', value]]
topics.append(byte_array_to_num(value, arg, 'uint256', size))
else:
value = unwrap_location(value)
value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)
topics.append(value)
return topics
def pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,
dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):
"""
Copy necessary variables to pre-allocated memory section.
:param holder: Complete holder for all args
:param maxlen: Total length in bytes of the full arg section (static + dynamic).
:param arg: Current arg to pack
:param context: Context of arg
:param placeholder: Static placeholder for static argument part.
:param dynamic_offset_counter: position counter stored in static args.
:param dynamic_placeholder: pointer to current position in memory to write dynamic values to.
:param datamem_start: position where the whole datamem section starts.
"""
if isinstance(typ, BaseType):
if isinstance(arg, LLLnode):
value = unwrap_location(arg)
else:
value = parse_expr(arg, context)
value = base_type_conversion(value, value.typ, typ, pos)
holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))
elif isinstance(typ, ByteArrayLike):
if isinstance(arg, LLLnode): # Is preallocated variable.
source_lll = arg
else:
source_lll = parse_expr(arg, context)
# Set static offset, in arg slot.
holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))
# Get the beginning to write the ByteArray to.
dest_placeholder = LLLnode.from_list(
['add', datamem_start, ['mload', dynamic_offset_counter]],
typ=typ, location='memory', annotation="pack_args_by_32:dest_placeholder")
copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)
holder.append(copier)
# Add zero padding.
new_maxlen = ceil32(source_lll.typ.maxlen)
holder.append(
['with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]],
['seq',
['with', '_bytearray_loc', dest_placeholder,
['seq',
['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen,
['seq',
['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'], # stay within allocated bounds
['mstore8', ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]], 0]]]]]]]
)
# Increment offset counter.
increment_counter = LLLnode.from_list(
['mstore', dynamic_offset_counter,
['add', ['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]], 32]],
annotation='Increment dynamic offset counter'
)
holder.append(increment_counter)
elif isinstance(typ, ListType):
maxlen += (typ.count - 1) * 32
typ = typ.subtype
def check_list_type_match(provided): # Check list types match.
if provided != typ:
raise TypeMismatchException(
"Log list type '%s' does not match provided, expected '%s'" % (provided, typ)
)
# List from storage
if isinstance(arg, ast.Attribute) and arg.value.id == 'self':
stor_list = context.globals[arg.attr]
check_list_type_match(stor_list.typ.subtype)
size = stor_list.typ.count
mem_offset = 0
for i in range(0, size):
storage_offset = i
arg2 = LLLnode.from_list(['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],
typ=typ)
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
# List from variable.
elif isinstance(arg, ast.Name):
size = context.vars[arg.id].size
pos = context.vars[arg.id].pos
check_list_type_match(context.vars[arg.id].typ.subtype)
mem_offset = 0
for i in range(0, size):
arg2 = LLLnode.from_list(pos + mem_offset, typ=typ, location='memory')
# p_holder = context.new_placeholder(BaseType(32)) if i > 0 else placeholder
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
# List from list literal.
else:
mem_offset = 0
for i, arg2 in enumerate(arg.elts):
holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, placeholder + mem_offset, pos=pos)
mem_offset += get_size_of_type(typ) * 32
return holder, maxlen
# Pack logging data arguments
def pack_logging_data(expected_data, args, context, pos):
# Checks to see if there's any data
if not args:
return ['seq'], 0, None, 0
holder = ['seq']
maxlen = len(args) * 32 # total size of all packed args (upper limit)
# Unroll any function calls, to temp variables.
prealloacted = {}
for idx, (arg, expected_arg) in enumerate(zip(args, expected_data)):
if isinstance(arg, (ast.Str, ast.Call)):
expr = Expr(arg, context)
source_lll = expr.lll_node
typ = source_lll.typ
if isinstance(arg, ast.Str):
if len(arg.s) > typ.maxlen:
raise TypeMismatchException("Data input bytes are to big: %r %r" % (len(arg.s), typ), pos)
tmp_variable = context.new_variable('_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset), source_lll.typ)
tmp_variable_node = LLLnode.from_list(
tmp_variable, typ=source_lll.typ,
pos=getpos(arg), location="memory", annotation='log_prealloacted %r' % source_lll.typ
)
# Store len.
# holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])
# Copy bytes.
holder.append(
make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory')
)
prealloacted[idx] = tmp_variable_node
requires_dynamic_offset = any([isinstance(data.typ, ByteArrayLike) for data in expected_data])
if requires_dynamic_offset:
zero_pad_i = context.new_placeholder(BaseType('uint256')) # Iterator used to zero pad memory.
dynamic_offset_counter = context.new_placeholder(BaseType(32))
dynamic_placeholder = context.new_placeholder(BaseType(32))
else:
dynamic_offset_counter = None
zero_pad_i = None
# Create placeholder for static args. Note: order of new_*() is important.
placeholder_map = {}
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if not isinstance(typ, ByteArrayLike):
placeholder = context.new_placeholder(typ)
else:
placeholder = context.new_placeholder(BaseType(32))
placeholder_map[i] = placeholder
# Populate static placeholders.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
placeholder = placeholder_map[i]
if not isinstance(typ, ByteArrayLike):
holder, maxlen = pack_args_by_32(holder, maxlen, prealloacted.get(i, arg), typ, context, placeholder, zero_pad_i=zero_pad_i, pos=pos)
# Dynamic position starts right after the static args.
if requires_dynamic_offset:
holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))
# Calculate maximum dynamic offset placeholders, used for gas estimation.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if isinstance(typ, ByteArrayLike):
maxlen += 32 + ceil32(typ.maxlen)
if requires_dynamic_offset:
datamem_start = dynamic_placeholder + 32
else:
datamem_start = placeholder_map[0]
# Copy necessary data into allocated dynamic section.
for i, (arg, data) in enumerate(zip(args, expected_data)):
typ = data.typ
if isinstance(typ, ByteArrayLike):
pack_args_by_32(
holder=holder,
maxlen=maxlen,
arg=prealloacted.get(i, arg),
typ=typ,
context=context,
placeholder=placeholder_map[i],
datamem_start=datamem_start,
dynamic_offset_counter=dynamic_offset_counter,
zero_pad_i=zero_pad_i,
pos=pos
)
return holder, maxlen, dynamic_offset_counter, datamem_start
def parse_to_lll(kode, runtime_only=False, interface_codes=None):
code = parse_to_ast(kode)
return parse_tree_to_lll(code, kode, runtime_only=runtime_only, interface_codes=interface_codes)
|
the-stack_0_1902 | import datajoint as dj
import numpy as np
from numpy.lib import emath
from functools import reduce
from .common_session import Session # noqa: F401
schema = dj.schema('common_interval')
# TODO: ADD export to NWB function to save relevant intervals in an NWB file
@schema
class IntervalList(dj.Manual):
definition = """
# Time intervals used for analysis
-> Session
interval_list_name: varchar(200) # descriptive name of this interval list
---
valid_times: longblob # numpy array with start and end times for each interval
"""
@classmethod
def insert_from_nwbfile(cls, nwbf, *, nwb_file_name):
"""Add each entry in the NWB file epochs table to the IntervalList table.
The interval list name for each epoch is set to the first tag for the epoch.
If the epoch has no tags, then 'interval_x' will be used as the interval list name, where x is the index
(0-indexed) of the epoch in the epochs table.
The start time and stop time of the epoch are stored in the valid_times field as a numpy array of
[start time, stop time] for each epoch.
Parameters
----------
nwbf : pynwb.NWBFile
The source NWB file object.
nwb_file_name : str
The file name of the NWB file, used as a primary key to the Session table.
"""
if nwbf.epochs is None:
print('No epochs found in NWB file.')
return
epochs = nwbf.epochs.to_dataframe()
for epoch_index, epoch_data in epochs.iterrows():
epoch_dict = dict()
epoch_dict['nwb_file_name'] = nwb_file_name
if epoch_data.tags[0]:
epoch_dict['interval_list_name'] = epoch_data.tags[0]
else:
epoch_dict['interval_list_name'] = 'interval_' + str(epoch_index)
epoch_dict['valid_times'] = np.asarray(
[[epoch_data.start_time, epoch_data.stop_time]])
cls.insert1(epoch_dict, skip_duplicates=True)
# TODO: make all of the functions below faster if possible
def intervals_by_length(interval_list, min_length=0.0, max_length=1e10):
"""Returns an interval list with only the intervals whose length is > min_length and < max_length
Args:
interval_list ((N,2) np.array): input interval list.
min_length (float, optional): minimum interval length in seconds. Defaults to 0.0.
max_length (float, optional): maximum interval length in seconds. Defaults to 1e10.
"""
# get the length of each interval
lengths = np.ravel(np.diff(interval_list))
# return only intervals of the appropriate lengths
return interval_list[np.logical_and(lengths > min_length, lengths < max_length)]
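# Illustrative example: intervals_by_length(np.array([[0, 1], [2, 10]]), min_length=5) keeps only the [2, 10] interval.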
def interval_list_contains_ind(valid_times, timestamps):
"""Returns the indices for the timestamps that are contained within the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: indices of timestamps that are in one of the valid_times intervals
"""
ind = []
for valid_time in valid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps >= valid_time[0],
timestamps <= valid_time[1]))).tolist()
return np.asarray(ind)
def interval_list_contains(valid_times, timestamps):
"""Returns the timestamps that are contained within the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: numpy array of timestamps that are in one of the valid_times intervals
"""
ind = []
for valid_time in valid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps >= valid_time[0],
timestamps <= valid_time[1]))).tolist()
return timestamps[ind]
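# Illustrative example: interval_list_contains(np.array([[0, 2], [5, 7]]), np.array([1, 3, 6])) returns array([1, 6]).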
def interval_list_excludes_ind(valid_times, timestamps):
"""Returns the indices of the timestamps that are excluded from the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: numpy array of indices of the timestamps that fall outside all of the valid_times intervals
"""
# add the first and last times to the list and create a list of invalid intervals
valid_times_list = np.ndarray.ravel(valid_times).tolist()
valid_times_list.insert(0, timestamps[0] - 0.00001)
valid_times_list.append(timestamps[-1] + 0.001)
invalid_times = np.array(valid_times_list).reshape(-1, 2)
# add the first and last timestamp indices
ind = []
for invalid_time in invalid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps > invalid_time[0],
timestamps < invalid_time[1]))).tolist()
return np.asarray(ind)
def interval_list_excludes(valid_times, timestamps):
"""Returns the indices of the timestamps that are excluded from the valid_times intervals
:param valid_times: Array of [start, end] times
:type valid_times: numpy array
:param timestamps: list of timestamps
:type timestamps: numpy array or list
:return: numpy array of timestamps that fall outside all of the valid_times intervals
"""
# add the first and last times to the list and create a list of invalid intervals
valid_times_list = np.ravel(valid_times).tolist()
valid_times_list.insert(0, timestamps[0] - 0.00001)
valid_times_list.append(timestamps[-1] + 0.00001)
invalid_times = np.array(valid_times_list).reshape(-1, 2)
# add the first and last timestamp indices
ind = []
for invalid_time in invalid_times:
ind += np.ravel(np.argwhere(np.logical_and(timestamps > invalid_time[0],
timestamps < invalid_time[1]))).tolist()
return timestamps[ind]
def interval_list_intersect(interval_list1, interval_list2, min_length=0):
"""Finds the intersections between two interval lists
Parameters
----------
interval_list1 : np.array, (N,2) where N = number of intervals
interval_list2 : np.array, (N,2) where N = number of intervals
min_length : float, optional.
Minimum length of intervals to include, default 0
Each interval is (start time, stop time)
Returns
-------
interval_list: np.array, (N,2)
"""
# first, consolidate interval lists to disjoint intervals by sorting and applying union
if interval_list1.ndim==1:
interval_list1 = np.expand_dims(interval_list1,0)
else:
interval_list1 = interval_list1[np.argsort(interval_list1[:,0])]
interval_list1 = reduce(_union_concat, interval_list1)
# the following check is needed in the case where the interval list is a single element (behavior of reduce)
if interval_list1.ndim==1:
interval_list1 = np.expand_dims(interval_list1,0)
if interval_list2.ndim==1:
interval_list2 = np.expand_dims(interval_list2,0)
else:
interval_list2 = interval_list2[np.argsort(interval_list2[:,0])]
interval_list2 = reduce(_union_concat, interval_list2)
# the following check is needed in the case where the interval list is a single element (behavior of reduce)
if interval_list2.ndim==1:
interval_list2 = np.expand_dims(interval_list2,0)
# then do pairwise comparison and collect intersections
intersecting_intervals = []
for interval2 in interval_list2:
for interval1 in interval_list1:
if _intersection(interval2, interval1) is not None:
intersecting_intervals.append(_intersection(interval1, interval2))
# if no intersection, then return an empty list
if not intersecting_intervals:
return []
else:
intersecting_intervals = np.asarray(intersecting_intervals)
intersecting_intervals = intersecting_intervals[np.argsort(intersecting_intervals[:,0])]
return intervals_by_length(intersecting_intervals, min_length=min_length)
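# Illustrative example: interval_list_intersect(np.array([[0, 10]]), np.array([[5, 15], [20, 30]])) returns array([[5, 10]]).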
def _intersection(interval1, interval2):
"Takes the (set-theoretic) intersection of two intervals"
intersection = np.array([max([interval1[0],interval2[0]]),
min([interval1[1],interval2[1]])])
if intersection[1]>intersection[0]:
return intersection
else:
return None
def _union(interval1, interval2):
"Takes the (set-theoretic) union of two intervals"
if _intersection(interval1, interval2) is None:
return np.array([interval1, interval2])
else:
return np.array([min([interval1[0],interval2[0]]),
max([interval1[1],interval2[1]])])
def _union_concat(interval_list, interval):
"""Compares the last interval of the interval list to the given interval and
* takes their union if overlapping
* concatenates the interval to the interval list if not
Recursively called with `reduce`.
"""
if interval_list.ndim==1:
interval_list = np.expand_dims(interval_list, 0)
if interval.ndim==1:
interval = np.expand_dims(interval, 0)
x = _union(interval_list[-1], interval[0])
if x.ndim==1:
x = np.expand_dims(x, 0)
return np.concatenate((interval_list[:-1], x), axis=0)
def union_adjacent_index(interval1, interval2):
"""unions two intervals that are adjacent in index
e.g. [a,b] and [b+1, c] is converted to [a,c]
if not adjacent, just concatenates interval2 at the end of interval1
Parameters
----------
interval1 : np.array
[description]
interval2 : np.array
[description]
"""
if interval1.ndim==1:
interval1 = np.expand_dims(interval1, 0)
if interval2.ndim==1:
interval2 = np.expand_dims(interval2, 0)
if interval1[-1][1]+1 == interval2[0][0] or interval2[0][1]+1 == interval1[-1][0]:
x = np.array([[np.min([interval1[-1][0],interval2[0][0]]),
np.max([interval1[-1][1],interval2[0][1]])]])
return np.concatenate((interval1[:-1], x), axis=0)
else:
return np.concatenate((interval1, interval2),axis=0)
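# Illustrative example: union_adjacent_index(np.array([[0, 4]]), np.array([[5, 9]])) returns array([[0, 9]]).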
# TODO: test interval_list_union code
def interval_list_union(interval_list1, interval_list2, min_length=0.0, max_length=1e10):
"""Finds the union (all times in one or both) for two interval lists
:param interval_list1: The first interval list
:type interval_list1: numpy array of intervals [start, stop]
:param interval_list2: The second interval list
:type interval_list2: numpy array of intervals [start, stop]
:param min_length: optional minimum length of interval for inclusion in output, default 0.0
:type min_length: float
:param max_length: optional maximum length of interval for inclusion in output, default 1e10
:type max_length: float
:return: interval_list
:rtype: numpy array of intervals [start, stop]
"""
# return np.array([min(interval_list1[0],interval_list2[0]),
# max(interval_list1[1],interval_list2[1])])
interval_list1 = np.ravel(interval_list1)
# create a parallel list where 1 indicates the start and -1 the end of an interval
interval_list1_start_end = np.ones(interval_list1.shape)
interval_list1_start_end[1::2] = -1
interval_list2 = np.ravel(interval_list2)
# create a parallel list for the second interval where 1 indicates the start and -1 the end of an interval
interval_list2_start_end = np.ones(interval_list2.shape)
interval_list2_start_end[1::2] = -1
# concatenate the two lists so we can resort the intervals and apply the same sorting to the start-end arrays
combined_intervals = np.concatenate((interval_list1, interval_list2))
ss = np.concatenate((interval_list1_start_end, interval_list2_start_end))
sort_ind = np.argsort(combined_intervals)
combined_intervals = combined_intervals[sort_ind]
# a cumulative sum of 1 indicates the beginning of a joint interval; a cumulative sum of 0 indicates the end
union_starts = np.ravel(np.array(np.where(np.cumsum(ss[sort_ind]) == 1)))
union_stops = np.ravel(np.array(np.where(np.cumsum(ss[sort_ind]) == 0)))
union = []
for start, stop in zip(union_starts, union_stops):
union.append([combined_intervals[start], combined_intervals[stop]])
return np.asarray(union)
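# Illustrative example: interval_list_union(np.array([[0, 3]]), np.array([[2, 5]])) returns array([[0, 5]]).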
def interval_list_censor(interval_list, timestamps):
"""returns a new interval list that starts and ends at the first and last timestamp
Args:
interval_list (numpy array of intervals [start, stop]): interval list from IntervalList valid times
timestamps (numpy array or list): timestamp list
Returns:
interval_list (numpy array of intervals [start, stop])
"""
# check that all timestamps are in the interval list
assert len(interval_list_contains_ind(interval_list, timestamps)) == len(timestamps), 'interval_list must contain all timestamps'
timestamps_interval = np.asarray([[timestamps[0], timestamps[-1]]])
return interval_list_intersect(interval_list, timestamps_interval)
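# Illustrative example: interval_list_censor(np.array([[0, 10]]), np.array([2, 3, 8])) returns array([[2, 8]]).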
|
the-stack_0_1906 | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import re
from azurelinuxagent.common.utils.textutil import parse_doc, find, findall
from tests.tools import load_bin_data, load_data, MagicMock, Mock
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest": "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip",
"remote_access": None,
"in_vm_artifacts_profile": None
}
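# Each DATA_FILE_* variant below copies the base map and overrides only the wire-protocol data files needed for that test scenario.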
DATA_FILE_IN_VM_ARTIFACTS_PROFILE = DATA_FILE.copy()
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["ext_conf"] = "wire/ext_conf_in_vm_artifacts_profile.xml"
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["in_vm_artifacts_profile"] = "wire/in_vm_artifacts_profile.json"
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml"
DATA_FILE_NO_EXT["ext_conf"] = None
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
DATA_FILE_EXT_DELETION = DATA_FILE.copy()
DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_EXT_SINGLE = DATA_FILE.copy()
DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy()
DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml"
DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy()
DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml"
DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy()
DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml"
DATA_FILE_REMOTE_ACCESS = DATA_FILE.copy()
DATA_FILE_REMOTE_ACCESS["goal_state"] = "wire/goal_state_remote_access.xml"
DATA_FILE_REMOTE_ACCESS["remote_access"] = "wire/remote_access_single_account.xml"
DATA_FILE_PLUGIN_SETTINGS_MISMATCH = DATA_FILE.copy()
DATA_FILE_PLUGIN_SETTINGS_MISMATCH["ext_conf"] = "wire/ext_conf_plugin_settings_version_mismatch.xml"
class WireProtocolData(object):
def __init__(self, data_files=DATA_FILE):
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions": 0,
"/versions": 0,
"/health": 0,
"/HealthService": 0,
"/vmAgentLog": 0,
"goalstate": 0,
"hostingenvuri": 0,
"sharedconfiguri": 0,
"certificatesuri": 0,
"extensionsconfiguri": 0,
"remoteaccessinfouri": 0,
"extensionArtifact": 0,
"manifest.xml": 0,
"manifest_of_ga.xml": 0,
"ExampleHandlerLinux": 0,
"in_vm_artifacts_profile": 0
}
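# The counters above record how many times each mocked wire/HostPlugin endpoint is hit, so tests can assert on the expected traffic.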
self.data_files = data_files
self.version_info = None
self.goal_state = None
self.hosting_env = None
self.shared_config = None
self.certs = None
self.ext_conf = None
self.manifest = None
self.ga_manifest = None
self.trans_prv = None
self.trans_cert = None
self.ext = None
self.remote_access = None
self.in_vm_artifacts_profile = None
self.reload()
def reload(self):
self.version_info = load_data(self.data_files.get("version_info"))
self.goal_state = load_data(self.data_files.get("goal_state"))
self.hosting_env = load_data(self.data_files.get("hosting_env"))
self.shared_config = load_data(self.data_files.get("shared_config"))
self.certs = load_data(self.data_files.get("certs"))
self.ext_conf = self.data_files.get("ext_conf")
if self.ext_conf is not None:
self.ext_conf = load_data(self.ext_conf)
self.manifest = load_data(self.data_files.get("manifest"))
self.ga_manifest = load_data(self.data_files.get("ga_manifest"))
self.trans_prv = load_data(self.data_files.get("trans_prv"))
self.trans_cert = load_data(self.data_files.get("trans_cert"))
self.ext = load_bin_data(self.data_files.get("test_ext"))
remote_access_data_file = self.data_files.get("remote_access")
if remote_access_data_file is not None:
self.remote_access = load_data(remote_access_data_file)
in_vm_artifacts_profile_file = self.data_files.get("in_vm_artifacts_profile")
if in_vm_artifacts_profile_file is not None:
self.in_vm_artifacts_profile = load_data(in_vm_artifacts_profile_file)
def mock_http_get(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if "comp=versions" in url: # wire server versions
content = self.version_info
self.call_counts["comp=versions"] += 1
elif "/versions" in url: # HostPlugin versions
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif url.endswith("/health"): # HostPlugin health
content = ''
self.call_counts["/health"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif "hostingenvuri" in url:
content = self.hosting_env
self.call_counts["hostingenvuri"] += 1
elif "sharedconfiguri" in url:
content = self.shared_config
self.call_counts["sharedconfiguri"] += 1
elif "certificatesuri" in url:
content = self.certs
self.call_counts["certificatesuri"] += 1
elif "extensionsconfiguri" in url:
content = self.ext_conf
self.call_counts["extensionsconfiguri"] += 1
elif "remoteaccessinfouri" in url:
content = self.remote_access
self.call_counts["remoteaccessinfouri"] += 1
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs:
raise ValueError("HostPlugin request is missing the HTTP headers: {0}", kwargs)
if "x-ms-artifact-location" not in kwargs["headers"]:
raise ValueError("HostPlugin request is missing the x-ms-artifact-location header: {0}", kwargs)
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif "manifest_of_ga.xml" in url:
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_post(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/HealthService'):
self.call_counts['/HealthService'] += 1
content = ''
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_put(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/vmAgentLog'):
self.call_counts['/vmAgentLog'] += 1
content = ''
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
#Partially patch instance method of class CryptUtil
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
def get_no_of_plugins_in_extension_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugins_list = find(ext_config_doc, "Plugins")
return len(findall(plugins_list, "Plugin"))
#
# Having trouble reading the regular expressions below? you are not alone!
#
# For the use of "(?<=" "(?=" see 7.2.1 in https://docs.python.org/3.1/library/re.html
# For the use of "\g<1>" see backreferences in https://docs.python.org/3.1/library/re.html#re.sub
#
# Note that these regular expressions are not enough to parse all valid XML documents (e.g. they do
# not account for metacharacters like < or > in the values) but they are good enough for the test
# data. There are some basic checks, but the functions may not match valid XML or produce invalid
# XML if their input is too complex.
#
@staticmethod
def replace_xml_element_value(xml_document, element_name, element_value):
new_xml_document = re.sub(r'(?<=<{0}>).+(?=</{0}>)'.format(element_name), element_value, xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match element '{0}'", element_name)
return new_xml_document
@staticmethod
def replace_xml_attribute_value(xml_document, element_name, attribute_name, attribute_value):
new_xml_document = re.sub(r'(?<=<{0} )(.*{1}=")[^"]+(?="[^>]*>)'.format(element_name, attribute_name), r'\g<1>{0}'.format(attribute_value), xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match attribute '{0}' of element '{1}'".format(attribute_name, element_name))
return new_xml_document
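# A hedged illustration of how the two helpers above behave (the XML snippets are
# made up for this comment; they are not taken from the test data):
#
#   WireProtocolData.replace_xml_element_value('<Incarnation>1</Incarnation>', 'Incarnation', '2')
#   -> '<Incarnation>2</Incarnation>'
#
#   WireProtocolData.replace_xml_attribute_value('<Plugin name="A" version="1.0">', 'Plugin', 'version', '2.0')
#   -> '<Plugin name="A" version="2.0">'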
def set_incarnation(self, incarnation):
'''
Sets the incarnation in the goal state, but not on its subcomponents (e.g. hosting env, shared config)
'''
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "Incarnation", str(incarnation))
def set_container_id(self, container_id):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ContainerId", container_id)
def set_role_config_name(self, role_config_name):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ConfigName", role_config_name)
def set_hosting_env_deployment_name(self, deployment_name):
self.hosting_env = WireProtocolData.replace_xml_attribute_value(self.hosting_env, "Deployment", "name", deployment_name)
def set_shared_config_deployment_name(self, deployment_name):
self.shared_config = WireProtocolData.replace_xml_attribute_value(self.shared_config, "Deployment", "name", deployment_name)
def set_extensions_config_sequence_number(self, sequence_number):
'''
Sets the sequence number for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "RuntimeSettings", "seqNo", str(sequence_number))
def set_extensions_config_version(self, version):
'''
Sets the version for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "version", version)
def set_extensions_config_state(self, state):
'''
Sets the state for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "state", state)
def set_manifest_version(self, version):
'''
Sets the version of the extension manifest
'''
self.manifest = WireProtocolData.replace_xml_element_value(self.manifest, "Version", version)
|
the-stack_0_1907 | import logging
import sys
import time
class ColorFormatter(logging.Formatter):
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = '\033[0m'
COLOR_SEQ_TEMPLATE = '\033[1;{fore_color_int}m'
LEVELNO_TO_COLOR_INT_DICT = {
logging.WARNING: YELLOW,
logging.ERROR: RED,
}
def format(self, record):
message = logging.Formatter.format(self, record)
color_seq = ''
if record.levelno in self.LEVELNO_TO_COLOR_INT_DICT:
fore_color_int = 30 + self.LEVELNO_TO_COLOR_INT_DICT[record.levelno]
color_seq = self.COLOR_SEQ_TEMPLATE.format(fore_color_int=fore_color_int)
return '{0}{1}{2}'.format(color_seq, message, self.RESET_SEQ)
class NullHandler(logging.Handler):
def emit(self, record):
pass
class LessThanFilter(logging.Filter, object):
def __init__(self, exclusive_maximum, name=""):
super(LessThanFilter, self).__init__(name)
self.max_level = exclusive_maximum
def filter(self, record):
return 1 if record.levelno < self.max_level else 0
class DeferInfoToDebugFilter(logging.Filter, object):
def __init__(self, name=""):
super(DeferInfoToDebugFilter, self).__init__(name)
def filter(self, record):
if record.levelno == logging.INFO:
record.levelno = logging.DEBUG
record.levelname = 'DEBUG'
return 1
def register_tty_handler(stream, minlevel, maxlevel):
logging_handler = logging.StreamHandler(stream)
logging_handler.setFormatter(ColorFormatter('%(message)s'))
if minlevel is not None:
logging_handler.setLevel(minlevel)
else:
logging_handler.setLevel(logging.NOTSET)
if maxlevel is not None:
logging_handler.addFilter(LessThanFilter(maxlevel))
logging.getLogger().addHandler(logging_handler)
def register_file_handler(log_file_path, level=logging.DEBUG):
logging_handler = logging.FileHandler(log_file_path)
logging_handler.setFormatter(logging.Formatter('%(asctime)s.%(msecs)03dZ - %(levelname)7s - %(filename)30s:%(lineno)4d - %(message)s', '%Y-%m-%dT%H:%M:%S'))
logging_handler.formatter.converter = time.gmtime
logging_handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(logging_handler)
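# Hedged usage sketch (the log path and the level split are illustrative, not prescriptive):
#
#   register_tty_handler(sys.stdout, logging.DEBUG, logging.WARNING)  # DEBUG/INFO -> stdout
#   register_tty_handler(sys.stderr, logging.WARNING, None)           # WARNING and above -> stderr
#   register_file_handler('/tmp/example.log')
#   logging.getLogger().setLevel(logging.DEBUG)  # the root logger must let records through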
|
the-stack_0_1908 | """ Module who handle updating """
import os
from dataclasses import dataclass
from pathlib import Path
from shutil import copyfile
from typing import Union
from packaging.version import InvalidVersion, Version
from inupdater.config import SettingsManager
from inupdater.ui import UserInterface
@dataclass(eq=False, order=False)
class Exefile:
path: Path
version: Version
def __eq__(self, o: object) -> bool:
return self.version == o.version
def __lt__(self, o: object) -> bool:
return self.version < o.version
def __le__(self, o: object) -> bool:
return self.version <= o.version
def __gt__(self, o: object) -> bool:
return self.version > o.version
def __ge__(self, o: object) -> bool:
return self.version >= o.version
class ExeUpdater:
def __init__(self, install_path: Path, ui: UserInterface) -> None:
self.install_path = install_path
self.ui = ui
settings_path = install_path / Path("settings.json")
# --------------------------------------
# Test Purpose only
# --------------------------------------
appexemple_path = Path().cwd() / Path("tests/appexemple")
test_settings = appexemple_path / Path("settings.json")
if not settings_path.exists() and test_settings.exists():
self.install_path = appexemple_path
settings_path = test_settings
# --------------------------------------
settingsmanager = SettingsManager(settings_path)
with settingsmanager as self.settings:
self.local = None
self.update = None
self.ui.show_message("Checking for updates...")
self.ui.set_state(2)
if self.update > self.local:
self.ui.show_message(f"We find a new update ! : {self.update.version}")
self.ui.set_state(4)
copyfile(self.update.path, self.local.path)
self.settings.version = self.update.version
self.ui.show_message("Update installed !")
self.ui.set_state(6)
@property
def local(self) -> Exefile:
exe_path = self.install_path / Path(
f"{self.settings._exe_name}.exe"
) # TODO EXE or not?!? check with no Admin
assert exe_path.exists()
exe_version = self.settings.version
return Exefile(exe_path, exe_version)
@local.setter
def local(self, _):
return
@property
def update(self) -> Exefile:
exe_path = self.check_for_latest_update(self.settings.dist_location)
exe_version = self.get_version(exe_path)
return Exefile(exe_path, exe_version)
@update.setter
def update(self, _) -> None:
return
@staticmethod
def get_version(pathver: Path) -> Version:
try:
return Version(pathver.stem.split("_")[1])
except IndexError as idx:
raise idx
except InvalidVersion as ive:
raise ive
@staticmethod
def get_exe_list(path: Path) -> list[Path]:
return [
f
for f in Path(path).iterdir()
if f.suffix == ".exe" and f.stem != "unins000"
]
def check_for_latest_update(self, path: Path) -> Path:
"""Check for latest update in a given path"""
exe_list = self.get_exe_list(path)
last = sorted(exe_list, key=self.get_version)[-1]
return last
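# Hedged illustration of the filename convention get_version() assumes, i.e. a single
# underscore separating the name from the version ("<name>_<version>.exe"):
#
#   get_version(Path("dist/app_1.2.3.exe"))  # -> Version("1.2.3")
#   get_version(Path("dist/app.exe"))        # -> raises IndexError (no "_<version>" part)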
def launch(self, *args):
command = [str(self.local.path), *args]
self.ui.show_message(f"Launching {self.settings._exe_name}")
self.ui.set_state(8)
self.ui.show_message("Please wait..")
self.ui.set_state(10)
self.ui.close()
os.system(" ".join([str(c) for c in command]))
|
the-stack_0_1909 | import yaml
import json
from teuthology.test import fake_archive
from teuthology import report
class TestSerializer(object):
def setup(self):
self.archive = fake_archive.FakeArchive()
self.archive.setup()
self.archive_base = self.archive.archive_base
self.reporter = report.ResultsReporter(archive_base=self.archive_base)
def teardown(self):
self.archive.teardown()
def test_all_runs_one_run(self):
run_name = "test_all_runs"
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
self.archive.create_fake_run(run_name, job_count, yaml_path)
assert [run_name] == self.reporter.serializer.all_runs
def test_all_runs_three_runs(self):
run_count = 3
runs = {}
for i in range(run_count):
run_name = "run #%s" % i
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
job_ids = self.archive.create_fake_run(
run_name,
job_count,
yaml_path)
runs[run_name] = job_ids
assert sorted(runs.keys()) == sorted(self.reporter.serializer.all_runs)
def test_jobs_for_run(self):
run_name = "test_jobs_for_run"
yaml_path = "examples/3node_ceph.yaml"
job_count = 3
jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
job_ids = [str(job['job_id']) for job in jobs]
got_jobs = self.reporter.serializer.jobs_for_run(run_name)
assert sorted(job_ids) == sorted(got_jobs.keys())
def test_running_jobs_for_run(self):
run_name = "test_jobs_for_run"
yaml_path = "examples/3node_ceph.yaml"
job_count = 10
num_hung = 3
self.archive.create_fake_run(run_name, job_count, yaml_path,
num_hung=num_hung)
got_jobs = self.reporter.serializer.running_jobs_for_run(run_name)
assert len(got_jobs) == num_hung
def test_json_for_job(self):
run_name = "test_json_for_job"
yaml_path = "examples/3node_ceph.yaml"
job_count = 1
jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
job = jobs[0]
with open(yaml_path) as yaml_file:
obj_from_yaml = yaml.safe_load(yaml_file)
full_obj = obj_from_yaml.copy()
full_obj.update(job['info'])
full_obj.update(job['summary'])
out_json = self.reporter.serializer.json_for_job(
run_name, str(job['job_id']))
out_obj = json.loads(out_json)
assert full_obj == out_obj
|
the-stack_0_1910 | import sys
import pyportus as portus
class ConstFlow():
INIT_RATE = 1000000
def __init__(self, datapath, datapath_info):
self.datapath = datapath
self.datapath_info = datapath_info
self.rate = ConstFlow.INIT_RATE
self.datapath.set_program("default", [("Rate", self.rate)])
def on_report(self, r):
self.datapath.update_field("Rate", self.rate)
class Const(portus.AlgBase):
def datapath_programs(self):
return {
"default" : """\
(def (Report
(volatile acked 0)
(volatile loss 0)
(volatile rtt 0)
))
(when true
(:= Report.rtt Flow.rtt_sample_us)
(:= Report.acked (+ Report.acked Ack.bytes_acked))
(:= Report.loss Ack.lost_pkts_sample)
(report)
)
"""
}
def new_flow(self, datapath, datapath_info):
return ConstFlow(datapath, datapath_info)
alg = Const()
portus.start("unix", alg)
|
the-stack_0_1911 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.Element import Element
class AltGeneratingUnitMeas(Element):
"""A prioritized measurement to be used for the generating unit in the control area specificaiton.A prioritized measurement to be used for the generating unit in the control area specificaiton.
"""
def __init__(self, priority=0, AnalogValue=None, ControlAreaGeneratingUnit=None, *args, **kw_args):
"""Initialises a new 'AltGeneratingUnitMeas' instance.
@param priority: Priority of a measurement usage. Lower numbers have first priority.
@param AnalogValue: The specific analog value used as a source.
@param ControlAreaGeneratingUnit: The control area generating unit to which the prioritized measurement assignment is applied.
"""
#: Priority of a measurement usage. Lower numbers have first priority.
self.priority = priority
self._AnalogValue = None
self.AnalogValue = AnalogValue
self._ControlAreaGeneratingUnit = None
self.ControlAreaGeneratingUnit = ControlAreaGeneratingUnit
super(AltGeneratingUnitMeas, self).__init__(*args, **kw_args)
_attrs = ["priority"]
_attr_types = {"priority": int}
_defaults = {"priority": 0}
_enums = {}
_refs = ["AnalogValue", "ControlAreaGeneratingUnit"]
_many_refs = []
def getAnalogValue(self):
"""The specific analog value used as a source.
"""
return self._AnalogValue
def setAnalogValue(self, value):
if self._AnalogValue is not None:
filtered = [x for x in self.AnalogValue.AltGeneratingUnit if x != self]
self._AnalogValue._AltGeneratingUnit = filtered
self._AnalogValue = value
if self._AnalogValue is not None:
if self not in self._AnalogValue._AltGeneratingUnit:
self._AnalogValue._AltGeneratingUnit.append(self)
AnalogValue = property(getAnalogValue, setAnalogValue)
def getControlAreaGeneratingUnit(self):
"""The control aread generating unit to which the prioritized measurement assignment is applied.
"""
return self._ControlAreaGeneratingUnit
def setControlAreaGeneratingUnit(self, value):
if self._ControlAreaGeneratingUnit is not None:
filtered = [x for x in self.ControlAreaGeneratingUnit.AltGeneratingUnitMeas if x != self]
self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas = filtered
self._ControlAreaGeneratingUnit = value
if self._ControlAreaGeneratingUnit is not None:
if self not in self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas:
self._ControlAreaGeneratingUnit._AltGeneratingUnitMeas.append(self)
ControlAreaGeneratingUnit = property(getControlAreaGeneratingUnit, setControlAreaGeneratingUnit)
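# Hedged usage sketch (the AnalogValue and ControlAreaGeneratingUnit instances are assumed
# to come from elsewhere in a CIM15 model; they are not defined in this module):
#
#   meas = AltGeneratingUnitMeas(priority=1, AnalogValue=analog_value,
#                                ControlAreaGeneratingUnit=ca_gen_unit)
#   # The property setters above also maintain the back-references, appending `meas` to
#   # analog_value.AltGeneratingUnit and ca_gen_unit.AltGeneratingUnitMeas.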
|
the-stack_0_1912 | import collections
from typing import Any, Iterable, Iterator, Optional, Tuple
from river.base.typing import Dataset
from river.metrics import RegressionMetric
from .base import Forecaster
from .metric import HorizonMetric
TimeSeries = Iterator[
Tuple[
Optional[dict], # x
Any, # y
Iterable[Optional[dict]], # x_horizon
Iterable[Any], # y_horizon
]
]
def _iter_with_horizon(dataset: Dataset, horizon: int) -> TimeSeries:
"""
Examples
--------
>>> from river import datasets
>>> from river.time_series.evaluate import _iter_with_horizon
>>> dataset = datasets.AirlinePassengers()
>>> for x, y, x_horizon, y_horizon in _iter_with_horizon(dataset.take(8), horizon=3):
... print(x['month'].strftime('%Y-%m-%d'), y)
... print([x['month'].strftime('%Y-%m-%d') for x in x_horizon])
... print(list(y_horizon))
... print('---')
1949-01-01 112
['1949-02-01', '1949-03-01', '1949-04-01']
[118, 132, 129]
---
1949-02-01 118
['1949-03-01', '1949-04-01', '1949-05-01']
[132, 129, 121]
---
1949-03-01 132
['1949-04-01', '1949-05-01', '1949-06-01']
[129, 121, 135]
---
1949-04-01 129
['1949-05-01', '1949-06-01', '1949-07-01']
[121, 135, 148]
---
1949-05-01 121
['1949-06-01', '1949-07-01', '1949-08-01']
[135, 148, 148]
---
"""
x_horizon = collections.deque(maxlen=horizon)
y_horizon = collections.deque(maxlen=horizon)
stream = iter(dataset)
for _ in range(horizon):
x, y = next(stream)
x_horizon.append(x)
y_horizon.append(y)
for x, y in stream:
x_now = x_horizon.popleft()
y_now = y_horizon.popleft()
x_horizon.append(x)
y_horizon.append(y)
yield x_now, y_now, x_horizon, y_horizon
def _evaluate(
dataset: Dataset,
model: Forecaster,
metric: RegressionMetric,
horizon: int,
grace_period: int,
) -> HorizonMetric:
horizon_metric = HorizonMetric(metric)
steps = _iter_with_horizon(dataset, horizon)
for _ in range(grace_period):
x, y, x_horizon, y_horizon = next(steps)
model.learn_one(y=y, x=x)
for x, y, x_horizon, y_horizon in steps:
y_pred = model.forecast(horizon, xs=x_horizon)
horizon_metric.update(y_horizon, y_pred)
model.learn_one(y=y, x=x)
yield y_pred, horizon_metric
def evaluate(
dataset: Dataset,
model: Forecaster,
metric: RegressionMetric,
horizon: int,
grace_period=1,
) -> HorizonMetric:
"""Evaluates the performance of a forecaster on a time series dataset.
To understand why this method is useful, it's important to understand the difference between
nowcasting and forecasting. Nowcasting is about predicting a value at the next time step. This
can be seen as a special case of regression, where the value to predict is the value at the
next time step. In this case, the `evaluate.progressive_val_score` function may be used to
evaluate a model via progressive validation.
Forecasting models can also be evaluated via progressive validation. This is the purpose of
this function. At each time step `t`, the forecaster is asked to predict the values at `t + 1`,
`t + 2`, ..., `t + horizon`. The performance at each time step is measured and returned.
Parameters
----------
dataset
A sequential time series.
model
A forecaster.
metric
A regression metric.
horizon
grace_period
Initial period during which the metric is not updated. This is to fairly evaluate models
which need a warming up period to start producing meaningful forecasts. The first forecast
is skipped by default.
"""
horizon_metric = None
steps = _evaluate(dataset, model, metric, horizon, grace_period)
for _, horizon_metric in steps:
pass
return horizon_metric
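# Hedged usage sketch (the model and metric are illustrative choices; any river
# Forecaster / RegressionMetric pair should work the same way):
#
#   from river import datasets, metrics, time_series
#
#   horizon_metric = evaluate(
#       dataset=datasets.AirlinePassengers(),
#       model=time_series.HoltWinters(alpha=0.3, beta=0.1, gamma=0.6, seasonality=12),
#       metric=metrics.MAE(),
#       horizon=12,
#   )
#   print(horizon_metric)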
|
the-stack_0_1913 | import os
IMAGE_SIZE = 256
NUM_WORKERS = 4
TRAINING_BATCH_SIZE = 8
VAL_BATCH_SIZE = 8
EPOCH = 20
MILESTONES = [5, 10, 15]
SAVE_EPOCH = 5
WARM_EPOCH = 1
CHECKPOINTS_PATH = './checkpoints/'
LEARNING_RATE = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
GAMMA = 0.2
FRAME_SAMPLE = 10 |
the-stack_0_1915 | #!/usr/bin/env python3
# Publications markdown generator for academicpages
# Data format: JSON, see publications.json for examples
# Caution: Overwrites ../auto-publications.md
import json
JOURNAL_PUB = "journal"
CONFERENCE_PUB = "conference"
SHORT_PUB = "short"
ARXIV_PUB = "arxiv"
DISSERTATION_PUB = "dissertation"
PATENT_PUB = "patent"
POSTER_PUB = "poster"
def writeOutPrefix(handle):
handle.write("""---
layout: single
title: "Publications"
permalink: /publications/
author_profile: true
---
Here are the publications to which I have contributed.
To see them organized by project, see [here](/research).
""")
FILE_PATH = "{{ site.url }}/{{ site.baseurl }}/{{ site.filesurl }}/publications"
def makeLink(url):
if 'http' in url:
return url
else:
return "{}/{}".format(FILE_PATH, url)
def pub2md(pub):
links = []
if 'paperBasename' in pub and pub['paperBasename']:
links.append('<a href="{}"><i class="fas fa-file-pdf"></i></a>'.format(
makeLink(pub['paperBasename'])
))
if 'slidesBasename' in pub and pub['slidesBasename']:
links.append('<a href="{}"><i class="fas fa-file-powerpoint"></i></a>'.format(
makeLink(pub['slidesBasename'])
))
if 'artifactURL' in pub and pub['artifactURL']:
links.append('<a href="{}"><i class="fas fa-file-code"></i></a>'.format(
makeLink(pub['artifactURL'])
))
if 'videoURL' in pub and pub['videoURL']:
links.append('<a href="{}"><i class="fas fa-video"></i></a>'.format(
makeLink(pub['videoURL'])
))
if 'blogURL' in pub and pub['blogURL']:
links.append('<a href="{}"><i class="fab fa-medium"></i></a>'.format(
makeLink(pub['blogURL'])
))
if 'bestPaperAward' in pub and pub['bestPaperAward']:
links.append('[Best Paper Award](){: .btn}')
if len(pub['authors']) == 1:
authList = pub['authors'][0]
elif len(pub['authors']) == 2:
authList = ' and '.join(pub['authors'])
else:
authList = ', '.join(pub['authors'][:-1])
authList += ", and " + pub['authors'][-1]
cite = "*{}*. \n {}. \n {} {}. ".format(
pub['title'],
authList,
pub['venue'], pub['year'],
)
return cite + "\n " + ' '.join(links)
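# Hedged illustration with a hypothetical entry (not taken from publications.json):
#
#   pub2md({'title': 'A Study', 'authors': ['A. Author', 'B. Author'],
#           'venue': 'SOSP', 'year': 2020})
#   -> "*A Study*. \n A. Author and B. Author. \n SOSP 2020. \n "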
def writePubs(handle, headingTitle, pubs):
handle.write('\n## {}\n\n'.format(headingTitle))
for i, pub in enumerate(pubs):
handle.write("{}. {}\n".format(i+1, pub2md(pub)))
with open('publications.json', 'r') as infile, open('../auto-publications.md', 'w') as outfile:
writeOutPrefix(outfile)
pubs = json.load(infile)['publications']
pubs = sorted(pubs, key=lambda p: p['year'], reverse=True)
confPubs = [ pub for pub in pubs if pub['type'] == CONFERENCE_PUB ]
journalPubs = [ pub for pub in pubs if pub['type'] == JOURNAL_PUB ]
shortPubs = [ pub for pub in pubs if pub['type'] == SHORT_PUB ]
arxivPubs = [ pub for pub in pubs if pub['type'] == ARXIV_PUB ]
patentPubs = [ pub for pub in pubs if pub['type'] == PATENT_PUB ]
posterPubs = [ pub for pub in pubs if pub['type'] == POSTER_PUB ]
dissertationPubs = [ pub for pub in pubs if pub['type'] == DISSERTATION_PUB ]
if confPubs:
print("Writing the {} conference pubs".format(len(confPubs)))
writePubs(outfile, "Peer-reviewed conference papers", confPubs)
if journalPubs:
print("Writing the {} journal pubs".format(len(journalPubs)))
writePubs(outfile, "Peer-reviewed journal papers", journalPubs)
if shortPubs:
print("Writing the {} short pubs".format(len(shortPubs)))
writePubs(outfile, "Peer-reviewed short papers", shortPubs)
if arxivPubs:
print("Writing the {} arxiv pubs".format(len(arxivPubs)))
writePubs(outfile, "arXiv papers", arxivPubs)
if patentPubs:
print("Writing the {} patents".format(len(patentPubs)))
writePubs(outfile, "US Patents", patentPubs)
if posterPubs:
print("Writing the {} posters".format(len(posterPubs)))
writePubs(outfile, "Posters", posterPubs)
if dissertationPubs:
print("Writing the {} dissertations".format(len(dissertationPubs)))
writePubs(outfile, "Dissertation", dissertationPubs)
outfile.write('\n') |
the-stack_0_1918 | """
Provides functionality to emulate keyboard presses on host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/keyboard/
"""
import voluptuous as vol
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP)
DOMAIN = "keyboard"
REQUIREMENTS = ['pyuserinput==0.1.9']
TAP_KEY_SCHEMA = vol.Schema({})
def volume_up(hass):
"""Press the keyboard button for volume up."""
hass.services.call(DOMAIN, SERVICE_VOLUME_UP)
def volume_down(hass):
"""Press the keyboard button for volume down."""
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN)
def volume_mute(hass):
"""Press the keyboard button for muting volume."""
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE)
def media_play_pause(hass):
"""Press the keyboard button for play/pause."""
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE)
def media_next_track(hass):
"""Press the keyboard button for next track."""
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK)
def media_prev_track(hass):
"""Press the keyboard button for prev track."""
hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK)
def setup(hass, config):
"""Listen for keyboard events."""
import pykeyboard
keyboard = pykeyboard.PyKeyboard()
keyboard.special_key_assignment()
hass.services.register(DOMAIN, SERVICE_VOLUME_UP,
lambda service:
keyboard.tap_key(keyboard.volume_up_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_DOWN,
lambda service:
keyboard.tap_key(keyboard.volume_down_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE,
lambda service:
keyboard.tap_key(keyboard.volume_mute_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE,
lambda service:
keyboard.tap_key(keyboard.media_play_pause_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_NEXT_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_next_track_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_prev_track_key),
schema=TAP_KEY_SCHEMA)
return True
|
the-stack_0_1920 | import random
from datetime import datetime
import csv
import os
import sys
import struct
import argparse
import datetime
import paho.mqtt.client as mqtt
times = []
acc1 = []
acc2 = []
acc3 = []
ane1 = []
ane2 = []
ane3 = []
node = ""
# Buat fungsi umpan balik ketika koneksi ke mqtt berhasil dilakukan.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# subscribe ke channel/topik saat on_connect()
client.subscribe("BAMS")
# Buat fungsi umpan balik ketika PUBLISH MESSAGE diterima dari mqtt server.
def on_message(client, userdata, msg):
n = 8 # pisah setiap 8 karakter
node = msg.payload[0:3].decode('ascii')
timestamp = datetime.datetime.now() - datetime.timedelta(seconds=3)
timestamp = timestamp.strftime("%H:%M:%S")
max_length=msg.payload[11:]
sensor = [struct.unpack('!f', bytes.fromhex(msg.payload[i:i+n].decode('ascii')))[0]
for i in range(11, len(msg.payload[11:]) + n, n)]
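# Each 8-character hex chunk decodes to one big-endian IEEE-754 float, e.g.
# struct.unpack('!f', bytes.fromhex('3f800000'))[0] -> 1.0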
if node == "sb1":
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : 0, "ane1" : sensor[-3], "ane2" : sensor[-2], "ane3" : sensor[-1], "timestamp" : timestamp},]
elif node == "sb2":
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : sensor[280], "ane1" : sensor[-3], "ane2" : sensor[-2], "ane3" : sensor[-1], "timestamp" : timestamp},]
else:
array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : 0, "ane1" : 0, "ane2" : 0, "ane3" : 0, "timestamp" : timestamp},]
if len(times) < 5:
if node == "sb1":
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(0)
ane1.append(sensor[-3])
ane2.append(sensor[-2])
ane3.append(sensor[-1])
times.append(timestamp)
elif node == "sb2":
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(sensor[280])
ane1.append(sensor[-3])
ane2.append(sensor[-2])
ane3.append(sensor[-1])
times.append(timestamp)
else:
acc1.append(sensor[80])
acc2.append(sensor[180])
acc3.append(0)
ane1.append(0)
ane2.append(0)
ane3.append(0)
times.append(timestamp)
else:
if node == "sb1":
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = 0
ane1[:-1] = ane1[1:]
ane1[-1] = sensor[-3]
ane2[:-1] = ane2[1:]
ane2[-1] = sensor[-2]
ane3[:-1] = ane3[1:]
ane3[-1] = sensor[-1]
times[:-1] = times[1:]
times[-1] = timestamp
elif node == "sb2":
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = sensor[280]
ane1[:-1] = ane1[1:]
ane1[-1] = sensor[-3]
ane2[:-1] = ane2[1:]
ane2[-1] = sensor[-2]
ane3[:-1] = ane3[1:]
ane3[-1] = sensor[-1]
times[:-1] = times[1:]
times[-1] = timestamp
else:
acc1[:-1] = acc1[1:]
acc1[-1] = sensor[80]
acc2[:-1] = acc2[1:]
acc2[-1] = sensor[180]
acc3[:-1] = acc3[1:]
acc3[-1] = 0
ane1[:-1] = ane1[1:]
ane1[-1] = 0
ane2[:-1] = ane2[1:]
ane2[-1] = 0
ane3[:-1] = ane3[1:]
ane3[-1] = 0
times[:-1] = times[1:]
times[-1] = timestamp
# print(sensor)
print('\n',node, timestamp, len(sensor), len(max_length), len(max_length)/8)
# print(sensor)
print(acc1)
print(acc2)
print(acc3)
print(ane1)
print(ane2)
print(ane3)
print(times)
"""Menulis File CSV pada python"""
# array = [{"node" : node, "acc1" : sensor[80], "acc2" : sensor[180], "acc3" : sensor[280], "ane1" : sensor[-2], "ane2" : sensor[-3], "ane3" : sensor[-1], "timestamp" : timestamp},]
with open("csvfile/file.csv", "w") as csvfile:
fields = ["node", "acc1", "acc2", "acc3", "ane1", "ane2", "ane3", "timestamp"]
writer = csv.DictWriter(csvfile, fieldnames = fields)
writer.writeheader()
writer.writerows(array)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("Melakukan koneksi ke Broker...")
client.connect("103.224.137.180", 9621)
client.username_pw_set('bams', 'bams.pwd')
client.loop_forever()
|
the-stack_0_1923 | # coding=utf-8
import os, re
import time
# Count the memory used by a given process name; the same process name may have multiple processes
def countProcessMemoey(processName):
pattern = re.compile(r'([^\s]+)\s+(\d+)\s.*\s([^\s]+\sK)')
cmd = 'tasklist /fi "imagename eq ' + processName + '"' + ' | findstr.exe ' + processName
result = os.popen(cmd).read()
resultList = result.split("\n")
total=0
print("resultList ==",resultList)
for srcLine in resultList:
srcLine = "".join(srcLine.split('\n'))
if len(srcLine) == 0:
break
m = pattern.search(srcLine)
if m == None:
continue
# Since we are checking the memory used by python processes, filter out this program by its pid
if str(os.getpid()) == m.group(2):
continue
ori_mem = m.group(3).replace(',','')
ori_mem = ori_mem.replace(' K','')
ori_mem = ori_mem.replace(r'\sK','')
memEach = int(ori_mem)
# print 'ProcessName:'+ m.group(1) + '\tPID:' + m.group(2) + '\tmemory size:%.2f'% (memEach * 1.0 /1024), 'M'
total += memEach
print(total)
print("*" * 58)
#if __name__ == '__main__':
# process name
processName = 'postgres.exe'
for i in range(1,1):
countProcessMemoey(processName)
time.sleep(5) |
the-stack_0_1924 | import pulumi
import pulumi.runtime
from ... import tables
class CertificateSigningRequestList(pulumi.CustomResource):
def __init__(self, __name__, __opts__=None, items=None, metadata=None):
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'certificates.k8s.io/v1beta1'
__props__['kind'] = 'CertificateSigningRequestList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
super(CertificateSigningRequestList, self).__init__(
"kubernetes:certificates.k8s.io/v1beta1:CertificateSigningRequestList",
__name__,
__props__,
__opts__)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
|
the-stack_0_1925 | import sys
if sys.version_info[0] < 3:
import ConfigParser as configparser
else:
import configparser
if __name__ == '__main__':
import re
CONFIG = [
{
'section_name': 'STARTUP',
'section_title': 'Startup Configuration',
'questions': [
{
'variable': 'STARTUP_MSG_DURATION',
'prompt': 'Startup mode duration (in seconds)?',
'help': 'Sets how long in seconds the startup mode will last',
'default': '5'
}
]
},
{
'section_name': 'DISPLAY',
'section_title': 'Display Configuration',
'questions': [
{
'prompt': 'Display type?',
'variable': 'DISPLAY_DRIVER',
'allowed': [ 'winstar_weg', 'hd44780', 'hd44780_i2c', 'hd44780_mcp23008', 'luma_i2c' ],
'help': 'Configures pydPiper for the display type you have installed',
'followup_questions': {
'^winstar_weg$|^hd44780$':
[
{ 'prompt': 'Register select pin?', 'variable': 'DISPLAY_PIN_RS', 'default': '7', 'help': 'What GPIO pin is the display\'s register select line connected to' },
{ 'prompt': 'Enable pin?', 'variable': 'DISPLAY_PIN_E', 'default': '8', 'help': 'What GPIO pin is the display\'s enable line connected to' },
{ 'prompt': 'Data 4 pin?', 'variable': 'DISPLAY_PIN_D4', 'default': '25', 'help': 'What GPIO pin is the display\'s data 4 line connected to' },
{ 'prompt': 'Data 5 pin?', 'variable': 'DISPLAY_PIN_D5', 'default': '24', 'help': 'What GPIO pin is the display\'s data 5 line connected to' },
{ 'prompt': 'Data 6 pin?', 'variable': 'DISPLAY_PIN_D6', 'default': '23', 'help': 'What GPIO pin is the display\'s data 6 line connected to' },
{ 'prompt': 'Data 7 pin?', 'variable': 'DISPLAY_PIN_D7', 'default': '27', 'help': 'What GPIO pin is the display\'s data 7 line connected to' }
],
'^hd44780_i2c$|^hd44780_mcp23008$|^luma_i2c$':
[
{ 'prompt': 'I2C Port?', 'variable': 'DISPLAY_I2C_PORT', 'default': '1', 'help': 'What I2C bus is the display connected to' },
{ 'prompt': 'I2C Address?', 'variable': 'DISPLAY_I2C_ADDRESS', 'default': '0x3d', 'help': 'What is the display\'s I2C address' }
],
'^luma_i2c$':
[
{ 'prompt': 'Type of Display?', 'variable': 'DISPLAY_DEVICETYPE', 'allowed': ['ssd1306', 'sh1106', 'ssd1322', 'ssd1325', 'ssd1331'], 'default': 'ssd1306', 'help': 'What is the display device type' },
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '128', 'help': 'What is the horizontal resolution of the display in pixels' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '64', 'help': 'What is the vertical resolution of the display in pixels' },
],
'^winstar_weg$':
[
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '80', 'help': 'What is the horizontal resolution of the display in pixels. Note: even if using the character version of the winstar, the value you enter should be in pixels. For reference, a 16x2 character display has a horizontal resolution of 80' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '16', 'help': 'What is the vertical resolution of the display in pixels. Note: even if using the character version of the winstar, the value you enter should be in pixels. For reference, a 16x2 character display has a vertical resolution of 16' },
{
'prompt': 'Enable pulse duration (in microseconds)?',
'variable': 'DISPLAY_ENABLE_DURATION',
'default': '0.1',
'help': 'Determines how long in microseconds the enable pulse should last. This should be set as low as possible but setting it too low may cause display instability. Recommended value is 1 ms for LCDs and 0.1 ms for OLEDs'
}, ],
'^hd44780$|^hd44780_i2c$|^hd44780_mcp23008$':
[
{ 'prompt': 'Width of display (in pixels)?', 'variable': 'DISPLAY_WIDTH', 'default': '80', 'help': 'What is the horizontal resolution of the display in pixels. Note: even though the hd44780 is a character device, the value you enter should be in pixels. For reference, a 16x2 character display has a horizontal resolution of 80' },
{ 'prompt': 'Height of display (in pixels)?', 'variable': 'DISPLAY_HEIGHT', 'default': '16', 'help': 'What is the vertical resolution of the display in pixels. Note: even though the hd44780 is a character device, the value you enter should be in pixels. For reference, a 16x2 character display has a vertical resolution of 16' },
{
'prompt': 'Enable pulse duration (in microseconds)?',
'variable': 'DISPLAY_ENABLE_DURATION',
'default': '1',
'help': 'Determines how long in microseconds the enable pulse should last. This should be set as low as possible but setting it too low may cause display instability. Recommended value is 1 ms for LCDs and 0.1 ms for OLEDs'
},
],
}
},
{
'prompt': 'Location of the pagefile?',
'variable': 'PAGEFILE',
'help': 'Sets which page file should be used to determine what and when to display content',
'default': 'pages_lcd_16x2.py'
},
{
'prompt': 'Animation Smoothing (in seconds)?',
'variable': 'ANIMATION_SMOOTHING',
'default': '0.15',
'help': 'Determines how often the display will attempt to update. This is used to smooth the animation effects'
}
]
},
{
'section_name': 'SYSTEM',
'section_title': 'System configuration',
'questions': [
{
'prompt': 'Location of log file?',
'variable': 'LOGFILE',
'default': '/var/log/pydPiper.log',
'help': 'Where should the log file be written to?'
},
{
'prompt': 'Logging Level?',
'variable': 'LOGLEVEL',
'allowed': ['debug', 'info', 'warning', 'error', 'critical'],
'casesensitive': False,
'default': 'info',
'help': 'Set logging level. Normal logging for the system is info. Setting to debug will provide much more information about how the system is operating which is useful for debugging'
},
{
'prompt': 'Time Zone?',
'variable': 'TIMEZONE',
'default': 'US/Eastern',
'help': 'Sets the time zone for the system. Use ISO 3166 compliant values. See https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
},
{
'prompt': '24-hour clock?',
'variable': 'TIME24HOUR',
'default': 'FALSE',
'casesensitive': False,
'allowed': ['true', 'false'],
'help': 'Determines whether the (deprecated) variable "current_time" is formatted as a 24 hour or 12 hour clock'
},
{
'prompt': 'Temperature Scale?',
'variable': 'TEMPERATURE',
'default': 'fahrenheit',
'casesensitive': False,
'allowed': ['fahrenheit', 'celsius'],
'help': 'Determines whether the temperature values will be shown in Fahrenheit or Celsius'
},
{
'prompt': 'Enable weather?',
'allowed': ['y','n','yes','no'],
'default': 'n',
'help': 'Do you want to enable the weather system? Requires an API key from a supported weather provider.',
'casesensitive': False,
'followup_questions': {
'^y$|^yes$':
[
{
'prompt': 'Weather service?',
'variable': 'WEATHER_SERVICE',
'default': 'accuweather',
'allowed': ['accuweather', 'wunderground', 'weerlive'],
'casesensitive': False,
'followup_questions': {
'^accuweather$|^wunderground$|^weerlive$':[
{
'prompt': 'API key?',
'variable': 'WEATHER_API',
'help': 'If using accuweather, an API can be requested from http://developer.accuweather.com. Note: Weather Underground no longer supports free API keys. weerlive.nl API key can be requested from http://weerlive.nl/delen.php'
},
{
'prompt': 'Location?',
'variable': 'WEATHER_LOCATION',
'help': 'You much provide a valid location. If using Accuweather, these can be searched for using the API calls shown on https://developer.accuweather.com/accuweather-locations-api/apis'
}
]
}
},
]
}
}
]
},
{
'section_name': 'SOURCE',
'section_title': 'Music distribution',
'questions': [
{
'prompt': 'Name of distribution?',
'variable': 'SOURCE_TYPE',
'allowed': ['volumio', 'moode', 'rune', 'lms', 'mpd', 'spop'],
'casesensitive': False,
'mandatory': True,
'followup_questions': {
'^volumio$':
[
{
'prompt': 'Server address?',
'variable': 'VOLUMIO_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'VOLUMIO_PORT',
'default': '3000'
}
],
'^rune$':
[
{
'prompt': 'Server address?',
'variable': 'RUNE_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'RUNE_PORT',
'default': '6379'
}
],
'^lms$':
[
{
'prompt': 'Server address?',
'variable': 'LMS_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'LMS_PORT',
'default': '9090'
},
{
'prompt': 'Username?',
'variable': 'LMS_USER',
},
{
'prompt': 'Password?',
'variable': 'LMS_PASSWORD',
},
{
'prompt': 'LMS Player MAC address?',
'variable': 'LMS_PLAYER',
}
],
'^mpd$|^moode$':
[
{
'prompt': 'Server address?',
'variable': 'MPD_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'MPD_PORT',
'default': '6600'
},
{
'prompt': 'Password?',
'variable': 'MPD_Password',
}
],
'^spop$':
[
{
'prompt': 'Server address?',
'variable': 'SPOP_SERVER',
'default': 'localhost'
},
{
'prompt': 'Port?',
'variable': 'SPOP_PORT',
'default': '6602'
},
{
'prompt': 'Password?',
'variable': 'SPOP_Password',
}
]
}
}
]
}
]
def process_section(section, config):
# if section does not exist, add it
try:
config.add_section(section['section_name'])
except:
pass
print(('\n'+section['section_title'].upper()+'\n'))
process_questions(section['section_name'],section['questions'],config)
def process_questions(section_name, questions, config):
for question in questions:
# If an previous value is available in the config file make it the default answer
try:
question['default'] = config.get(section_name, question['variable'])
except:
pass
value = ask_question(question)
if value and 'variable' in question:
config.set(section_name, question['variable'], value)
if 'followup_questions' in question:
if sys.version_info[0] < 3:
for match, followup_questions in question['followup_questions'].items():
if re.match(match,value):
process_questions(section_name, followup_questions, config)
else:
for match, followup_questions in list(question['followup_questions'].items()):
if re.match(match,value):
process_questions(section_name, followup_questions, config)
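# Hedged walk-through of the follow-up mechanism (the 'volumio' answer is just an example):
#
#   value = ask_question(source_type_question)         # user types 'volumio'
#   re.match('^volumio$', value)                       # -> truthy, so its follow-ups run
#   process_questions('SOURCE', followups, config)     # asks VOLUMIO_SERVER / VOLUMIO_PORT
#                                                      # and writes them to the same [SOURCE] section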
def ask_question(question):
prompt = question['prompt'] + ' [' + question['default'] + ']: ' if 'default' in question else question['prompt'] + ': '
while True:
if sys.version_info[0] < 3:
value = raw_input(prompt)
else:
value = input(prompt)
if value == '':
value = question.get('default','')
if 'casesensitive' in question and not question['casesensitive']:
value = value.lower()
if 'allowed' in question:
question['allowed'] = [allowed_value.lower() for allowed_value in question['allowed']]
if value == '?' or value.lower() == 'help':
if 'help' in question:
print((question['help']))
if 'allowed' in question:
line = 'Possible values are: '
for possible in question['allowed']:
line += possible + ', '
line = line[:-2]
print (line)
continue
if 'allowed' in question:
if value not in question['allowed'] and value:
print(('{0} is not a valid value'.format(value)))
continue
if 'mandatory' in question and question['mandatory'] is True and not value:
print ('This value can not be blank. Please enter a valid value')
continue
return value
print ('\nCreating configuration file for pydPiper')
print ('----------------------------------------')
if sys.version_info[0] < 3:
config = configparser.RawConfigParser()
serviceconfig = configparser.RawConfigParser()
else:
config = configparser.RawConfigParser()
serviceconfig = configparser.RawConfigParser()
serviceconfig.optionxform = str
config.read('pydPiper.cfg')
for section in CONFIG:
process_section(section,config)
print ('\nUPDATING pydPiper.cfg')
with open('pydPiper.cfg', 'w') as fp:
config.write(fp)
serviceconfig.add_section('Unit')
serviceconfig.add_section('Service')
serviceconfig.add_section('Install')
serviceconfig.set('Unit', 'Description', 'pydPiper')
serviceconfig.set('Service', 'Restart', 'always')
serviceconfig.set('Install', 'WantedBy', 'multi-user.target')
if config.get('SOURCE', 'source_type') == 'volumio':
serviceconfig.set('Unit', 'Requires', 'docker.service')
serviceconfig.set('Unit', 'After', 'volumio.service')
serviceconfig.set('Service', 'ExecStart', '/usr/bin/docker run --network=host --privileged -v /var/log:/var/log:rw -v /home/volumio/pydPiper:/app:rw dhrone/pydpiper:v0.31-alpha python /app/pydPiper.py')
elif config.get('SOURCE', 'source_type') == 'moode':
serviceconfig.set('Unit', 'Requires', 'docker.service')
serviceconfig.set('Unit', 'After', 'mpd.service docker.service')
serviceconfig.set('Service', 'ExecStart', '/usr/bin/docker run --network=host --privileged -v /var/log:/var/log:rw -v /home/pi/pydPiper:/app:rw dhrone/pydpiper:v0.31-alpha python /app/pydPiper.py')
elif config.get('SOURCE', 'source_type') == 'rune':
serviceconfig.set('Unit', 'After', 'network.target redis.target')
serviceconfig.set('Service', 'WorkingDirectory', '/root/pydPiper')
serviceconfig.set('Service', 'ExecStart', '/root/.local/bin/pipenv run python2 pydPiper.py')
if config.get('SOURCE', 'source_type') in ['volumio', 'moode', 'rune']:
print ('Creating pydpiper.service file\n')
with open('pydpiper.service', 'w') as fp:
serviceconfig.write(fp)
|
the-stack_0_1927 | import torch
import pyredner.transform as transform
import redner
import math
import pyredner
from typing import Tuple, Optional, List
class Camera:
"""
Redner supports four types of cameras\: perspective, orthographic, fisheye, and panorama.
The camera takes a look at transform or a cam_to_world matrix to
transform from camera local space to world space. It also can optionally
take an intrinsic matrix that models field of view and camera skew.
Args
====
position: Optional[torch.Tensor]
the origin of the camera, 1-d tensor with size 3 and type float32
look_at: Optional[torch.Tensor]
the point camera is looking at, 1-d tensor with size 3 and type float32
up: Optional[torch.tensor]
the up vector of the camera, 1-d tensor with size 3 and type float32
fov: Optional[torch.Tensor]
the field of view of the camera in angle
no effect if the camera is a fisheye or panorama camera
1-d tensor with size 1 and type float32
clip_near: float
the near clipping plane of the camera, need to > 0
resolution: Tuple[int, int]
the size of the output image in (height, width)
cam_to_world: Optional[torch.Tensor]
overrides position, look_at, up vectors
4x4 matrix, optional
intrinsic_mat: Optional[torch.Tensor]
a matrix that transforms a point in camera space before the point
is projected to 2D screen space
used for modelling field of view and camera skewing
after the multiplication the point should be in
[-1, 1/aspect_ratio] x [1, -1/aspect_ratio] in homogeneous coordinates
the projection is then carried by the specific camera types
perspective camera normalizes the homogeneous coordinates
while orthogonal camera drop the Z coordinate.
ignored by fisheye or panorama cameras
overrides fov
3x3 matrix, optional
camera_type: render.camera_type
the type of the camera (perspective, orthographic, fisheye, or panorama)
fisheye: bool
whether the camera is a fisheye camera
(legacy parameter just to ensure compatibility).
"""
def __init__(self,
position: Optional[torch.Tensor] = None,
look_at: Optional[torch.Tensor] = None,
up: Optional[torch.Tensor] = None,
fov: Optional[torch.Tensor] = None,
clip_near: float = 1e-4,
resolution: Tuple[int, int] = (256, 256),
cam_to_world: Optional[torch.Tensor] = None,
intrinsic_mat: Optional[torch.Tensor] = None,
camera_type = pyredner.camera_type.perspective,
fisheye: bool = False):
if position is not None:
assert(position.dtype == torch.float32)
assert(len(position.shape) == 1 and position.shape[0] == 3)
if look_at is not None:
assert(look_at.dtype == torch.float32)
assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
if up is not None:
assert(up.dtype == torch.float32)
assert(len(up.shape) == 1 and up.shape[0] == 3)
if fov is not None:
assert(fov.dtype == torch.float32)
assert(len(fov.shape) == 1 and fov.shape[0] == 1)
assert(isinstance(clip_near, float))
if position is None and look_at is None and up is None:
assert(cam_to_world is not None)
self.position = position
self.look_at = look_at
self.up = up
self._fov = fov
self._cam_to_world = cam_to_world
if cam_to_world is not None:
self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
else:
self.world_to_cam = None
if intrinsic_mat is None:
if camera_type == redner.CameraType.perspective:
fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))
o = torch.ones([1], dtype=torch.float32)
diag = torch.cat([fov_factor, fov_factor, o], 0)
self._intrinsic_mat = torch.diag(diag).contiguous()
else:
self._intrinsic_mat = torch.eye(3, dtype=torch.float32)
else:
self._intrinsic_mat = intrinsic_mat
self.intrinsic_mat_inv = torch.inverse(self.intrinsic_mat).contiguous()
self.clip_near = clip_near
self.resolution = resolution
self.camera_type = camera_type
if fisheye:
self.camera_type = pyredner.camera_type.fisheye
@property
def fov(self):
return self._fov
@fov.setter
def fov(self, value):
self._fov = value
fov_factor = 1.0 / torch.tan(transform.radians(0.5 * self._fov))
o = torch.ones([1], dtype=torch.float32)
diag = torch.cat([fov_factor, fov_factor, o], 0)
self._intrinsic_mat = torch.diag(diag).contiguous()
self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()
@property
def intrinsic_mat(self):
return self._intrinsic_mat
@intrinsic_mat.setter
def intrinsic_mat(self, value):
if value is not None:
self._intrinsic_mat = value
self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()
else:
assert(self.fov is not None)
self.fov = self._fov
@property
def cam_to_world(self):
return self._cam_to_world
@cam_to_world.setter
def cam_to_world(self, value):
if value is not None:
self._cam_to_world = value
self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
else:
self._cam_to_world = None
self.world_to_cam = None
def state_dict(self):
return {
'position': self.position,
'look_at': self.look_at,
'up': self.up,
'fov': self._fov,
'cam_to_world': self._cam_to_world,
'intrinsic_mat': self._intrinsic_mat,
'clip_near': self.clip_near,
'resolution': self.resolution,
'camera_type': self.camera_type
}
@classmethod
def load_state_dict(cls, state_dict):
out = cls.__new__(Camera)
out.position = state_dict['position']
out.look_at = state_dict['look_at']
out.up = state_dict['up']
out._fov = state_dict['fov']
out.cam_to_world = state_dict['cam_to_world']
out.intrinsic_mat = state_dict['intrinsic_mat']
out.clip_near = state_dict['clip_near']
out.resolution = state_dict['resolution']
out.camera_type = state_dict['camera_type']
return out
def automatic_camera_placement(shapes: List,
resolution: Tuple[int, int]):
"""
Given a list of objects or shapes, generates camera parameters automatically
using the bounding boxes of the shapes. Place the camera at
some distances from the shapes, so that it can see all of them.
Inspired by https://github.com/mitsuba-renderer/mitsuba/blob/master/src/librender/scene.cpp#L286
Parameters
==========
shapes: List
a list of redner Shape or Object
resolution: Tuple[int, int]
the size of the output image in (height, width)
Returns
=======
pyredner.Camera
a camera that can see all the objects.
"""
aabb_min = torch.tensor((float('inf'), float('inf'), float('inf')))
aabb_max = -torch.tensor((float('inf'), float('inf'), float('inf')))
for shape in shapes:
v = shape.vertices
v_min = torch.min(v, 0)[0].cpu()
v_max = torch.max(v, 0)[0].cpu()
aabb_min = torch.min(aabb_min, v_min)
aabb_max = torch.max(aabb_max, v_max)
assert(torch.isfinite(aabb_min).all() and torch.isfinite(aabb_max).all())
center = (aabb_max + aabb_min) * 0.5
extents = aabb_max - aabb_min
max_extents_xy = torch.max(extents[0], extents[1])
distance = max_extents_xy / (2 * math.tan(45 * 0.5 * math.pi / 180.0))
max_extents_xyz = torch.max(extents[2], max_extents_xy)
return Camera(position = torch.tensor((center[0], center[1], aabb_min[2] - distance)),
look_at = center,
up = torch.tensor((0.0, 1.0, 0.0)),
fov = torch.tensor([45.0]),
clip_near = 0.001 * float(distance),
resolution = resolution)
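# Hedged usage sketch (assumes `objects` is a list of pyredner shapes/objects already
# loaded elsewhere, e.g. via pyredner.load_obj):
#
#   cam = automatic_camera_placement(objects, resolution=(256, 256))
#   scene = pyredner.Scene(camera=cam, objects=objects)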
def generate_intrinsic_mat(fx: torch.Tensor,
fy: torch.Tensor,
skew: torch.Tensor,
x0: torch.Tensor,
y0: torch.Tensor):
"""
| Generate the following 3x3 intrinsic matrix given the parameters.
| fx, skew, x0
| 0, fy, y0
| 0, 0, 1
Parameters
==========
fx: torch.Tensor
Focal length at x dimension. 1D tensor with size 1.
fy: torch.Tensor
Focal length at y dimension. 1D tensor with size 1.
skew: torch.Tensor
Axis skew parameter describing shearing transform. 1D tensor with size 1.
x0: torch.Tensor
Principle point offset at x dimension. 1D tensor with size 1.
y0: torch.Tensor
Principle point offset at y dimension. 1D tensor with size 1.
Returns
=======
torch.Tensor
3x3 intrinsic matrix
"""
z = torch.zeros_like(fx)
o = torch.ones_like(fx)
row0 = torch.cat([fx, skew, x0])
row1 = torch.cat([ z, fy, y0])
row2 = torch.cat([ z, z, o])
return torch.stack([row0, row1, row2]).contiguous()
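# Hedged usage sketch (the focal lengths and offsets are illustrative values):
#
#   o = torch.ones([1])
#   z = torch.zeros([1])
#   intrinsic = generate_intrinsic_mat(fx=2.0 * o, fy=2.0 * o, skew=z, x0=z, y0=z)
#   # -> tensor([[2., 0., 0.], [0., 2., 0.], [0., 0., 1.]]), usable as
#   # Camera(..., intrinsic_mat=intrinsic)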
|
the-stack_0_1930 | # -*- coding: utf-8 -*-
from openprocurement.api.validation import (
validate_accreditation_level
)
from openprocurement.api.utils import (
get_resource_accreditation
)
def validate_change_ownership_accreditation(request, **kwargs): # pylint: disable=unused-argument
levels = get_resource_accreditation(request, 'lot', request.context, 'create')
err_msg = 'Broker Accreditation level does not permit ownership change'
validate_accreditation_level(request, request.validated['lot'], levels, err_msg)
|
the-stack_0_1931 | import asyncio
import zlib
from aiocache import Cache
from aiocache.serializers import BaseSerializer
class CompressionSerializer(BaseSerializer):
# This is needed because zlib works with bytes.
# this way the underlying backend knows how to
# store/retrieve values
DEFAULT_ENCODING = None
def dumps(self, value):
print("I've received:\n{}".format(value))
compressed = zlib.compress(value.encode())
print("But I'm storing:\n{}".format(compressed))
return compressed
def loads(self, value):
print("I've retrieved:\n{}".format(value))
decompressed = zlib.decompress(value).decode()
print("But I'm returning:\n{}".format(decompressed))
return decompressed
cache = Cache(Cache.REDIS, serializer=CompressionSerializer(), namespace="main")
async def serializer():
text = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt"
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation"
"ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in"
"reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
"sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit"
"anim id est laborum."
)
await cache.set("key", text)
print("-----------------------------------")
real_value = await cache.get("key")
compressed_value = await cache.raw("get", "main:key")
assert len(compressed_value) < len(real_value.encode())
def test_serializer():
loop = asyncio.get_event_loop()
loop.run_until_complete(serializer())
loop.run_until_complete(cache.delete("key"))
loop.run_until_complete(cache.close())
if __name__ == "__main__":
test_serializer()
|
the-stack_0_1932 | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from requests import Session
from zeep import Client, Transport
logger = logging.getLogger(__name__)
class SoapClient(Client):
def __init__(self, wsdl, cert=None, verify=True, timeout=8, **kwargs):
session = Session()
session.cert = cert
session.verify = verify
session.timeout = timeout
session.headers.update({'Content-Type': 'text/xml;charset=UTF-8'})
transport = Transport(
operation_timeout=timeout,
session=session
)
super().__init__(wsdl=wsdl, transport=transport, **kwargs)
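# Hedged usage sketch (the WSDL URL, certificate paths and operation name are placeholders):
#
#   client = SoapClient('https://example.com/service?wsdl',
#                       cert=('client.pem', 'client.key'), timeout=10)
#   result = client.service.SomeOperation(some_param='value')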
|
the-stack_0_1935 | """This file is used for custom testing."""
import argparse
import os
import sys
import threading
import time
import requests
from google.protobuf import text_format
import Request_pb2
parser = argparse.ArgumentParser()
parser.add_argument('-st', '--start_port', help='Starting port number', type=int)
parser.add_argument('-c', '--count', help='Number of ports needed', type=int)
parser.add_argument('-f', '--filename', help='File name of text file')
parser.add_argument('-n', '--number', help='Number of times to repeat the request', type=int)
args = parser.parse_args()
port = args.start_port
def new_dummy_server():
global port
port = port + 1
os.system('python dummy_vm_server.py ' + str(port))
def master_server():
os.system('python master_server.py -d b')
def send_request():
"""Reads expected output and task request files from specified folder.
Then sends the request to Master server specified number of times."""
text_file = open('task_request.txt', 'r')
task_request = Request_pb2.TaskRequest()
text_format.Parse(text_file.read(), task_request)
text_file.close()
file_a = Request_pb2.TaskStatusResponse()
file_b = Request_pb2.TaskStatusResponse()
fil = open('initial_task_response.txt', 'r')
text_format.Parse(fil.read(), file_a)
fil.close()
fil = open('final_task_response.txt', 'r')
text_format.Parse(fil.read(), file_b)
fil.close()
for i in range(args.number):
RESPONSE = requests.post(url='http://127.0.0.1:5000/assign_task',
files={'file': task_request.SerializeToString()})
file_A = Request_pb2.TaskStatusResponse()
file_A.ParseFromString(RESPONSE.content)
        if file_A.status == Request_pb2.TaskStatusResponse.ACCEPTED:
process = threading.Thread(target = response, args= (file_a, file_A,
file_b, task_request.timeout, task_request.number_of_retries))
process.start()
else:
print(file_A)
def response(file_a, file_A, file_b, timeout, number_of_retries):
"""Query the Master server about the previous request,we sent to Master server."""
timer = timeout * (number_of_retries + 10)
time.sleep(timer)
task_status_request = Request_pb2.TaskStatusRequest()
task_status_request.request_id = file_A.current_task_id
RESPONSE = requests.post(url= 'http://127.0.0.1:5000/get_status',
files = {'file': task_status_request.SerializeToString()})
file_B = Request_pb2.TaskStatusResponse()
file_B.ParseFromString(RESPONSE.content)
match_proto(file_a, file_A , file_b, file_B)
def match_proto(file_a, file_A ,file_b, file_B):
"""Match the expected and received files of the response."""
if file_b.status == file_B.status and file_b.task_response.status == file_B.task_response.status:
print('Task request ' + str(file_A.current_task_id) + ' matched successfully')
else:
        print('Task request ' + str(file_A.current_task_id) + ' did not match successfully')
if __name__ == '__main__':
print( 'Starting_port {} Count {} filename {} number {} '.format(
args.start_port,
args.count,
args.filename,
args.number
))
process = threading.Thread(target = master_server)
process.start()
time.sleep(10)
count = args.count
for i in range(count):
time.sleep(2)
process = threading.Thread(target = new_dummy_server)
process.start()
time.sleep(5)
folder_list = args.filename.split(',')
for folder in folder_list:
os.chdir(os.path.join(os.getcwd(), folder))
send_request()
os.chdir('..')
|
the-stack_0_1936 | # pylint: disable=missing-docstring
from unittest.mock import Mock, patch
from peltak import testing
from peltak.core import conf
from peltak.core import context
from peltak.core import fs
from peltak.core import types
@patch('peltak.core.fs.filtered_walk')
@testing.patch_pelconf()
def test_calls_filtered_walk_with_paths_configured(p_filtered_walk: Mock):
files = types.FilesCollection.from_config({
'paths': ['path1', 'path2'],
})
fs.collect_files(files)
assert p_filtered_walk.call_count == 2
args, _ = p_filtered_walk.call_args_list[0]
expected = (conf.proj_path('path1'), files.whitelist(), files.blacklist())
assert tuple(args) == expected
args, _ = p_filtered_walk.call_args_list[1]
expected = (conf.proj_path('path2'), files.whitelist(), files.blacklist())
assert tuple(args) == expected
@patch('peltak.core.fs.filtered_walk', Mock(return_value=[]))
@patch('peltak.core.shell.cprint')
@testing.patch_pelconf()
def test_prints_debug_info_if_verbose_lvl_ge_3(p_cprint):
# type: (Mock) -> None
files = types.FilesCollection.from_config({
'paths': ['path1', 'path2'],
})
context.RunContext().set('verbose', 3)
fs.collect_files(files)
context.RunContext().set('verbose', 0)
assert next(
(True for x in p_cprint.call_args_list if 'only_staged: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'untracked: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'whitelist: ' in x[0][0]),
False
)
assert next(
(True for x in p_cprint.call_args_list if 'blacklist: ' in x[0][0]),
False
)
@patch('peltak.core.git.staged', Mock(return_value=['file1.txt', 'file2.yml']))
@testing.patch_pelconf()
def test_return_empty_list_if_none_of_the_whitelisted_files_are_staged():
"""
GIVEN files collection has a non-empty whitelist and only_staged == True
WHEN no staged files match the whitelist
THEN return empty list.
"""
files = types.FilesCollection.from_config({
'paths': ['path1'],
'include': ['*.py'],
'only_staged': True,
})
assert fs.collect_files(files) == []
|
the-stack_0_1938 | """Return True if two arrays are element-wise equal within a tolerance."""
from __future__ import annotations
import numpy
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
@implements(numpy.allclose)
def allclose(
a: PolyLike,
b: PolyLike,
rtol: float = 1e-5,
atol: float = 1e-8,
equal_nan: bool = False,
) -> bool:
"""
Return True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
If either array contains one or more NaNs, False is returned.
Infs are treated as equal if they are in the same place and of the same
sign in both arrays.
Args:
a, b:
Input arrays to compare.
rtol:
The relative tolerance parameter (see Notes).
atol:
The absolute tolerance parameter (see Notes).
equal_nan:
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns:
Returns True if the two arrays are equal within the given tolerance;
False otherwise.
Notes:
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in some
rare cases.
The comparison of `a` and `b` uses standard broadcasting, which means
that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for `equal`
but not `array_equal`.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> numpoly.allclose([1e9*q0, 1e-7], [1.00001e9*q0, 1e-8])
False
>>> numpoly.allclose([1e9*q0, 1e-8], [1.00001e9*q0, 1e-9])
True
>>> numpoly.allclose([1e9*q0, 1e-8], [1.00001e9*q1, 1e-9])
False
>>> numpoly.allclose([q0, numpy.nan],
... [q0, numpy.nan], equal_nan=True)
True
"""
a, b = numpoly.align_polynomials(a, b)
for coeff1, coeff2 in zip(a.coefficients, b.coefficients):
if not numpy.allclose(
coeff1, coeff2, atol=atol, rtol=rtol, equal_nan=equal_nan):
return False
return True
|
the-stack_0_1940 | import asyncio
import logging
import time
from typing import Set, List, Tuple, Optional
import ray
from ray.experimental.workflow import workflow_storage
from ray.experimental.workflow.common import (Workflow, WorkflowStatus,
WorkflowMetaData)
from ray.experimental.workflow.step_executor import commit_step
from ray.experimental.workflow.storage import get_global_storage
from ray.experimental.workflow.workflow_access import (
MANAGEMENT_ACTOR_NAME, flatten_workflow_output,
get_or_create_management_actor)
logger = logging.getLogger(__name__)
def run(entry_workflow: Workflow,
workflow_id: Optional[str] = None) -> ray.ObjectRef:
"""Run a workflow asynchronously. See "api.run()" for details."""
store = get_global_storage()
assert ray.is_initialized()
if workflow_id is None:
# Workflow ID format: {Entry workflow UUID}.{Unix time to nanoseconds}
workflow_id = f"{entry_workflow.id}.{time.time():.9f}"
logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url="
f"\"{store.storage_url}\"].")
# checkpoint the workflow
ws = workflow_storage.get_workflow_storage(workflow_id)
commit_step(ws, "", entry_workflow)
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
output = ray.get(workflow_manager.run_or_resume.remote(workflow_id))
return flatten_workflow_output(workflow_id, output)
# TODO(suquark): support recovery with ObjectRef inputs.
def resume(workflow_id: str) -> ray.ObjectRef:
"""Resume a workflow asynchronously. See "api.resume()" for details.
"""
storage = get_global_storage()
logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url="
f"\"{storage.storage_url}\"].")
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
output = ray.get(workflow_manager.run_or_resume.remote(workflow_id))
direct_output = flatten_workflow_output(workflow_id, output)
logger.info(f"Workflow job {workflow_id} resumed.")
return direct_output
def get_output(workflow_id: str) -> ray.ObjectRef:
"""Get the output of a running workflow.
See "api.get_output()" for details.
"""
assert ray.is_initialized()
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except ValueError as e:
raise ValueError(
"Failed to connect to the workflow management "
"actor. The workflow could have already failed. You can use "
"workflow.resume() to resume the workflow.") from e
output = ray.get(workflow_manager.get_output.remote(workflow_id))
return flatten_workflow_output(workflow_id, output)
def cancel(workflow_id: str) -> None:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
ray.get(workflow_manager.cancel_workflow.remote(workflow_id))
except ValueError:
wf_store = workflow_storage.get_workflow_storage(workflow_id)
wf_store.save_workflow_meta(WorkflowMetaData(WorkflowStatus.CANCELED))
def get_status(workflow_id: str) -> Optional[WorkflowStatus]:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
running = ray.get(
workflow_manager.is_workflow_running.remote(workflow_id))
except Exception:
running = False
if running:
return WorkflowStatus.RUNNING
store = workflow_storage.get_workflow_storage(workflow_id)
meta = store.load_workflow_meta()
if meta is None:
raise ValueError(f"No such workflow_id {workflow_id}")
return meta.status
def list_all(status_filter: Set[WorkflowStatus]
) -> List[Tuple[str, WorkflowStatus]]:
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except ValueError:
workflow_manager = None
if workflow_manager is None:
runnings = []
else:
runnings = ray.get(workflow_manager.list_running_workflow.remote())
if WorkflowStatus.RUNNING in status_filter and len(status_filter) == 1:
return [(r, WorkflowStatus.RUNNING) for r in runnings]
runnings = set(runnings)
# Here we don't have workflow id, so use empty one instead
store = workflow_storage.get_workflow_storage("")
ret = []
for (k, s) in store.list_workflow():
if s == WorkflowStatus.RUNNING and k not in runnings:
s = WorkflowStatus.RESUMABLE
if s in status_filter:
ret.append((k, s))
return ret
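# Hypothetical call sketch built on the helpers above: list every workflow that
# is still running or can be resumed. Assumes Ray has already been initialized
# by the caller, as the functions in this module require.
def _example_list_unfinished_workflows():
    return list_all({WorkflowStatus.RUNNING, WorkflowStatus.RESUMABLE})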
def resume_all(with_failed: bool) -> List[Tuple[str, ray.ObjectRef]]:
filter_set = {WorkflowStatus.RESUMABLE}
if with_failed:
filter_set.add(WorkflowStatus.FAILED)
all_failed = list_all(filter_set)
try:
workflow_manager = ray.get_actor(MANAGEMENT_ACTOR_NAME)
except Exception as e:
raise RuntimeError("Failed to get management actor") from e
async def _resume_one(wid: str) -> Tuple[str, Optional[ray.ObjectRef]]:
try:
obj = await workflow_manager.run_or_resume.remote(wid)
return (wid, flatten_workflow_output(wid, obj))
except Exception:
logger.error(f"Failed to resume workflow {wid}")
return (wid, None)
ret = workflow_storage.asyncio_run(
asyncio.gather(*[_resume_one(wid) for (wid, _) in all_failed]))
return [(wid, obj) for (wid, obj) in ret if obj is not None]
|
the-stack_0_1942 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Modified from go/bootstrap.py in Chromium infrastructure's repository to patch
# out everything but the core toolchain.
#
# https://chromium.googlesource.com/infra/infra/
"""Prepares a local hermetic Go installation.
- Downloads and unpacks the Go toolset in ../golang.
"""
import contextlib
import logging
import os
import platform
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import urllib
import zipfile
# TODO(vadimsh): Migrate to new golang.org/x/ paths once Golang moves to
# git completely.
LOGGER = logging.getLogger(__name__)
# /path/to/util/bot
ROOT = os.path.dirname(os.path.abspath(__file__))
# Where to install Go toolset to. GOROOT would be <TOOLSET_ROOT>/go.
TOOLSET_ROOT = os.path.join(os.path.dirname(ROOT), 'golang')
# Default workspace with infra go code.
WORKSPACE = os.path.join(ROOT, 'go')
# Platform depended suffix for executable files.
EXE_SFX = '.exe' if sys.platform == 'win32' else ''
# Pinned version of Go toolset to download.
TOOLSET_VERSION = 'go1.11.4'
# Platform dependent portion of a download URL. See http://golang.org/dl/.
TOOLSET_VARIANTS = {
('darwin', 'x86-64'): 'darwin-amd64.tar.gz',
('linux2', 'x86-32'): 'linux-386.tar.gz',
('linux2', 'x86-64'): 'linux-amd64.tar.gz',
('win32', 'x86-32'): 'windows-386.zip',
('win32', 'x86-64'): 'windows-amd64.zip',
}
# Download URL root.
DOWNLOAD_URL_PREFIX = 'https://storage.googleapis.com/golang'
class Failure(Exception):
"""Bootstrap failed."""
def get_toolset_url():
"""URL of a platform specific Go toolset archive."""
# TODO(vadimsh): Support toolset for cross-compilation.
arch = {
'amd64': 'x86-64',
'x86_64': 'x86-64',
'i386': 'x86-32',
'x86': 'x86-32',
}.get(platform.machine().lower())
variant = TOOLSET_VARIANTS.get((sys.platform, arch))
if not variant:
# TODO(vadimsh): Compile go lang from source.
raise Failure('Unrecognized platform')
return '%s/%s.%s' % (DOWNLOAD_URL_PREFIX, TOOLSET_VERSION, variant)
def read_file(path):
"""Returns contents of a given file or None if not readable."""
assert isinstance(path, (list, tuple))
try:
with open(os.path.join(*path), 'r') as f:
return f.read()
except IOError:
return None
def write_file(path, data):
"""Writes |data| to a file."""
assert isinstance(path, (list, tuple))
with open(os.path.join(*path), 'w') as f:
f.write(data)
def remove_directory(path):
"""Recursively removes a directory."""
assert isinstance(path, (list, tuple))
p = os.path.join(*path)
if not os.path.exists(p):
return
LOGGER.info('Removing %s', p)
# Crutch to remove read-only file (.git/* in particular) on Windows.
def onerror(func, path, _exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(p, onerror=onerror if sys.platform == 'win32' else None)
def install_toolset(toolset_root, url):
"""Downloads and installs Go toolset.
GOROOT would be <toolset_root>/go/.
"""
if not os.path.exists(toolset_root):
os.makedirs(toolset_root)
pkg_path = os.path.join(toolset_root, url[url.rfind('/')+1:])
LOGGER.info('Downloading %s...', url)
download_file(url, pkg_path)
LOGGER.info('Extracting...')
if pkg_path.endswith('.zip'):
with zipfile.ZipFile(pkg_path, 'r') as f:
f.extractall(toolset_root)
elif pkg_path.endswith('.tar.gz'):
with tarfile.open(pkg_path, 'r:gz') as f:
f.extractall(toolset_root)
else:
raise Failure('Unrecognized archive format')
LOGGER.info('Validating...')
if not check_hello_world(toolset_root):
raise Failure('Something is not right, test program doesn\'t work')
def download_file(url, path):
"""Fetches |url| to |path|."""
last_progress = [0]
def report(a, b, c):
progress = int(a * b * 100.0 / c)
if progress != last_progress[0]:
print >> sys.stderr, 'Downloading... %d%%' % progress
last_progress[0] = progress
# TODO(vadimsh): Use something less crippled, something that validates SSL.
urllib.urlretrieve(url, path, reporthook=report)
@contextlib.contextmanager
def temp_dir(path):
"""Creates a temporary directory, then deletes it."""
tmp = tempfile.mkdtemp(dir=path)
try:
yield tmp
finally:
remove_directory([tmp])
def check_hello_world(toolset_root):
"""Compiles and runs 'hello world' program to verify that toolset works."""
with temp_dir(toolset_root) as tmp:
path = os.path.join(tmp, 'hello.go')
write_file([path], r"""
package main
func main() { println("hello, world\n") }
""")
out = subprocess.check_output(
[get_go_exe(toolset_root), 'run', path],
env=get_go_environ(toolset_root, tmp),
stderr=subprocess.STDOUT)
if out.strip() != 'hello, world':
LOGGER.error('Failed to run sample program:\n%s', out)
return False
return True
def ensure_toolset_installed(toolset_root):
"""Installs or updates Go toolset if necessary.
Returns True if new toolset was installed.
"""
installed = read_file([toolset_root, 'INSTALLED_TOOLSET'])
available = get_toolset_url()
if installed == available:
LOGGER.debug('Go toolset is up-to-date: %s', TOOLSET_VERSION)
return False
LOGGER.info('Installing Go toolset.')
LOGGER.info(' Old toolset is %s', installed)
LOGGER.info(' New toolset is %s', available)
remove_directory([toolset_root])
install_toolset(toolset_root, available)
LOGGER.info('Go toolset installed: %s', TOOLSET_VERSION)
write_file([toolset_root, 'INSTALLED_TOOLSET'], available)
return True
def get_go_environ(
toolset_root,
workspace=None):
"""Returns a copy of os.environ with added GO* environment variables.
Overrides GOROOT, GOPATH and GOBIN. Keeps everything else. Idempotent.
Args:
toolset_root: GOROOT would be <toolset_root>/go.
workspace: main workspace directory or None if compiling in GOROOT.
"""
env = os.environ.copy()
env['GOROOT'] = os.path.join(toolset_root, 'go')
if workspace:
env['GOBIN'] = os.path.join(workspace, 'bin')
else:
env.pop('GOBIN', None)
all_go_paths = []
if workspace:
all_go_paths.append(workspace)
env['GOPATH'] = os.pathsep.join(all_go_paths)
# New PATH entries.
paths_to_add = [
os.path.join(env['GOROOT'], 'bin'),
env.get('GOBIN'),
]
# Make sure not to add duplicates entries to PATH over and over again when
# get_go_environ is invoked multiple times.
path = env['PATH'].split(os.pathsep)
paths_to_add = [p for p in paths_to_add if p and p not in path]
env['PATH'] = os.pathsep.join(paths_to_add + path)
return env
def get_go_exe(toolset_root):
"""Returns path to go executable."""
return os.path.join(toolset_root, 'go', 'bin', 'go' + EXE_SFX)
def bootstrap(logging_level):
"""Installs all dependencies in default locations.
Supposed to be called at the beginning of some script (it modifies logger).
Args:
logging_level: logging level of bootstrap process.
"""
logging.basicConfig()
LOGGER.setLevel(logging_level)
ensure_toolset_installed(TOOLSET_ROOT)
def prepare_go_environ():
"""Returns dict with environment variables to set to use Go toolset.
Installs or updates the toolset if necessary.
"""
bootstrap(logging.INFO)
return get_go_environ(TOOLSET_ROOT, WORKSPACE)
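# Illustrative sketch of how a caller might combine the helpers above to run a
# command with the hermetic toolset; 'version' is just a sample go subcommand.
def run_go_version():
  env = prepare_go_environ()
  return subprocess.check_output([get_go_exe(TOOLSET_ROOT), 'version'], env=env)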
def find_executable(name, workspaces):
"""Returns full path to an executable in some bin/ (in GOROOT or GOBIN)."""
basename = name
if EXE_SFX and basename.endswith(EXE_SFX):
basename = basename[:-len(EXE_SFX)]
roots = [os.path.join(TOOLSET_ROOT, 'go', 'bin')]
for path in workspaces:
roots.extend([
os.path.join(path, 'bin'),
])
for root in roots:
full_path = os.path.join(root, basename + EXE_SFX)
if os.path.exists(full_path):
return full_path
return name
def main(args):
if args:
print >> sys.stderr, sys.modules[__name__].__doc__,
return 2
bootstrap(logging.DEBUG)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_0_1943 | # -*- coding: utf-8 -*-
"""This file contains the Task Scheduler Registry keys plugins."""
from __future__ import unicode_literals
from dfdatetime import filetime as dfdatetime_filetime
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import dtfabric_plugin
from plaso.parsers.winreg_plugins import interface
class TaskCacheEventData(events.EventData):
"""Task Cache event data.
Attributes:
key_path (str): Windows Registry key path.
task_name (str): name of the task.
task_identifier (str): identifier of the task.
"""
DATA_TYPE = 'task_scheduler:task_cache:entry'
def __init__(self):
"""Initializes event data."""
super(TaskCacheEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
self.task_name = None
self.task_identifier = None
class TaskCacheWindowsRegistryPlugin(
dtfabric_plugin.DtFabricBaseWindowsRegistryPlugin):
"""Plugin that parses a Task Cache key."""
NAME = 'windows_task_cache'
DESCRIPTION = 'Parser for Task Scheduler cache Registry data.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\'
'CurrentVersion\\Schedule\\TaskCache')])
_DEFINITION_FILE = 'task_scheduler.yaml'
def _GetIdValue(self, registry_key):
"""Retrieves the Id value from Task Cache Tree key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Yields:
tuple: containing:
dfwinreg.WinRegistryKey: Windows Registry key.
dfwinreg.WinRegistryValue: Windows Registry value.
"""
id_value = registry_key.GetValueByName('Id')
if id_value:
yield registry_key, id_value
for sub_key in registry_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
yield value_key, id_value
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
dynamic_info_size_error_reported = False
tasks_key = registry_key.GetSubkeyByName('Tasks')
tree_key = registry_key.GetSubkeyByName('Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceExtractionWarning(
'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# TODO: improve this check to a regex.
# The GUID is in the form {%GUID%} and stored an UTF-16 little-endian
# string and should be 78 bytes in size.
id_value_data_size = len(id_value.data)
if id_value_data_size != 78:
parser_mediator.ProduceExtractionWarning(
'unsupported Id value data size: {0:d}.'.format(
id_value_data_size))
continue
guid_string = id_value.GetDataAsObject()
task_guids[guid_string] = value_key.name
dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
dynamic_info_size = dynamic_info_map.GetByteSize()
dynamic_info2_size = dynamic_info2_map.GetByteSize()
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
if not dynamic_info_value:
continue
dynamic_info_record_map = None
dynamic_info_value_data_size = len(dynamic_info_value.data)
if dynamic_info_value_data_size == dynamic_info_size:
dynamic_info_record_map = dynamic_info_map
elif dynamic_info_value_data_size == dynamic_info2_size:
dynamic_info_record_map = dynamic_info2_map
else:
if not dynamic_info_size_error_reported:
parser_mediator.ProduceExtractionWarning(
'unsupported DynamicInfo value data size: {0:d}.'.format(
dynamic_info_value_data_size))
dynamic_info_size_error_reported = True
continue
try:
dynamic_info_record = self._ReadStructureFromByteStream(
dynamic_info_value.data, 0, dynamic_info_record_map)
except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to parse DynamicInfo record with error: {0!s}.'.format(
                exception))
        continue
name = task_guids.get(sub_key.name, sub_key.name)
event_data = TaskCacheEventData()
event_data.key_path = (registry_key.path).replace("\\", "/")
event_data.task_name = name
event_data.task_identifier = sub_key.name
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
last_registered_time = dynamic_info_record.last_registered_time
if last_registered_time:
# Note this is likely either the last registered time or
# the update time.
date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Last registered time')
parser_mediator.ProduceEventWithEventData(event, event_data)
launch_time = dynamic_info_record.launch_time
if launch_time:
# Note this is likely the launch time.
date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Launch time')
parser_mediator.ProduceEventWithEventData(event, event_data)
unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
if unknown_time:
date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# TODO: Add support for the Triggers value.
winreg.WinRegistryParser.RegisterPlugin(TaskCacheWindowsRegistryPlugin)
|
the-stack_0_1945 | #!/usr/bin/env python3
# Removes old files in media root in order to keep your storage requirements low
from alexacloud import settings
import datetime
import shutil
import os
media_root = settings.MEDIA_ROOT
# Delete directories that were created more than 30 minutes ago
now = datetime.datetime.now()
ago = now - datetime.timedelta(minutes=30)
folders = [os.path.join(media_root, f) for f in os.listdir(media_root)]
folders = list(filter(os.path.isdir, folders))
for folder in folders:
st = os.stat(folder)
mtime = datetime.datetime.fromtimestamp(st.st_mtime)
if mtime < ago:
shutil.rmtree(folder)
|
the-stack_0_1947 | import functools
import operator
from collections import defaultdict
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Dict, Hashable, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
from . import dtypes, utils
from .indexing import get_indexer_nd
from .utils import is_dict_like, is_full_slice
from .variable import IndexVariable, Variable
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
def _get_joiner(join):
if join == "outer":
return functools.partial(functools.reduce, operator.or_)
elif join == "inner":
return functools.partial(functools.reduce, operator.and_)
elif join == "left":
return operator.itemgetter(0)
elif join == "right":
return operator.itemgetter(-1)
elif join == "exact":
# We cannot return a function to "align" in this case, because it needs
# access to the dimension name to give a good error message.
return None
elif join == "override":
# We rewrite all indexes and then use join='left'
return operator.itemgetter(0)
else:
raise ValueError("invalid value for join: %s" % join)
def _override_indexes(objects, all_indexes, exclude):
for dim, dim_indexes in all_indexes.items():
if dim not in exclude:
lengths = {index.size for index in dim_indexes}
if len(lengths) != 1:
raise ValueError(
"Indexes along dimension %r don't have the same length."
" Cannot use join='override'." % dim
)
objects = list(objects)
for idx, obj in enumerate(objects[1:]):
new_indexes = {}
for dim in obj.dims:
if dim not in exclude:
new_indexes[dim] = all_indexes[dim][0]
objects[idx + 1] = obj._overwrite_indexes(new_indexes)
return objects
def align(
*objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
fill_value=dtypes.NA,
):
"""
Given any number of Dataset and/or DataArray objects, returns new
objects with aligned indexes and dimension sizes.
Array from the aligned objects are suitable as input to mathematical
operators, because along each dimension they have the same index and size.
Missing values (if ``join != 'inner'``) are filled with ``fill_value``.
The default fill value is NaN.
Parameters
----------
*objects : Dataset or DataArray
Objects to align.
join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
Method for joining the indexes of the passed objects along each
dimension:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed with
only slice operations, then the output may share memory with the input.
In either case, new xarray objects are always returned.
indexes : dict-like, optional
Any indexes explicitly provided with the `indexes` argument should be
used in preference to the aligned indexes.
exclude : sequence of str, optional
Dimensions that must be excluded from alignment
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
aligned : same as *objects
Tuple of objects with aligned coordinates.
Raises
------
ValueError
If any dimensions without labels on the arguments have different sizes,
or a different size than the size of the aligned dimension labels.
Examples
--------
>>> import xarray as xr
>>> x = xr.DataArray([[25, 35], [10, 24]], dims=('lat', 'lon'),
... coords={'lat': [35., 40.], 'lon': [100., 120.]})
>>> y = xr.DataArray([[20, 5], [7, 13]], dims=('lat', 'lon'),
... coords={'lat': [35., 42.], 'lon': [100., 120.]})
>>> x
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> y
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y)
>>> a
<xarray.DataArray (lat: 1, lon: 2)>
array([[25, 35]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 1, lon: 2)>
array([[20, 5]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='outer')
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[25., 35.],
[10., 24.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[20., 5.],
[nan, nan],
[ 7., 13.]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='outer', fill_value=-999)
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 25, 35],
[ 10, 24],
[-999, -999]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 20, 5],
[-999, -999],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='left')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20., 5.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='right')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25., 35.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join='exact')
Traceback (most recent call last):
...
"indexes along dimension {!r} are not equal".format(dim)
ValueError: indexes along dimension 'lat' are not equal
>>> a, b = xr.align(x, y, join='override')
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
"""
if indexes is None:
indexes = {}
if not indexes and len(objects) == 1:
# fast path for the trivial case
obj, = objects
return (obj.copy(deep=copy),)
all_indexes = defaultdict(list)
unlabeled_dim_sizes = defaultdict(set)
for obj in objects:
for dim in obj.dims:
if dim not in exclude:
try:
index = obj.indexes[dim]
except KeyError:
unlabeled_dim_sizes[dim].add(obj.sizes[dim])
else:
all_indexes[dim].append(index)
if join == "override":
objects = _override_indexes(objects, all_indexes, exclude)
# We don't reindex over dimensions with all equal indexes for two reasons:
# - It's faster for the usual case (already aligned objects).
# - It ensures it's possible to do operations that don't require alignment
# on indexes with duplicate values (which cannot be reindexed with
# pandas). This is useful, e.g., for overwriting such duplicate indexes.
joiner = _get_joiner(join)
joined_indexes = {}
for dim, matching_indexes in all_indexes.items():
if dim in indexes:
index = utils.safe_cast_to_index(indexes[dim])
if (
any(not index.equals(other) for other in matching_indexes)
or dim in unlabeled_dim_sizes
):
joined_indexes[dim] = index
else:
if (
any(
not matching_indexes[0].equals(other)
for other in matching_indexes[1:]
)
or dim in unlabeled_dim_sizes
):
if join == "exact":
raise ValueError(f"indexes along dimension {dim!r} are not equal")
index = joiner(matching_indexes)
joined_indexes[dim] = index
else:
index = matching_indexes[0]
if dim in unlabeled_dim_sizes:
unlabeled_sizes = unlabeled_dim_sizes[dim]
labeled_size = index.size
if len(unlabeled_sizes | {labeled_size}) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension size(s) %r "
"than the size of the aligned dimension labels: %r"
% (dim, unlabeled_sizes, labeled_size)
)
for dim in unlabeled_dim_sizes:
if dim not in all_indexes:
sizes = unlabeled_dim_sizes[dim]
if len(sizes) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension sizes: %r"
% (dim, sizes)
)
result = []
for obj in objects:
valid_indexers = {k: v for k, v in joined_indexes.items() if k in obj.dims}
if not valid_indexers:
# fast path for no reindexing necessary
new_obj = obj.copy(deep=copy)
else:
new_obj = obj.reindex(copy=copy, fill_value=fill_value, **valid_indexers)
new_obj.encoding = obj.encoding
result.append(new_obj)
return tuple(result)
def deep_align(
objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
raise_on_invalid=True,
fill_value=dtypes.NA,
):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
from .dataarray import DataArray
from .dataset import Dataset
if indexes is None:
indexes = {}
def is_alignable(obj):
return isinstance(obj, (DataArray, Dataset))
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for position, variables in enumerate(objects):
if is_alignable(variables):
positions.append(position)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
current_out = {}
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
# should to be overwritten instead:
# https://github.com/pydata/xarray/issues/725
# https://github.com/pydata/xarray/issues/3377
# TODO(shoyer): doing this here feels super-hacky -- can we
# move it explicitly into merge instead?
positions.append(position)
keys.append(k)
targets.append(v)
current_out[k] = not_replaced
else:
current_out[k] = v
out.append(current_out)
elif raise_on_invalid:
raise ValueError(
"object to align is neither an xarray.Dataset, "
"an xarray.DataArray nor a dictionary: {!r}".format(variables)
)
else:
out.append(variables)
aligned = align(
*targets,
join=join,
copy=copy,
indexes=indexes,
exclude=exclude,
fill_value=fill_value,
)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
for arg in out:
assert arg is not not_replaced
if is_dict_like(arg):
assert all(value is not not_replaced for value in arg.values())
return out
def reindex_like_indexers(
target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
) -> Dict[Hashable, pd.Index]:
"""Extract indexers to align target with other.
Not public API.
Parameters
----------
target : Dataset or DataArray
Object to be aligned.
other : Dataset or DataArray
Object to be aligned with.
Returns
-------
Dict[Hashable, pandas.Index] providing indexes for reindex keyword
arguments.
Raises
------
ValueError
If any dimensions without labels have different sizes.
"""
indexers = {k: v for k, v in other.indexes.items() if k in target.dims}
for dim in other.dims:
if dim not in indexers and dim in target.dims:
other_size = other.sizes[dim]
target_size = target.sizes[dim]
if other_size != target_size:
raise ValueError(
"different size for unlabeled "
"dimension on argument %r: %r vs %r"
% (dim, other_size, target_size)
)
return indexers
def reindex_variables(
variables: Mapping[Any, Variable],
sizes: Mapping[Any, int],
indexes: Mapping[Any, pd.Index],
indexers: Mapping,
method: Optional[str] = None,
tolerance: Any = None,
copy: bool = True,
fill_value: Optional[Any] = dtypes.NA,
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:
"""Conform a dictionary of aligned variables onto a new set of variables,
filling in missing values with NaN.
Not public API.
Parameters
----------
variables : dict-like
Dictionary of xarray.Variable objects.
sizes : dict-like
Dictionary from dimension names to integer sizes.
indexes : dict-like
Dictionary of indexes associated with variables.
indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact matches.
The values of the index at the matching locations must satisfy the
equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, new xarray objects are always returned.
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
reindexed : dict
Dict of reindexed variables.
new_indexes : dict
Dict of indexes associated with the reindexed variables.
"""
from .dataarray import DataArray
# create variables for the new dataset
reindexed: Dict[Hashable, Variable] = {}
# build up indexers for assignment along each dimension
int_indexers = {}
new_indexes = dict(indexes)
masked_dims = set()
unchanged_dims = set()
for dim, indexer in indexers.items():
if isinstance(indexer, DataArray) and indexer.dims != (dim,):
raise ValueError(
"Indexer has dimensions {:s} that are different "
"from that to be indexed along {:s}".format(str(indexer.dims), dim)
)
target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])
if dim in indexes:
index = indexes[dim]
if not index.is_unique:
raise ValueError(
"cannot reindex or align along dimension %r because the "
"index has duplicate values" % dim
)
int_indexer = get_indexer_nd(index, target, method, tolerance)
            # We use negative values from get_indexer_nd to signify
# values that are missing in the index.
if (int_indexer < 0).any():
masked_dims.add(dim)
elif np.array_equal(int_indexer, np.arange(len(index))):
unchanged_dims.add(dim)
int_indexers[dim] = int_indexer
if dim in variables:
var = variables[dim]
args: tuple = (var.attrs, var.encoding)
else:
args = ()
reindexed[dim] = IndexVariable((dim,), target, *args)
for dim in sizes:
if dim not in indexes and dim in indexers:
existing_size = sizes[dim]
new_size = indexers[dim].size
if existing_size != new_size:
raise ValueError(
"cannot reindex or align along dimension %r without an "
"index because its size %r is different from the size of "
"the new index %r" % (dim, existing_size, new_size)
)
for name, var in variables.items():
if name not in indexers:
key = tuple(
slice(None) if d in unchanged_dims else int_indexers.get(d, slice(None))
for d in var.dims
)
needs_masking = any(d in masked_dims for d in var.dims)
if needs_masking:
new_var = var._getitem_with_mask(key, fill_value=fill_value)
elif all(is_full_slice(k) for k in key):
# no reindexing necessary
# here we need to manually deal with copying data, since
# we neither created a new ndarray nor used fancy indexing
new_var = var.copy(deep=copy)
else:
new_var = var[key]
reindexed[name] = new_var
return reindexed, new_indexes
def _get_broadcast_dims_map_common_coords(args, exclude):
common_coords = {}
dims_map = {}
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.coords:
common_coords[dim] = arg.coords[dim].variable
return dims_map, common_coords
def _broadcast_helper(arg, exclude, dims_map, common_coords):
from .dataarray import DataArray
from .dataset import Dataset
def _set_dims(var):
# Add excluded dims to a copy of dims_map
var_dims_map = dims_map.copy()
for dim in exclude:
with suppress(ValueError):
# ignore dim not in var.dims
var_dims_map[dim] = var.shape[var.dims.index(dim)]
return var.set_dims(var_dims_map)
def _broadcast_array(array):
data = _set_dims(array.variable)
coords = dict(array.coords)
coords.update(common_coords)
return DataArray(data, coords, data.dims, name=array.name, attrs=array.attrs)
def _broadcast_dataset(ds):
data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars}
coords = dict(ds.coords)
coords.update(common_coords)
return Dataset(data_vars, coords, ds.attrs)
if isinstance(arg, DataArray):
return _broadcast_array(arg)
elif isinstance(arg, Dataset):
return _broadcast_dataset(arg)
else:
raise ValueError("all input must be Dataset or DataArray objects")
def broadcast(*args, exclude=None):
"""Explicitly broadcast any number of DataArray or Dataset objects against
one another.
xarray objects automatically broadcast against each other in arithmetic
operations, so this function should not be necessary for normal use.
If no change is needed, the input data is returned to the output without
being copied.
Parameters
----------
*args : DataArray or Dataset objects
Arrays to broadcast against each other.
exclude : sequence of str, optional
Dimensions that must not be broadcasted
Returns
-------
broadcast : tuple of xarray objects
The same data as the input arrays, but with additional dimensions
inserted so that all data arrays have the same dimensions and shape.
Examples
--------
Broadcast two data arrays against one another to fill out their dimensions:
>>> a = xr.DataArray([1, 2, 3], dims='x')
>>> b = xr.DataArray([5, 6], dims='y')
>>> a
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) int64 0 1 2
>>> b
<xarray.DataArray (y: 2)>
array([5, 6])
Coordinates:
* y (y) int64 0 1
>>> a2, b2 = xr.broadcast(a, b)
>>> a2
<xarray.DataArray (x: 3, y: 2)>
array([[1, 1],
[2, 2],
[3, 3]])
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
>>> b2
<xarray.DataArray (x: 3, y: 2)>
array([[5, 6],
[5, 6],
[5, 6]])
Coordinates:
* y (y) int64 0 1
* x (x) int64 0 1 2
Fill out the dimensions of all data variables in a dataset:
>>> ds = xr.Dataset({'a': a, 'b': b})
>>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset
>>> ds2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
Data variables:
a (x, y) int64 1 1 2 2 3 3
b (x, y) int64 5 6 5 6 5 6
"""
if exclude is None:
exclude = set()
args = align(*args, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
result = []
for arg in args:
result.append(_broadcast_helper(arg, exclude, dims_map, common_coords))
return tuple(result)
|
the-stack_0_1948 | import cv2
#Global_vars.cap1 = cv2.VideoCapture("rtsp://10.24.72.33:554/0")
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture("rtsp://admin:[email protected]:6461")
#Global_vars.cap1 = cv2.VideoCapture("rtsp://admin:[email protected]:554/Streaming/Channel/101")
## rtsp://192.168.2.109:554/user=admin&password=mammaloe&channel=1&stream=0.sdp?
## rtsp://89.239.192.188:553/ucast/11
#Global_vars.cap2 = cv2.VideoCapture("rtsp://viewer:[email protected]:80")
print("cap1 init done")
cv2.namedWindow("cam1", cv2.WINDOW_NORMAL)
cv2.namedWindow("cam2", cv2.WINDOW_NORMAL)
while 1:
try:
k = cv2.waitKey(1) & 0xff
if k == ord('q') or k == 27:
break
cap1_grab = cap1.grab()
_, stream_buffer1 = cap1.retrieve(cap1_grab)
cap2_grab = cap2.grab()
_, stream_buffer2 = cap2.retrieve(cap2_grab)
cv2.imshow("cam1", stream_buffer1)
cv2.imshow("cam2", stream_buffer2)
except:
pass
cap1.release()
cap2.release()
cv2.destroyAllWindows() |
the-stack_0_1954 | import json
import logging
import os
from pathlib import Path
import psutil
import time
import signal
from collections import namedtuple
from firexapp.events.model import ADDITIONAL_CHILDREN_KEY
from firexapp.submit.uid import Uid
logger = logging.getLogger(__name__)
DEFAULT_FLAME_TIMEOUT = 60 * 60 * 24 * 2
# This structure contains an index by UUID for both ancestors and descendants. This is memory inefficient,
# but makes queries that would involve multiple graph traversals very fast.
# TODO: If further performance enhancements are sought, this structure could be maintained during event receiving
# so that it isn't re-calculated per task query.
FlameTaskGraph = namedtuple('FlameTaskGraph', ['tasks_by_uuid', 'ancestors_by_uuid', 'descendants_by_uuid'])
def get_flame_redirect_file_path(root_logs_dir):
return os.path.join(root_logs_dir, 'flame.html')
def get_flame_debug_dir(root_logs_dir):
return os.path.join(root_logs_dir, Uid.debug_dirname, 'flame')
def get_flame_pid_file_path(root_logs_dir):
return os.path.join(get_flame_debug_dir(root_logs_dir), 'flame.pid')
def get_flame_pid(root_logs_dir):
return int(Path(get_flame_pid_file_path(root_logs_dir)).read_text().strip())
def wait_until(predicate, timeout, sleep_for, *args, **kwargs):
max_time = time.time() + timeout
while time.time() < max_time:
pred_result = predicate(*args, **kwargs)
if pred_result:
return pred_result
time.sleep(sleep_for)
return predicate(*args, **kwargs)
def wait_until_pid_not_exist(pid, timeout=7, sleep_for=1):
return wait_until(lambda p: not psutil.pid_exists(p), timeout, sleep_for, pid)
def web_request_ok(url):
import requests
try:
return requests.get(url).ok
except requests.exceptions.ConnectionError:
return False
def wait_until_web_request_ok(url, timeout=10, sleep_for=1):
return wait_until(web_request_ok, timeout, sleep_for, url)
def wait_until_path_exist(path, timeout=7, sleep_for=1):
return wait_until(os.path.exists, timeout, sleep_for, path)
def json_file_fn(json_file_path, fn):
if not os.path.isfile(json_file_path):
return False
try:
file_data = json.loads(Path(json_file_path).read_text())
except (json.decoder.JSONDecodeError, OSError):
return False
else:
return fn(file_data)
def get_rec_file(log_dir):
return os.path.join(get_flame_debug_dir(log_dir), 'flame.rec')
def find_rec_file(log_dir):
# Formerly was used for backwards compatability, now an alias for get_rec_file
return get_rec_file(log_dir)
def get_flame_url(port, hostname=None):
if hostname is None:
from socket import gethostname
hostname = gethostname()
return 'http://%s:%d' % (hostname, int(port))
class PathNotFoundException(Exception):
pass
def find(keys, input_dict, raise_error=False):
result = input_dict
for key in keys:
try:
result = result[key]
except Exception:
if raise_error:
raise PathNotFoundException()
return None
return result
def filter_paths(input_dict, paths_to_values):
results = {}
for in_key, in_vals in input_dict.items():
results[in_key] = []
for in_val in in_vals:
matches_all = all(to_equal == find(p, in_val) for p, to_equal in paths_to_values.items())
if matches_all:
results[in_key].append(in_val)
return results
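# Small illustrative sketch of filter_paths with made-up task shapes: each path
# (a tuple of keys) must resolve to the given value for an entry to be kept.
def _filter_paths_example():
    tasks = {'run1': [{'meta': {'state': 'ok'}}, {'meta': {'state': 'failed'}}]}
    filtered = filter_paths(tasks, {('meta', 'state'): 'ok'})
    assert filtered == {'run1': [{'meta': {'state': 'ok'}}]}
    return filtered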
def kill_flame(log_dir, sig=signal.SIGKILL, timeout=10):
flame_pid = get_flame_pid(log_dir)
kill_and_wait(flame_pid, sig, timeout)
return flame_pid
def kill_and_wait(pid, sig=signal.SIGKILL, timeout=10):
if psutil.pid_exists(pid):
os.kill(pid, sig)
wait_until_pid_not_exist(pid, timeout=timeout)
return not psutil.pid_exists(pid)
def create_rel_symlink(existing_path, symlink, target_is_directory=False):
rel_new_file = os.path.relpath(existing_path, start=os.path.dirname(symlink))
os.symlink(rel_new_file, symlink, target_is_directory=target_is_directory)
class BrokerConsumerConfig:
def __init__(self, max_retry_attempts, receiver_ready_file, terminate_on_complete):
self.max_retry_attempts = max_retry_attempts
self.receiver_ready_file = receiver_ready_file
self.terminate_on_complete = terminate_on_complete
def is_json_file(file_path):
try:
json.loads(Path(file_path).read_text())
except json.decoder.JSONDecodeError:
return False
else:
return True
def _both_instance(o1, o2, _type):
return isinstance(o1, _type) and isinstance(o2, _type)
def deep_merge(dict1, dict2):
result = dict(dict1)
for d2_key in dict2:
if d2_key in dict1:
v1 = dict1[d2_key]
v2 = dict2[d2_key]
if _both_instance(v1, v2, dict):
result[d2_key] = deep_merge(v1, v2)
elif _both_instance(v1, v2, list):
result[d2_key] = v1 + v2
elif _both_instance(v1, v2, set):
result[d2_key] = v1.union(v2)
elif v1 == v2:
# already the same value in both dicts, take from either.
result[d2_key] = v1
else:
# Both d1 and d2 have entries for d2_key, both entries are not dicts or lists or sets,
# and the values are not the same. This is a conflict.
# Overwrite d1's value to simulate dict.update() behaviour.
result[d2_key] = v2
else:
# New key for d1, just add it.
result[d2_key] = dict2[d2_key]
return result
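# Illustrative sketch of the merge rules above, using made-up values: nested
# dicts merge recursively, lists concatenate, sets union, and a conflicting
# scalar takes the value from the second dict.
def _deep_merge_example():
    d1 = {'a': {'x': [1]}, 'tags': {'red'}, 'state': 'pending'}
    d2 = {'a': {'x': [2], 'y': 3}, 'tags': {'blue'}, 'state': 'done'}
    merged = deep_merge(d1, d2)
    assert merged == {'a': {'x': [1, 2], 'y': 3},
                      'tags': {'red', 'blue'},
                      'state': 'done'}
    return merged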
def _validate_task_queries(task_representation):
if not isinstance(task_representation, list):
return False
missing_criterias = [r for r in task_representation
if 'matchCriteria' not in r or not isinstance(r['matchCriteria'], dict)]
if missing_criterias:
return False
# TODO: validate matchCriteria themselves
return True
def _normalize_criteria_key(k):
return k[1:] if k.startswith('?') else k
def task_matches_criteria(task: dict, criteria: dict):
if criteria['type'] == 'all':
return True
if criteria['type'] == 'always-select-fields':
# always-select-fields doesn't cause matches (tasks to be included), but paths here are always included
# in results.
return False
if criteria['type'] == 'equals':
criteria_val = criteria['value']
# TODO: if more adjusting qualifiers are added, this needs to be reworked.
required_keys = {k for k in criteria_val.keys() if not k.startswith('?')}
optional_keys = {_normalize_criteria_key(k) for k in criteria_val.keys() if k.startswith('?')}
present_required_keys = required_keys.intersection(task.keys())
if len(required_keys) != len(present_required_keys):
return False
present_optional_keys = optional_keys.intersection(task.keys())
normalized_criteria = {_normalize_criteria_key(k): v for k, v in criteria_val.items()}
for k in present_required_keys.union(present_optional_keys):
if task[k] != normalized_criteria[k]:
return False
return True
return False
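# Illustrative sketch of the 'equals' criteria type with made-up task fields:
# keys prefixed with '?' are optional; they only need to match when they are
# present on the task.
def _criteria_example():
    criteria = {'type': 'equals',
                'value': {'name': 'RootTask', '?state': 'task-succeeded'}}
    assert task_matches_criteria({'name': 'RootTask', 'uuid': '1'}, criteria)
    assert not task_matches_criteria(
        {'name': 'RootTask', 'state': 'task-failed'}, criteria)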
def _create_dict_with_path_val(path_list, val):
r = {}
lastest_dict = r
for i, e in enumerate(path_list):
is_last = i == len(path_list) - 1
if is_last:
lastest_dict[e] = val
else:
lastest_dict[e] = {}
lastest_dict = lastest_dict[e]
return r
def _get_paths_from_task(paths, task):
r = {}
for path in paths:
try:
path_list = path.split('.')
val = find(path_list, task, raise_error=True)
except PathNotFoundException:
# Don't update the results dict if the current task doesn't have the path.
pass
else:
r = deep_merge(r, _create_dict_with_path_val(path_list, val))
return r
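# Small illustration with made-up paths: dotted select paths are expanded back
# into nested dicts, and paths missing from the task are silently skipped.
def _get_paths_example():
    task = {'uuid': '1', 'flame_data': {'html': '<b>done</b>'}}
    selected = _get_paths_from_task(['uuid', 'flame_data.html', 'missing.path'], task)
    assert selected == {'uuid': '1', 'flame_data': {'html': '<b>done</b>'}}
    return selected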
def _get_child_tasks_by_uuid(parent_uuid, all_tasks_by_uuid):
return {u: t for u, t in all_tasks_by_uuid.items() if t['parent_id'] == parent_uuid}
def _get_descendants(uuid, all_tasks_by_uuid):
descendants_by_uuid = _get_child_tasks_by_uuid(uuid, all_tasks_by_uuid)
uuids_to_check = list(descendants_by_uuid.keys())
while uuids_to_check:
cur_descendant_uuid = uuids_to_check.pop()
cur_descendant_children_by_uuid = _get_child_tasks_by_uuid(cur_descendant_uuid, all_tasks_by_uuid)
descendants_by_uuid.update(cur_descendant_children_by_uuid)
uuids_to_check += list(cur_descendant_children_by_uuid.keys())
return descendants_by_uuid
def _get_descendants_for_criteria(select_paths, descendant_criteria, ancestor_uuid, task_graph: FlameTaskGraph):
ancestor_descendants = task_graph.descendants_by_uuid[ancestor_uuid]
matched_descendants_by_uuid = {}
for criteria in descendant_criteria:
for descendant in ancestor_descendants:
if task_matches_criteria(descendant, criteria):
# Need no_descendants=True to prevent infinite loops.
# The fields that are selected for each descendant are determined by all queries, except
# descendant descendants are never included.
matched_descendants_by_uuid[descendant['uuid']] = select_from_task(
select_paths,
[], # Never include descendants in descendant queries to avoid infinite loop.
descendant,
task_graph)
return matched_descendants_by_uuid
def select_from_task(select_paths, select_descendants, task, task_graph: FlameTaskGraph):
selected_dict = {}
paths_update_dict = _get_paths_from_task(select_paths, task)
selected_dict.update(paths_update_dict)
selected_descendants_by_uuid = _get_descendants_for_criteria(select_paths, select_descendants, task['uuid'],
task_graph)
if selected_descendants_by_uuid:
selected_dict.update({'descendants': selected_descendants_by_uuid})
return selected_dict
def flatten(l):
return [item for sublist in l for item in sublist]
def get_always_select_fields(task_queries):
return flatten([q.get('selectPaths', []) for q in task_queries
if q['matchCriteria']['type'] == 'always-select-fields'])
def select_ancestor_of_task_descendant_match(uuid, query, select_paths, task_graph: FlameTaskGraph):
# Should the current task be included in the result because it matches some descendant criteria?
task = task_graph.tasks_by_uuid[uuid]
matching_criteria = [criteria for criteria in query.get('selectDescendants', [])
if task_matches_criteria(task, criteria)]
if matching_criteria:
# The current task matches some descendant criteria. Confirm that some ancestor matches the top-level
# criteria.
ancestor = next((a for a in task_graph.ancestors_by_uuid[uuid]
if task_matches_criteria(a, query['matchCriteria'])), None)
if ancestor:
# The current task and its ancestor should be included in the result.
return ancestor['uuid'], select_from_task(select_paths, matching_criteria, ancestor, task_graph)
return None, {}
def _get_children_by_uuid(tasks_by_uuid):
children_by_uuid = {}
for u, t in tasks_by_uuid.items():
if u not in children_by_uuid:
# Ensure every UUID has an entry in the result, even UUIDs with no children.
children_by_uuid[u] = []
# TODO: consider handling tasks with no 'parent_id' differently from tasks with None 'parent_id',
# since the latter case is the root task and the former seems inexplicable.
parent_id = t.get('parent_id')
if parent_id is not None:
if parent_id not in children_by_uuid:
children_by_uuid[parent_id] = []
children_by_uuid[parent_id].append(t)
return children_by_uuid
def _create_task_graph(tasks_by_uuid):
children_by_uuid = _get_children_by_uuid(tasks_by_uuid)
descendant_uuids_by_uuid = {}
ancestor_uuids_by_uuid = {}
root_task = next((t for t in tasks_by_uuid.values() if t['parent_id'] is None), None)
if root_task:
tasks_to_check = [root_task]
while tasks_to_check:
cur_task = tasks_to_check.pop()
if cur_task['uuid'] not in ancestor_uuids_by_uuid:
ancestor_uuids_by_uuid[cur_task['uuid']] = set()
cur_task_ancestor_uuids = ancestor_uuids_by_uuid[cur_task['uuid']]
# The task tree is being walked top-down, so it's safe to expect ancestors to be populated.
if cur_task.get('parent_id') is not None and cur_task['parent_id'] in ancestor_uuids_by_uuid:
# This task's ancestors are its parent's ancestors plus its parent.
ancestor_uuids = ancestor_uuids_by_uuid[cur_task['parent_id']].union([cur_task['parent_id']])
cur_task_ancestor_uuids.update(ancestor_uuids)
# Update ancestors of additional children.
additional_children_uuids = cur_task.get(ADDITIONAL_CHILDREN_KEY, [])
for additional_child_uuid in additional_children_uuids:
if additional_child_uuid not in ancestor_uuids_by_uuid:
ancestor_uuids_by_uuid[additional_child_uuid] = set()
ancestor_uuids_by_uuid[additional_child_uuid].update(cur_task_ancestor_uuids)
descendant_uuids_by_uuid[cur_task['uuid']] = set(additional_children_uuids)
for ancestor_uuid in cur_task_ancestor_uuids:
descendant_uuids_by_uuid[ancestor_uuid].add(cur_task['uuid'])
descendant_uuids_by_uuid[ancestor_uuid].update(additional_children_uuids)
# traverse the graph via real children only, not additional_children.
tasks_to_check.extend(children_by_uuid[cur_task['uuid']])
ancestors_by_uuid = {u: [tasks_by_uuid[au] for au in ancestor_uuids if au in tasks_by_uuid]
for u, ancestor_uuids in ancestor_uuids_by_uuid.items()}
descendants_by_uuid = {u: [tasks_by_uuid[du] for du in descendant_uuids if du in tasks_by_uuid]
for u, descendant_uuids in descendant_uuids_by_uuid.items()}
return FlameTaskGraph(tasks_by_uuid, ancestors_by_uuid, descendants_by_uuid)
def select_data_for_matches(task_uuid, task_queries, task_graph: FlameTaskGraph, match_descendant_criteria):
result_tasks_by_uuid = {}
always_select_fields = get_always_select_fields(task_queries)
for query in task_queries:
task = task_graph.tasks_by_uuid[task_uuid]
matches_criteria = task_matches_criteria(task, query['matchCriteria'])
select_paths = always_select_fields + query.get('selectPaths', [])
updates_by_uuid = {}
if matches_criteria:
updates_by_uuid[task_uuid] = select_from_task(select_paths, query.get('selectDescendants', []), task,
task_graph)
if match_descendant_criteria:
uuid, task_update = select_ancestor_of_task_descendant_match(task_uuid, query, select_paths, task_graph)
if uuid:
updates_by_uuid[uuid] = task_update
if updates_by_uuid:
result_tasks_by_uuid = deep_merge(result_tasks_by_uuid, updates_by_uuid)
return result_tasks_by_uuid
def _query_flame_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid, match_descendant_criteria):
if not _validate_task_queries(task_queries):
return {}
task_graph = _create_task_graph(all_tasks_by_uuid)
result_tasks_by_uuid = {}
for uuid in task_uuids_to_query:
selected_tasks_by_uuid = select_data_for_matches(uuid, task_queries, task_graph, match_descendant_criteria)
result_tasks_by_uuid = deep_merge(result_tasks_by_uuid, selected_tasks_by_uuid)
return result_tasks_by_uuid
def query_full_tasks(all_tasks_by_uuid, task_queries):
# When querying a full set of tasks, descendants will be included when their ancestors are matched.
return _query_flame_tasks(all_tasks_by_uuid.keys(), task_queries, all_tasks_by_uuid,
match_descendant_criteria=False)
def query_partial_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid):
# When querying a partial set of tasks, count descendants as matches to be included in the result.
return _query_flame_tasks(task_uuids_to_query, task_queries, all_tasks_by_uuid, match_descendant_criteria=True)
def get_dict_json_md5(query_config):
import hashlib
return hashlib.md5(json.dumps(query_config, sort_keys=True).encode('utf-8')).hexdigest()
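# Hedged usage sketch (not part of the original module). The exact shape of
# `matchCriteria` accepted by `task_matches_criteria`/`_validate_task_queries` is
# assumed here; only the 'always-select-fields' type is confirmed by this file.
def _example_query_partial_tasks():
    tasks_by_uuid = {
        'root': {'uuid': 'root', 'parent_id': None, 'name': 'RootTask', 'state': 'task-succeeded'},
        'child': {'uuid': 'child', 'parent_id': 'root', 'name': 'ChildTask', 'state': 'task-failed'},
    }
    task_queries = [
        # Fields selected for every matched task, regardless of which query matched it.
        {'matchCriteria': {'type': 'always-select-fields'}, 'selectPaths': ['uuid', 'state']},
        # Hypothetical criteria type: match the root task and pull in its failed descendants.
        {'matchCriteria': {'type': 'equals', 'value': {'name': 'RootTask'}},
         'selectPaths': ['name'],
         'selectDescendants': [{'type': 'equals', 'value': {'state': 'task-failed'}}]},
    ]
    # Only 'child' is queried, but because match_descendant_criteria=True its matching
    # ancestor ('root') is also included in the result.
    return query_partial_tasks(['child'], task_queries, tasks_by_uuid)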
|
the-stack_0_1955 | import torch
from torch import nn
from utils.operator import gradient
def activation_name(activation: nn.Module) -> str:
if activation is nn.Tanh:
return "tanh"
elif activation is nn.ReLU or activation is nn.ELU or activation is nn.GELU:
return "relu"
elif activation is nn.SELU:
return "selu"
elif activation is nn.LeakyReLU:
return "leaky_relu"
elif activation is nn.Sigmoid:
return "sigmoid"
return "linear"
def linear_layer_with_init(width, height, init=nn.init.xavier_uniform_, activation=None) -> nn.Linear:
linear = nn.Linear(width, height)
if init is None or activation is None:
return linear
init(linear.weight, gain=nn.init.calculate_gain(activation_name(activation)))
return linear
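# Hedged example (illustrative only): a small MLP whose hidden layer is initialised
# with a Xavier gain matched to the Tanh activation that follows it.
def _example_mlp(n_in: int = 3, n_hidden: int = 64, n_out: int = 1) -> nn.Sequential:
    return nn.Sequential(
        linear_layer_with_init(n_in, n_hidden, activation=nn.Tanh),
        nn.Tanh(),
        linear_layer_with_init(n_hidden, n_out),  # activation omitted -> default nn.Linear init
    )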
class Base(nn.Module):
@torch.no_grad()
def predict(self, x):
return self(x)
@torch.no_grad()
def test(self, x, true_sdf):
sdf_predict = self(x)
        return nn.MSELoss()(sdf_predict, true_sdf)  # mean squared error of the SDF prediction
def test_norm_gradient(self, x, true_norm_grad):
x.requires_grad_(True)
y = self(x)
norm_grad = torch.linalg.norm(gradient(y, x, create_graph=False), dim=1)
x.requires_grad_(False)
with torch.no_grad():
return nn.MSELoss()(norm_grad, true_norm_grad)
def test_residual(self, x):
x.requires_grad_(True)
y = self(x)
norm_grad = torch.linalg.norm(gradient(y, x, create_graph=False), dim=1)
x.requires_grad_(False)
with torch.no_grad():
return torch.mean((norm_grad - 1).abs())
    def print_loss(self, verbose=False) -> str:
keys = [
"_loss",
"_loss_SDF",
"_loss_residual",
"_loss_residual_constraint",
"_loss_normal",
"_loss_cosine_similarity",
]
_loss_str = "Loss: "
for key in keys:
if hasattr(self, key):
_loss_str += f"{getattr(self, key):.6f} "
else:
_loss_str += "na "
if verbose:
print(_loss_str)
return _loss_str
# Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations
# Raissi, Maziar, Paris Perdikaris, and George E. Karniadakis
class PINN(Base):
def loss_residual(self, p):
"""
Calculate residual from gradients, :attr:`p`
Args:
- :attr:`p`: tensor of gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_residual(p)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
self._loss_residual = torch.mean((norm_p - 1) ** 2)
return self._loss_residual
def loss_residual_constraint(self, p):
"""
Calculate loss from gradient, :attr:`p`
`ReLU(norm(p) - 1)`
Args:
- :attr:`p`: tensor of gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_residual_constraint(p)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
self._loss_residual_constraint = torch.mean(torch.nn.ReLU()(norm_p - 1))
return self._loss_residual_constraint
def loss_cosine_similarity(self, p, grad):
"""
Calculate loss from gradient of model (:attr:`p`) and training data (:attr:`grad`)
`torch.dot(p,grad)/(norm(p)*norm(grad))`
Args:
- :attr:`p`: tensor of gradient
- :attr:`grad`: tensor of target gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_cosine_similarity(p, grad)
```
"""
norm_p = torch.linalg.norm(p, dim=1)
norm_g = torch.linalg.norm(grad, dim=1)
self._loss_cosine_similarity = torch.mean(-torch.einsum("ij,ij->i", p, grad) / norm_p / norm_g)
return self._loss_cosine_similarity
def loss_SDF(self, y, sdf):
"""
Calculate loss from predicted SDF from model (:attr:`y`)
and SDF from training data (:attr:`sdf`)
`MSE(y, sdf)`
Args:
- :attr:`y`: predicted SDF
- :attr:`sdf`: target SDF
Example:
```
y = model(x)
model.loss_SDF(y, sdf)
```
"""
self._loss_SDF = torch.nn.MSELoss()(y, sdf)
return self._loss_SDF
def loss_normal(self, p, grad):
"""
Calculate loss from gradient of model (:attr:`p`) and training data (:attr:`grad`)
`MSE(p, (grad / norm(grad)))`
Args:
- :attr:`p`: predicted gradient
- :attr:`grad`: target gradient
Example:
```
y = model(x)
p = gradient(y, x)
model.loss_normal(p, grad)
```
"""
norm_grad = torch.linalg.norm(grad, dim=1)
normal = grad / norm_grad
self._loss_normal = torch.nn.MSELoss()(p, normal)
return self._loss_normal
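# Hedged sketch (not from the original repo): one optimisation step for a concrete
# PINN subclass. It assumes the subclass defines `forward`, that `gradient` builds a
# graph suitable for backpropagation by default, and that the batch provides points
# `x`, signed distances `sdf` and target gradients `grad`.
def _example_pinn_step(model: PINN, optimizer: torch.optim.Optimizer, x, sdf, grad,
                       residual_weight: float = 0.1):
    x.requires_grad_(True)
    y = model(x)
    p = gradient(y, x)  # dy/dx, shared by the gradient-based loss terms
    loss = (model.loss_SDF(y, sdf)
            + model.loss_normal(p, grad)
            + residual_weight * model.loss_residual(p))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    x.requires_grad_(False)
    return loss.item()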
|
the-stack_0_1957 | """Some macros for building go test data."""
load("//testlib:expose_genfile.bzl", "expose_genfile")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
def pb_go_proto_library(name, proto, genfile, visibility = None):
go_proto_library(
name = name,
proto = proto,
importpath = native.package_name() + "/" + name,
visibility = visibility,
)
native.filegroup(
name = name + "_src",
srcs = [":" + name],
output_group = "go_generated_srcs",
)
expose_genfile(
name = name + "_exposed_src",
genfile = genfile,
genfile_orig = name + "/" + genfile,
deps = [":" + name + "_src"],
)
def pb_go_library(**kwargs):
importpath = native.package_name() + "/" + kwargs["name"]
go_library(importpath = importpath, **kwargs)
def resources_package_name():
name = native.package_name()
if not name.endswith("/resources"):
name = name + "/resources"
return name
def resources_import_prefix():
return ""
|
the-stack_0_1959 | import numpy as np
import copy
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.algorithms.registry import BINARY_CLASSIFICATION
class HillClimbing:
"""
Example params are in JSON format:
{
"booster": ["gbtree", "gblinear"],
"objective": ["binary:logistic"],
"eval_metric": ["auc", "logloss"],
"eta": [0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]
}
"""
@staticmethod
def get(params, ml_task, seed=1):
np.random.seed(seed)
keys = list(params.keys())
if "num_class" in keys:
keys.remove("num_class")
keys.remove("model_type")
keys.remove("seed")
keys.remove("ml_task")
model_type = params["model_type"]
if model_type == "Baseline":
return [None, None]
model_info = AlgorithmsRegistry.registry[ml_task][model_type]
model_params = model_info["params"]
permuted_keys = np.random.permutation(keys)
key_to_update = None
for key_to_update in permuted_keys:
values = model_params[key_to_update]
if len(values) > 1:
break
left, right = None, None
for i, v in enumerate(values):
if v == params[key_to_update]:
if i + 1 < len(values):
right = values[i + 1]
if i - 1 >= 0:
left = values[i - 1]
params_1, params_2 = None, None
if left is not None:
params_1 = copy.deepcopy(params)
params_1[key_to_update] = left
if right is not None:
params_2 = copy.deepcopy(params)
params_2[key_to_update] = right
return [params_1, params_2]
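# Hedged usage sketch: the parameter values below are placeholders; in practice they
# come from AlgorithmsRegistry for the given model_type/ml_task, and `params` must
# contain the bookkeeping keys removed inside `get` (model_type, seed, ml_task).
def _example_hill_climbing():
    params = {
        "model_type": "Xgboost",
        "seed": 1,
        "ml_task": BINARY_CLASSIFICATION,
        "eta": 0.075,
    }
    # Returns up to two neighbours of `params`, with one registered parameter
    # (e.g. "eta") moved one step left/right in its list of allowed values.
    return HillClimbing.get(params, BINARY_CLASSIFICATION, seed=42)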
|
the-stack_0_1961 | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
# Simplified implementation of headers and footers: let worksheets have separate items
import re
from warnings import warn
from openpyxl.descriptors import (
Alias,
Bool,
Strict,
String,
Integer,
MatchPattern,
Typed,
)
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.compat import unicode
from openpyxl.xml.functions import Element
from openpyxl.utils.escape import escape, unescape
FONT_PATTERN = '&"(?P<font>.+)"'
COLOR_PATTERN = "&K(?P<color>[A-F0-9]{6})"
SIZE_REGEX = r"&(?P<size>\d+\s?)"
FORMAT_REGEX = re.compile("{0}|{1}|{2}".format(FONT_PATTERN, COLOR_PATTERN,
SIZE_REGEX)
)
def _split_string(text):
"""
Split the combined (decoded) string into left, center and right parts
# See http://stackoverflow.com/questions/27711175/regex-with-multiple-optional-groups for discussion
"""
ITEM_REGEX = re.compile("""
(&L(?P<left>.+?))?
(&C(?P<center>.+?))?
(&R(?P<right>.+?))?
$""", re.VERBOSE | re.DOTALL)
m = ITEM_REGEX.match(text)
try:
parts = m.groupdict()
except AttributeError:
warn("""Cannot parse header or footer so it will be ignored""")
parts = {'left':'', 'right':'', 'center':''}
return parts
class _HeaderFooterPart(Strict):
"""
Individual left/center/right header/footer part
Do not use directly.
Header & Footer ampersand codes:
* &A Inserts the worksheet name
* &B Toggles bold
* &D or &[Date] Inserts the current date
* &E Toggles double-underline
* &F or &[File] Inserts the workbook name
* &I Toggles italic
* &N or &[Pages] Inserts the total page count
* &S Toggles strikethrough
* &T Inserts the current time
* &[Tab] Inserts the worksheet name
* &U Toggles underline
* &X Toggles superscript
* &Y Toggles subscript
* &P or &[Page] Inserts the current page number
* &P+n Inserts the page number incremented by n
* &P-n Inserts the page number decremented by n
* &[Path] Inserts the workbook path
* && Escapes the ampersand character
* &"fontname" Selects the named font
* &nn Selects the specified 2-digit font point size
Colours are in RGB Hex
"""
text = String(allow_none=True)
font = String(allow_none=True)
size = Integer(allow_none=True)
RGB = ("^[A-Fa-f0-9]{6}$")
color = MatchPattern(allow_none=True, pattern=RGB)
def __init__(self, text=None, font=None, size=None, color=None):
self.text = text
self.font = font
self.size = size
self.color = color
def __str__(self):
"""
Convert to Excel HeaderFooter miniformat minus position
"""
fmt = []
if self.font:
fmt.append(u'&"{0}"'.format(self.font))
if self.size:
fmt.append("&{0} ".format(self.size))
if self.color:
fmt.append("&K{0}".format(self.color))
return u"".join(fmt + [self.text])
def __bool__(self):
return bool(self.text)
__nonzero__ = __bool__
@classmethod
def from_str(cls, text):
"""
Convert from miniformat to object
"""
keys = ('font', 'color', 'size')
kw = dict((k, v) for match in FORMAT_REGEX.findall(text)
for k, v in zip(keys, match) if v)
kw['text'] = FORMAT_REGEX.sub('', text)
return cls(**kw)
class HeaderFooterItem(Strict):
"""
Header or footer item
"""
left = Typed(expected_type=_HeaderFooterPart)
center = Typed(expected_type=_HeaderFooterPart)
centre = Alias("center")
right = Typed(expected_type=_HeaderFooterPart)
__keys = ('L', 'C', 'R')
def __init__(self, left=None, right=None, center=None):
if left is None:
left = _HeaderFooterPart()
self.left = left
if center is None:
center = _HeaderFooterPart()
self.center = center
if right is None:
right = _HeaderFooterPart()
self.right = right
def __str__(self):
"""
Pack parts into a single string
"""
TRANSFORM = {'&[Tab]': '&A', '&[Pages]': '&N', '&[Date]': '&D',
'&[Path]': '&Z', '&[Page]': '&P', '&[Time]': '&T', '&[File]': '&F',
'&[Picture]': '&G'}
# escape keys and create regex
SUBS_REGEX = re.compile("|".join(["({0})".format(re.escape(k))
for k in TRANSFORM]))
def replace(match):
"""
Callback for re.sub
Replace expanded control with mini-format equivalent
"""
sub = match.group(0)
return TRANSFORM[sub]
txt = []
for key, part in zip(
self.__keys, [self.left, self.center, self.right]):
if part.text is not None:
txt.append(u"&{0}{1}".format(key, unicode(part)))
txt = "".join(txt)
txt = SUBS_REGEX.sub(replace, txt)
return escape(txt)
def __bool__(self):
return any([self.left, self.center, self.right])
__nonzero__ = __bool__
def to_tree(self, tagname):
"""
Return as XML node
"""
el = Element(tagname)
el.text = unicode(self)
return el
@classmethod
def from_tree(cls, node):
if node.text:
text = unescape(node.text)
parts = _split_string(text)
for k, v in parts.items():
if v is not None:
parts[k] = _HeaderFooterPart.from_str(v)
self = cls(**parts)
return self
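# Hedged example (not part of openpyxl): round-trip a decoded header string through
# the helpers above. "&L", "&C" and "&R" introduce the left/centre/right parts.
def _example_header_roundtrip():
    parts = _split_string(u'&LPage &P of &N&C&"Arial"&10Confidential&RPrinted &D')
    item = HeaderFooterItem(**{k: _HeaderFooterPart.from_str(v)
                               for k, v in parts.items() if v is not None})
    return str(item)  # packs the parts back into the Excel mini-format string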
class HeaderFooter(Serialisable):
tagname = "headerFooter"
differentOddEven = Bool(allow_none=True)
differentFirst = Bool(allow_none=True)
scaleWithDoc = Bool(allow_none=True)
alignWithMargins = Bool(allow_none=True)
oddHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
oddFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
__elements__ = ("oddHeader", "oddFooter", "evenHeader", "evenFooter", "firstHeader", "firstFooter")
def __init__(self,
differentOddEven=None,
differentFirst=None,
scaleWithDoc=None,
alignWithMargins=None,
oddHeader=None,
oddFooter=None,
evenHeader=None,
evenFooter=None,
firstHeader=None,
firstFooter=None,
):
self.differentOddEven = differentOddEven
self.differentFirst = differentFirst
self.scaleWithDoc = scaleWithDoc
self.alignWithMargins = alignWithMargins
if oddHeader is None:
oddHeader = HeaderFooterItem()
self.oddHeader = oddHeader
if oddFooter is None:
oddFooter = HeaderFooterItem()
self.oddFooter = oddFooter
if evenHeader is None:
evenHeader = HeaderFooterItem()
self.evenHeader = evenHeader
if evenFooter is None:
evenFooter = HeaderFooterItem()
self.evenFooter = evenFooter
if firstHeader is None:
firstHeader = HeaderFooterItem()
self.firstHeader = firstHeader
if firstFooter is None:
firstFooter = HeaderFooterItem()
self.firstFooter = firstFooter
def __bool__(self):
parts = [getattr(self, attr) for attr in self.__attrs__ + self.__elements__]
return any(parts)
__nonzero__ = __bool__
|
the-stack_0_1963 | # -*- coding: utf-8 -*-
# Build the information needed to crawl Disasters/Accidents tweets
import json
from Config_Disasters_Accidents_od import get_noau_config
from datetime import datetime, timedelta
_, db, r = get_noau_config()  # database configuration
def get_date(date):
    # Get the set of dates
dates = date.split('\n')
date_list = []
for i in dates:
if not len(i.strip()) == 0:
date_list.append(i.strip())
return list(set(date_list))
def get_location(location, gpe):
    # Get the set of locations
locations = location.split('\n')
gpes = gpe.split('\n')
location_list = []
for i in locations:
if not len(i.strip()) == 0:
location_list.append(i.strip())
for j in gpes:
if not len(j.strip()) == 0:
            if j.strip() not in location_list:
location_list.append(j.strip())
return list(set(location_list))
def get_gpe(gpe):
    # Get the set of GPEs
gpes = gpe.split('\n')
gpe_list = []
for i in gpes:
if not len(i.strip()) == 0:
gpe_list.append(i.strip())
return list(set(gpe_list))
def get_person(person):
    # Get the set of persons
persons = person.split('\n')
person_list = []
for i in persons:
if not len(i.strip()) == 0:
person_list.append(i.strip())
return list(set(person_list))
def get_triggers(trigger):
    # Get the set of event trigger words
triggers = trigger.split('\n')
trigger_list = []
for i in triggers:
if not len(i.strip()) == 0:
trigger_list.append(i.strip())
return list(set(trigger_list))
def get_query_str(event):
    # Build the Twitter query string
trigger = get_triggers(event['event']['trigger'])
date = event['event']['date']
date = date.strip()
temp = datetime.strptime(date, "%Y-%m-%d")
date_since = (temp - timedelta(days=7)).strftime('%Y-%m-%d')
date_until = (temp + timedelta(days=7)).strftime('%Y-%m-%d')
    # Note: the query format must look like (xxx OR xxx) (xxx OR xxx) since:xxxx-xx-xx until:xxxx-xx-xx  # locations are not added for now
    return '(' + ' OR '.join(trigger) + ')' + ' since:' + date_since + ' ' + 'until:' + date_until
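# Hedged example (illustrative): for an event with triggers "fire"/"explosion" and
# date 2020-01-10, get_query_str returns something like
# '(fire OR explosion) since:2020-01-03 until:2020-01-17' (trigger order may vary).
def _example_get_query_str():
    event = {'event': {'trigger': 'fire\nexplosion\n', 'date': '2020-01-10'}}
    return get_query_str(event)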
def get_task():
events = db.event_list.find({})
for event in events:
q = get_query_str(event)
message = {'q': q, 'f': ['&f=news', '', '&f=tweets'], 'num': 10000, 'event_id': event['_id']}
print(message)
        # Push the information needed to fetch tweets into the Redis queue
r.rpush('Disasters_Accidents_od', json.dumps(message))
print('master_Disasters_Accidents_od done!')
if __name__ == '__main__':
get_task()
|
the-stack_0_1964 | from logging import getLogger
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple
from django.db import IntegrityError
from django.utils import timezone
from eth_account import Account
from packaging.version import Version
from redis import Redis
from web3.exceptions import BadFunctionCallOutput
from gnosis.eth import EthereumClient, EthereumClientProvider
from gnosis.eth.constants import NULL_ADDRESS
from gnosis.safe import ProxyFactory, Safe
from gnosis.safe.exceptions import InvalidMultisigTx, SafeServiceException
from gnosis.safe.signatures import signatures_to_bytes
from safe_relay_service.gas_station.gas_station import (GasStation,
GasStationProvider)
from safe_relay_service.tokens.models import Token
from safe_relay_service.tokens.price_oracles import CannotGetTokenPriceFromApi
from ..models import (BannedSigner, EthereumBlock, EthereumTx, SafeContract,
SafeMultisigTx)
from ..repositories.redis_repository import EthereumNonceLock, RedisRepository
logger = getLogger(__name__)
class TransactionServiceException(Exception):
pass
class SafeDoesNotExist(TransactionServiceException):
pass
class RefundMustBeEnabled(TransactionServiceException):
pass
class InvalidGasToken(TransactionServiceException):
pass
class SignaturesNotFound(TransactionServiceException):
pass
class SignaturesNotSorted(TransactionServiceException):
pass
class SafeMultisigTxExists(TransactionServiceException):
pass
class NotEnoughFundsForMultisigTx(TransactionServiceException):
pass
class InvalidOwners(TransactionServiceException):
pass
class InvalidMasterCopyAddress(TransactionServiceException):
pass
class InvalidProxyContract(TransactionServiceException):
pass
class InvalidRefundReceiver(TransactionServiceException):
pass
class InvalidGasEstimation(TransactionServiceException):
pass
class GasPriceTooLow(TransactionServiceException):
pass
class SignerIsBanned(TransactionServiceException):
pass
class TransactionEstimationWithNonce(NamedTuple):
safe_tx_gas: int
base_gas: int # For old versions it will equal to `data_gas`
data_gas: int # DEPRECATED
operational_gas: int # DEPRECATED
gas_price: int
gas_token: str
last_used_nonce: int
refund_receiver: str
class TransactionGasTokenEstimation(NamedTuple):
base_gas: int # For old versions it will equal to `data_gas`
gas_price: int
gas_token: str
class TransactionEstimationWithNonceAndGasTokens(NamedTuple):
last_used_nonce: int
safe_tx_gas: int
operational_gas: int # DEPRECATED
estimations: List[TransactionGasTokenEstimation]
class TransactionServiceProvider:
def __new__(cls):
if not hasattr(cls, 'instance'):
from django.conf import settings
cls.instance = TransactionService(GasStationProvider(),
EthereumClientProvider(),
RedisRepository().redis,
settings.SAFE_VALID_CONTRACT_ADDRESSES,
settings.SAFE_PROXY_FACTORY_ADDRESS,
settings.SAFE_TX_SENDER_PRIVATE_KEY)
return cls.instance
@classmethod
def del_singleton(cls):
if hasattr(cls, "instance"):
del cls.instance
class TransactionService:
def __init__(self, gas_station: GasStation, ethereum_client: EthereumClient, redis: Redis,
safe_valid_contract_addresses: Set[str], proxy_factory_address: str, tx_sender_private_key: str):
self.gas_station = gas_station
self.ethereum_client = ethereum_client
self.redis = redis
self.safe_valid_contract_addresses = safe_valid_contract_addresses
self.proxy_factory = ProxyFactory(proxy_factory_address, self.ethereum_client)
self.tx_sender_account = Account.from_key(tx_sender_private_key)
def _check_refund_receiver(self, refund_receiver: str) -> bool:
"""
Support tx.origin or relay tx sender as refund receiver.
        This prevents anybody from front-running our service.
:param refund_receiver: Payment refund receiver as Ethereum checksummed address
:return: True if refund_receiver is ok, False otherwise
"""
return refund_receiver in (NULL_ADDRESS, self.tx_sender_account.address)
@staticmethod
    def _is_valid_gas_token(address: Optional[str]) -> bool:
        """
        :param address: Token address
        :return: True if the address is a valid gas token (or ether), False otherwise
"""
address = address or NULL_ADDRESS
if address == NULL_ADDRESS:
return True
try:
Token.objects.get(address=address, gas=True)
return True
except Token.DoesNotExist:
logger.warning('Cannot retrieve gas token from db: Gas token %s not valid', address)
return False
def _check_safe_gas_price(self, gas_token: Optional[str], safe_gas_price: int) -> bool:
"""
        Check that `safe_gas_price` is not too low, so that the relay gets a full refund
        for the tx. The gas price must always be > 0, otherwise refunding would be disabled.
        If a `gas_token` is used, the `gas_price` needs to be converted to its Eth equivalent.
        The gas price must satisfy gas_price >= _minimum_gas_price_ > 0
        :param gas_token: Address of the token used for payment, `NULL_ADDRESS` or `None` if it's ETH
        :param safe_gas_price: Gas price (in the gas token) the Safe tx will refund
        :return: True if the gas price is acceptable
:exception GasPriceTooLow
:exception InvalidGasToken
"""
if safe_gas_price < 1:
raise RefundMustBeEnabled('Tx internal gas price cannot be 0 or less, it was %d' % safe_gas_price)
minimum_accepted_gas_price = self._get_minimum_gas_price()
estimated_gas_price = self._estimate_tx_gas_price(minimum_accepted_gas_price, gas_token)
if safe_gas_price < estimated_gas_price:
raise GasPriceTooLow('Required gas-price>=%d with gas-token=%s' % (estimated_gas_price, gas_token))
return True
def _estimate_tx_gas_price(self, base_gas_price: int, gas_token: Optional[str] = None) -> int:
if gas_token and gas_token != NULL_ADDRESS:
try:
gas_token_model = Token.objects.get(address=gas_token, gas=True)
estimated_gas_price = gas_token_model.calculate_gas_price(base_gas_price)
except Token.DoesNotExist:
raise InvalidGasToken('Gas token %s not found' % gas_token)
else:
estimated_gas_price = base_gas_price
# FIXME Remove 2 / 3, workaround to prevent frontrunning
return int(estimated_gas_price * 2 / 3)
def _get_configured_gas_price(self) -> int:
"""
:return: Gas price for txs
"""
return self.gas_station.get_gas_prices().fast
def _get_minimum_gas_price(self) -> int:
"""
:return: Minimum gas price accepted for txs set by the user
"""
return self.gas_station.get_gas_prices().standard
def get_last_used_nonce(self, safe_address: str) -> Optional[int]:
safe = Safe(safe_address, self.ethereum_client)
last_used_nonce = SafeMultisigTx.objects.get_last_nonce_for_safe(safe_address)
last_used_nonce = last_used_nonce if last_used_nonce is not None else -1
try:
blockchain_nonce = safe.retrieve_nonce()
last_used_nonce = max(last_used_nonce, blockchain_nonce - 1)
if last_used_nonce < 0: # There's no last_used_nonce
last_used_nonce = None
return last_used_nonce
except BadFunctionCallOutput: # If Safe does not exist
raise SafeDoesNotExist(f'Safe={safe_address} does not exist')
def estimate_tx(self, safe_address: str, to: str, value: int, data: bytes, operation: int,
gas_token: Optional[str]) -> TransactionEstimationWithNonce:
"""
:return: TransactionEstimation with costs using the provided gas token and last used nonce of the Safe
:raises: InvalidGasToken: If Gas Token is not valid
"""
if not self._is_valid_gas_token(gas_token):
raise InvalidGasToken(gas_token)
last_used_nonce = self.get_last_used_nonce(safe_address)
safe = Safe(safe_address, self.ethereum_client)
safe_tx_gas = safe.estimate_tx_gas(to, value, data, operation)
safe_tx_base_gas = safe.estimate_tx_base_gas(to, value, data, operation, gas_token, safe_tx_gas)
# For Safe contracts v1.0.0 operational gas is not used (`base_gas` has all the related costs already)
safe_version = safe.retrieve_version()
if Version(safe_version) >= Version('1.0.0'):
safe_tx_operational_gas = 0
else:
safe_tx_operational_gas = safe.estimate_tx_operational_gas(len(data) if data else 0)
# Can throw RelayServiceException
gas_price = self._estimate_tx_gas_price(self._get_configured_gas_price(), gas_token)
return TransactionEstimationWithNonce(safe_tx_gas, safe_tx_base_gas, safe_tx_base_gas, safe_tx_operational_gas,
gas_price, gas_token or NULL_ADDRESS, last_used_nonce,
self.tx_sender_account.address)
def estimate_tx_for_all_tokens(self, safe_address: str, to: str, value: int, data: bytes,
operation: int) -> TransactionEstimationWithNonceAndGasTokens:
"""
:return: TransactionEstimation with costs using ether and every gas token supported by the service,
with the last used nonce of the Safe
:raises: InvalidGasToken: If Gas Token is not valid
"""
safe = Safe(safe_address, self.ethereum_client)
last_used_nonce = self.get_last_used_nonce(safe_address)
safe_tx_gas = safe.estimate_tx_gas(to, value, data, operation)
safe_version = safe.retrieve_version()
if Version(safe_version) >= Version('1.0.0'):
safe_tx_operational_gas = 0
else:
safe_tx_operational_gas = safe.estimate_tx_operational_gas(len(data) if data else 0)
# Calculate `base_gas` for ether and calculate for tokens using the ether token price
ether_safe_tx_base_gas = safe.estimate_tx_base_gas(to, value, data, operation, NULL_ADDRESS, safe_tx_gas)
base_gas_price = self._get_configured_gas_price()
gas_price = self._estimate_tx_gas_price(base_gas_price, NULL_ADDRESS)
gas_token_estimations = [TransactionGasTokenEstimation(ether_safe_tx_base_gas, gas_price, NULL_ADDRESS)]
token_gas_difference = 50000 # 50K gas more expensive than ether
for token in Token.objects.gas_tokens():
try:
gas_price = self._estimate_tx_gas_price(base_gas_price, token.address)
gas_token_estimations.append(
TransactionGasTokenEstimation(ether_safe_tx_base_gas + token_gas_difference,
gas_price, token.address)
)
except CannotGetTokenPriceFromApi:
logger.error('Cannot get price for token=%s', token.address)
return TransactionEstimationWithNonceAndGasTokens(last_used_nonce, safe_tx_gas, safe_tx_operational_gas,
gas_token_estimations)
def create_multisig_tx(self,
safe_address: str,
to: str,
value: int,
data: bytes,
operation: int,
safe_tx_gas: int,
base_gas: int,
gas_price: int,
gas_token: str,
refund_receiver: str,
safe_nonce: int,
signatures: List[Dict[str, int]]) -> SafeMultisigTx:
"""
:return: Database model of SafeMultisigTx
:raises: SafeMultisigTxExists: If Safe Multisig Tx with nonce already exists
:raises: InvalidGasToken: If Gas Token is not valid
:raises: TransactionServiceException: If Safe Tx is not valid (not sorted owners, bad signature, bad nonce...)
"""
safe_contract, _ = SafeContract.objects.get_or_create(address=safe_address,
defaults={'master_copy': NULL_ADDRESS})
created = timezone.now()
if SafeMultisigTx.objects.not_failed().filter(safe=safe_contract, nonce=safe_nonce).exists():
raise SafeMultisigTxExists(f'Tx with safe-nonce={safe_nonce} for safe={safe_address} already exists in DB')
signature_pairs = [(s['v'], s['r'], s['s']) for s in signatures]
signatures_packed = signatures_to_bytes(signature_pairs)
try:
tx_hash, safe_tx_hash, tx = self._send_multisig_tx(
safe_address,
to,
value,
data,
operation,
safe_tx_gas,
base_gas,
gas_price,
gas_token,
refund_receiver,
safe_nonce,
signatures_packed
)
except SafeServiceException as exc:
raise TransactionServiceException(str(exc)) from exc
ethereum_tx = EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
try:
return SafeMultisigTx.objects.create(
created=created,
safe=safe_contract,
ethereum_tx=ethereum_tx,
to=to,
value=value,
data=data,
operation=operation,
safe_tx_gas=safe_tx_gas,
data_gas=base_gas,
gas_price=gas_price,
gas_token=None if gas_token == NULL_ADDRESS else gas_token,
refund_receiver=refund_receiver,
nonce=safe_nonce,
signatures=signatures_packed,
safe_tx_hash=safe_tx_hash,
)
except IntegrityError as exc:
raise SafeMultisigTxExists(f'Tx with safe_tx_hash={safe_tx_hash.hex()} already exists in DB') from exc
def _send_multisig_tx(self,
safe_address: str,
to: str,
value: int,
data: bytes,
operation: int,
safe_tx_gas: int,
base_gas: int,
gas_price: int,
gas_token: str,
refund_receiver: str,
safe_nonce: int,
signatures: bytes,
block_identifier='latest') -> Tuple[bytes, bytes, Dict[str, Any]]:
"""
This function calls the `send_multisig_tx` of the Safe, but has some limitations to prevent abusing
the relay
:return: Tuple(tx_hash, safe_tx_hash, tx)
:raises: InvalidMultisigTx: If user tx cannot go through the Safe
"""
safe = Safe(safe_address, self.ethereum_client)
data = data or b''
gas_token = gas_token or NULL_ADDRESS
refund_receiver = refund_receiver or NULL_ADDRESS
to = to or NULL_ADDRESS
# Make sure refund receiver is set to 0x0 so that the contract refunds the gas costs to tx.origin
if not self._check_refund_receiver(refund_receiver):
raise InvalidRefundReceiver(refund_receiver)
self._check_safe_gas_price(gas_token, gas_price)
# Make sure proxy contract is ours
if not self.proxy_factory.check_proxy_code(safe_address):
raise InvalidProxyContract(safe_address)
# Make sure master copy is valid
safe_master_copy_address = safe.retrieve_master_copy_address()
if safe_master_copy_address not in self.safe_valid_contract_addresses:
raise InvalidMasterCopyAddress(safe_master_copy_address)
# Check enough funds to pay for the gas
if not safe.check_funds_for_tx_gas(safe_tx_gas, base_gas, gas_price, gas_token):
raise NotEnoughFundsForMultisigTx
threshold = safe.retrieve_threshold()
number_signatures = len(signatures) // 65 # One signature = 65 bytes
if number_signatures < threshold:
raise SignaturesNotFound('Need at least %d signatures' % threshold)
safe_tx_gas_estimation = safe.estimate_tx_gas(to, value, data, operation)
safe_base_gas_estimation = safe.estimate_tx_base_gas(to, value, data, operation, gas_token,
safe_tx_gas_estimation)
if safe_tx_gas < safe_tx_gas_estimation or base_gas < safe_base_gas_estimation:
raise InvalidGasEstimation("Gas should be at least equal to safe-tx-gas=%d and base-gas=%d. Current is "
"safe-tx-gas=%d and base-gas=%d" %
(safe_tx_gas_estimation, safe_base_gas_estimation, safe_tx_gas, base_gas))
# We use fast tx gas price, if not txs could be stuck
tx_gas_price = self._get_configured_gas_price()
tx_sender_private_key = self.tx_sender_account.key
tx_sender_address = Account.from_key(tx_sender_private_key).address
safe_tx = safe.build_multisig_tx(
to,
value,
data,
operation,
safe_tx_gas,
base_gas,
gas_price,
gas_token,
refund_receiver,
signatures,
safe_nonce=safe_nonce,
safe_version=safe.retrieve_version()
)
owners = safe.retrieve_owners()
signers = safe_tx.signers
if set(signers) - set(owners): # All the signers must be owners
            raise InvalidOwners('Signers=%s are not valid owners of the safe. Owners=%s' %
                                (safe_tx.signers, owners))
if signers != safe_tx.sorted_signers:
raise SignaturesNotSorted('Safe-tx-hash=%s - Signatures are not sorted by owner: %s' %
(safe_tx.safe_tx_hash.hex(), safe_tx.signers))
if banned_signers := BannedSigner.objects.filter(address__in=signers):
raise SignerIsBanned(f'Signers {list(banned_signers)} are banned')
logger.info('Safe=%s safe-nonce=%d Check `call()` before sending transaction', safe_address, safe_nonce)
# Set `gasLimit` for `call()`. It will use the same that it will be used later for execution
tx_gas = safe_tx.recommended_gas()
safe_tx.call(tx_sender_address=tx_sender_address, tx_gas=tx_gas, block_identifier=block_identifier)
with EthereumNonceLock(self.redis, self.ethereum_client, self.tx_sender_account.address,
lock_timeout=60 * 2) as tx_nonce:
logger.info('Safe=%s safe-nonce=%d `call()` was successful', safe_address, safe_nonce)
tx_hash, tx = safe_tx.execute(tx_sender_private_key, tx_gas=tx_gas, tx_gas_price=tx_gas_price,
tx_nonce=tx_nonce, block_identifier=block_identifier)
logger.info('Safe=%s, Sent transaction with nonce=%d tx-hash=%s for safe-tx-hash=%s safe-nonce=%d',
safe_address, tx_nonce, tx_hash.hex(), safe_tx.safe_tx_hash.hex(), safe_tx.safe_nonce)
return tx_hash, safe_tx.safe_tx_hash, tx
def resend(self, gas_price: int, multisig_tx: SafeMultisigTx) -> Optional[EthereumTx]:
"""
        Resend the transaction with `gas_price` if it's higher than or equal to the transaction gas price.
        An equal `gas_price` is allowed because a transaction can drop out of the mempool without the
        `gas_price` needing to be increased when resending
        :param gas_price: New gas price for the transaction. Must be >= old gas price
        :param multisig_tx: Multisig Tx not mined to be sent again
        :return: The new `EthereumTx` if a transaction is sent, `None` otherwise
"""
assert multisig_tx.ethereum_tx.block_id is None, 'Block is present!'
transaction_receipt = self.ethereum_client.get_transaction_receipt(multisig_tx.ethereum_tx_id)
if transaction_receipt and transaction_receipt['blockNumber']:
logger.info(
'%s tx was already mined on block %d',
multisig_tx.ethereum_tx_id, transaction_receipt['blockNumber']
)
return None
if multisig_tx.ethereum_tx.gas_price > gas_price:
logger.info(
'%s tx gas price is %d > %d. Tx should be mined soon',
multisig_tx.ethereum_tx_id, multisig_tx.ethereum_tx.gas_price, gas_price
)
return None
safe = Safe(multisig_tx.safe_id, self.ethereum_client)
try:
safe_nonce = safe.retrieve_nonce()
if safe_nonce > multisig_tx.nonce:
logger.info(
'%s tx safe nonce is %d and current safe nonce is %d. Transaction is not valid anymore. Deleting',
multisig_tx.ethereum_tx_id, multisig_tx.nonce, safe_nonce
)
multisig_tx.delete() # Transaction is not valid anymore
return None
except (ValueError, BadFunctionCallOutput):
logger.error('Something is wrong with Safe %s, cannot retrieve nonce', multisig_tx.safe_id,
exc_info=True)
return None
logger.info(
'%s tx gas price was %d. Resending with new gas price %d',
multisig_tx.ethereum_tx_id, multisig_tx.ethereum_tx.gas_price, gas_price
)
safe_tx = multisig_tx.get_safe_tx(self.ethereum_client)
tx_gas = safe_tx.recommended_gas()
try:
tx_hash, tx = safe_tx.execute(self.tx_sender_account.key, tx_gas=tx_gas, tx_gas_price=gas_price,
tx_nonce=multisig_tx.ethereum_tx.nonce)
except ValueError as exc:
if exc.args and isinstance(exc.args[0], dict) and 'nonce' in exc.args[0].get('message', ''):
# ValueError({'code': -32010, 'message': 'Transaction nonce is too low. Try incrementing the nonce.'})
try:
# Check that transaction is still valid
safe_tx.call(tx_sender_address=self.tx_sender_account.address, tx_gas=tx_gas)
except InvalidMultisigTx:
# Maybe there's a transaction with a lower nonce that must be mined before
# It doesn't matter, as soon as a transaction with a newer nonce is added it will be deleted
return None
# Send transaction again with a new nonce
with EthereumNonceLock(self.redis, self.ethereum_client, self.tx_sender_account.address,
lock_timeout=60 * 2) as tx_nonce:
tx_hash, tx = safe_tx.execute(self.tx_sender_account.key, tx_gas=tx_gas, tx_gas_price=gas_price,
tx_nonce=tx_nonce)
else:
logger.error('Problem resending transaction', exc_info=True)
return None
multisig_tx.ethereum_tx = EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
multisig_tx.full_clean(validate_unique=False)
multisig_tx.save(update_fields=['ethereum_tx'])
return multisig_tx.ethereum_tx
# TODO Refactor and test
def create_or_update_ethereum_tx(self, tx_hash: str) -> Optional[EthereumTx]:
try:
ethereum_tx = EthereumTx.objects.get(tx_hash=tx_hash)
if ethereum_tx.block is None:
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash)
if tx_receipt:
ethereum_tx.block = self.get_or_create_ethereum_block(tx_receipt.blockNumber)
ethereum_tx.gas_used = tx_receipt['gasUsed']
ethereum_tx.status = tx_receipt.get('status')
ethereum_tx.transaction_index = tx_receipt['transactionIndex']
ethereum_tx.save(update_fields=['block', 'gas_used', 'status', 'transaction_index'])
return ethereum_tx
except EthereumTx.DoesNotExist:
tx = self.ethereum_client.get_transaction(tx_hash)
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash)
if tx:
if tx_receipt:
ethereum_block = self.get_or_create_ethereum_block(tx_receipt.blockNumber)
return EthereumTx.objects.create_from_tx_dict(tx, tx_hash,
tx_receipt=tx_receipt.gasUsed,
ethereum_block=ethereum_block)
return EthereumTx.objects.create_from_tx_dict(tx, tx_hash)
# TODO Refactor and test
def get_or_create_ethereum_block(self, block_number: int):
try:
return EthereumBlock.objects.get(number=block_number)
except EthereumBlock.DoesNotExist:
block = self.ethereum_client.get_block(block_number)
return EthereumBlock.objects.create_from_block(block)
|
the-stack_0_1965 | #!/usr/bin/env python3
import json
from pathlib import Path
from urllib.parse import urlparse
from tests.mocks.categories import CATEGORIES
from tests.mocks.kit_info import KIT_INFO
from tests.mocks.kit_sha1 import KIT_SHA1
file_path = Path(__file__).resolve()
api_mocks = file_path.parent.joinpath("apis")
ebuilds_mocks = file_path.parent.joinpath("ebuilds")
async def stub_get_page(uri, session, **kwargs):
o = urlparse(uri)
if o.path.endswith("kit-info.json"):
return json.dumps(KIT_INFO)
if o.path.endswith("kit-sha1.json"):
return json.dumps(KIT_SHA1)
if o.path.endswith("categories"):
return "\n".join(CATEGORIES)
if o.path.endswith("firefox-72.0.2.ebuild"):
with open(ebuilds_mocks / "firefox-72.0.2.ebuild") as f:
result = f.read()
return result
if o.netloc == "api.github.com":
return github_api_stub(o, **kwargs)
if o.netloc == "code.funtoo.org":
return funtoo_stash_api_stub(o, **kwargs)
def github_api_stub(o, **kwargs):
headers = kwargs["headers"] if "headers" in kwargs else None
if o.path.endswith("/5932b921ba48f44e9c19d19301ae9448bb3fd912"):
with open(
api_mocks / "github_5932b921ba48f44e9c19d19301ae9448bb3fd912.json"
) as f:
result = f.read()
return result
if o.path.endswith("/04eb725f50c46031116df312c634eb767ba1b718"):
with open(
api_mocks / "github_04eb725f50c46031116df312c634eb767ba1b718.json"
) as f:
result = f.read()
return result
if o.path.endswith("/ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390"):
with open(
api_mocks / "github_ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390.json"
) as f:
result = f.read()
return result
if o.path.endswith("/789bfa81a335ab23accbd0da7d0808b499227510"):
if headers is not None and headers["accept"] == "application/vnd.github.v3.raw":
with open(ebuilds_mocks / "firefox-72.0.2.ebuild") as f:
result = f.read()
else:
with open(
api_mocks / "github_789bfa81a335ab23accbd0da7d0808b499227510.json"
) as f:
result = f.read()
return result
raise ValueError("unsupported path")
def funtoo_stash_api_stub(o, **kwargs):
if o.query.endswith("=5932b921ba48f44e9c19d19301ae9448bb3fd912"):
with open(
api_mocks / "funtoo_stash_5932b921ba48f44e9c19d19301ae9448bb3fd912.json"
) as f:
result = f.read()
return result
if o.query.endswith("=04eb725f50c46031116df312c634eb767ba1b718"):
with open(
api_mocks / "funtoo_stash_04eb725f50c46031116df312c634eb767ba1b718.json"
) as f:
result = f.read()
return result
if o.query.endswith("=ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390"):
with open(
api_mocks / "funtoo_stash_ba2ec9cdda1ab7d29185777d5d9f7b2488ae7390.json"
) as f:
result = f.read()
return result
raise ValueError("unsupported path")
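# Hedged example (pytest-style sketch): where `get_page` lives in the code under test
# is not shown in this file, so the patch target below is only an assumption.
async def _example_use_stub(monkeypatch):
    # monkeypatch.setattr("funtoo.merge.fetch.get_page", stub_get_page)  # assumed target
    return await stub_get_page(
        "https://code.funtoo.org/bitbucket/rest/api/1.0/commits"
        "?until=5932b921ba48f44e9c19d19301ae9448bb3fd912",
        session=None,
    )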
|
the-stack_0_1967 | """Pairwise genome alignment
src: {ensemblgenomes.prefix}/fasta/{species}/*.fa.gz
dst: ./pairwise/{target}/{query}/{chromosome}/sing.maf
https://lastz.github.io/lastz/
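Example (hedged; the species and clade names are placeholders that must exist in
the local ensemblgenomes installation and in phylo.newicks):
    main(["--jobs", "4", "--clade", "poaceae", "oryza_sativa"])
    # or, equivalently, from Python:
    run("oryza_sativa", "poaceae", jobs=4, quick=False)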
"""
import concurrent.futures as confu
import gzip
import logging
import os
import shutil
from pathlib import Path
from ..db import ensemblgenomes, phylo
from ..util import cli, fs, subp
_log = logging.getLogger(__name__)
_executor = confu.ThreadPoolExecutor()
def main(argv: list[str] = []):
parser = cli.logging_argparser()
parser.add_argument("-n", "--dry-run", action="store_true")
parser.add_argument("-j", "--jobs", type=int, default=os.cpu_count())
parser.add_argument("--quick", action="store_true")
parser.add_argument("-c", "--clade", choices=phylo.newicks.keys())
parser.add_argument("target", choices=ensemblgenomes.species_names())
parser.add_argument("query", nargs="*")
args = parser.parse_args(argv or None)
cli.logging_config(args.loglevel)
cli.dry_run = args.dry_run
if args.clade:
assert not args.query
run(args.target, args.clade, args.jobs, args.quick)
else:
_run(args.target, args.query, args.jobs, args.quick)
def run(target: str, clade: str, jobs: int, quick: bool = False):
tree = phylo.newicks[clade]
_run(target, phylo.extract_names(tree), jobs, quick)
return Path("pairwise") / target
def _run(target: str, queries: list[str], jobs: int, quick: bool):
queries = ensemblgenomes.sanitize_queries(target, queries)
_executor._max_workers = jobs
futures: list[confu.Future[Path]] = []
for query in queries:
pa = PairwiseAlignment(target, query, quick=quick)
futures.extend(pa.run())
for future in confu.as_completed(futures):
if (sing_maf := future.result()).exists():
print(sing_maf)
class PairwiseAlignment:
def __init__(self, target: str, query: str, quick: bool):
self._target = target
self._query = query
self._quick = quick
self._target_sizes = ensemblgenomes.get_file("fasize.chrom.sizes", target)
self._query_sizes = ensemblgenomes.get_file("fasize.chrom.sizes", query)
self._outdir = Path("pairwise") / target / query
def run(self):
if not cli.dry_run:
self._outdir.mkdir(0o755, parents=True, exist_ok=True)
patt = "*.chromosome.*.2bit"
it = ensemblgenomes.rglob(patt, [self._target])
target_chromosomes = fs.sorted_naturally(it)
it = ensemblgenomes.rglob(patt, [self._query])
query_chromosomes = fs.sorted_naturally(it)
subexe = confu.ThreadPoolExecutor(max_workers=len(target_chromosomes))
waiters: list[confu.Future[list[Path]]] = []
for t in target_chromosomes:
futures = [
_executor.submit(self.align_chr, t, q) for q in query_chromosomes
]
waiters.append(subexe.submit(wait_results, futures))
return [
_executor.submit(self.integrate, future.result())
for future in confu.as_completed(waiters)
]
def align_chr(self, target_2bit: Path, query_2bit: Path):
axtgz = self.lastz(target_2bit, query_2bit)
chain = self.axt_chain(target_2bit, query_2bit, axtgz)
return chain
def integrate(self, chains: list[Path]):
pre_chain = self.merge_sort_pre(chains)
syntenic_net = self.chain_net_syntenic(pre_chain)
sing_maf = self.net_axt_maf(syntenic_net, pre_chain)
return sing_maf
def lastz(self, target_2bit: Path, query_2bit: Path):
target_label = target_2bit.stem.rsplit("dna_sm.", 1)[1]
query_label = query_2bit.stem.rsplit("dna_sm.", 1)[1]
subdir = self._outdir / target_label
if not cli.dry_run:
subdir.mkdir(0o755, exist_ok=True)
axtgz = subdir / f"{query_label}.axt.gz"
args = f"lastz {target_2bit} {query_2bit} --format=axt --inner=2000 --step=7"
if self._quick:
args += " --notransition --nogapped"
is_to_run = fs.is_outdated(axtgz, [target_2bit, query_2bit])
lastz = subp.run_if(is_to_run, args, stdout=subp.PIPE)
if is_to_run and not cli.dry_run:
with gzip.open(axtgz, "wb") as fout:
fout.write(lastz.stdout)
return axtgz
def axt_chain(self, target_2bit: Path, query_2bit: Path, axtgz: Path):
chain = axtgz.with_suffix("").with_suffix(".chain")
cmd = "axtChain -minScore=5000 -linearGap=medium stdin"
cmd += f" {target_2bit} {query_2bit} {chain}"
is_to_run = fs.is_outdated(chain, axtgz)
p = subp.popen_if(is_to_run, cmd, stdin=subp.PIPE)
if is_to_run and not cli.dry_run:
assert p.stdin
with gzip.open(axtgz, "rb") as fin:
shutil.copyfileobj(fin, p.stdin)
p.stdin.close()
p.communicate()
return chain
def merge_sort_pre(self, chains: list[Path]):
parent = set(x.parent for x in chains)
subdir = parent.pop()
assert not parent, "chains are in the same directory"
pre_chain = subdir / "pre.chain.gz"
is_to_run = fs.is_outdated(pre_chain, chains)
merge_cmd = ["chainMergeSort"] + [str(x) for x in chains]
merge = subp.popen_if(is_to_run, merge_cmd, stdout=subp.PIPE)
assert merge.stdout
pre_cmd = f"chainPreNet stdin {self._target_sizes} {self._query_sizes} stdout"
pre = subp.popen_if(is_to_run, pre_cmd, stdin=merge.stdout, stdout=subp.PIPE)
merge.stdout.close()
if is_to_run and not cli.dry_run:
(stdout, _stderr) = pre.communicate()
with gzip.open(pre_chain, "wb") as fout:
fout.write(stdout)
return pre_chain
def chain_net_syntenic(self, pre_chain: Path):
syntenic_net = pre_chain.parent / "syntenic.net"
is_to_run = fs.is_outdated(syntenic_net, pre_chain)
cn_cmd = (
f"chainNet stdin {self._target_sizes} {self._query_sizes} stdout /dev/null"
)
cn = subp.popen_if(is_to_run, cn_cmd, stdin=subp.PIPE, stdout=subp.PIPE)
assert cn.stdin
assert cn.stdout
if is_to_run and not cli.dry_run:
            with gzip.open(pre_chain, "rb") as fin:
                shutil.copyfileobj(fin, cn.stdin)
cn.stdin.close()
sn = subp.popen_if(
is_to_run, f"netSyntenic stdin {syntenic_net}", stdin=cn.stdout
)
cn.stdout.close()
sn.communicate()
return syntenic_net
def net_axt_maf(self, syntenic_net: Path, pre_chain: Path):
sing_maf = syntenic_net.parent / "sing.maf"
target_2bit = ensemblgenomes.get_file("*.genome.2bit", self._target)
query_2bit = ensemblgenomes.get_file("*.genome.2bit", self._query)
is_to_run = fs.is_outdated(sing_maf, [syntenic_net, pre_chain])
toaxt_cmd = f"netToAxt {syntenic_net} stdin {target_2bit} {query_2bit} stdout"
toaxt = subp.popen_if(is_to_run, toaxt_cmd, stdin=subp.PIPE, stdout=subp.PIPE)
assert toaxt.stdin
assert toaxt.stdout
if is_to_run and not cli.dry_run:
            with gzip.open(pre_chain, "rb") as fin:
                shutil.copyfileobj(fin, toaxt.stdin)
toaxt.stdin.close()
sort = subp.popen_if(
is_to_run, "axtSort stdin stdout", stdin=toaxt.stdout, stdout=subp.PIPE
)
toaxt.stdout.close()
assert sort.stdout
tprefix = phylo.shorten(self._target)
qprefix = phylo.shorten(self._query)
axttomaf_cmd = (
f"axtToMaf -tPrefix={tprefix}. -qPrefix={qprefix}. stdin"
f" {self._target_sizes} {self._query_sizes} {sing_maf}"
)
atm = subp.popen_if(is_to_run, axttomaf_cmd, stdin=sort.stdout)
sort.stdout.close()
atm.communicate()
return sing_maf
def wait_results(futures: list[confu.Future[Path]]):
return [f.result() for f in futures]
if __name__ == "__main__":
main()
|
the-stack_0_1968 | from IMLearn.learners.regressors import linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import os
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
full_data = pd.read_csv(filename, parse_dates=["Date"]).dropna().drop_duplicates()
    # replace each Date with its ordinal day of the year (e.g. 2020-02-01 -> 32)
    full_data = full_data.apply(lambda x: [obj.timetuple().tm_yday for obj in x] if x.name == "Date" else x)
    # drop invalid samples with Temp < -10
    full_data = full_data.drop(full_data[full_data.Temp < -10].index)
return full_data
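# Hedged example (the relative dataset path below is an assumption): load the data
# and keep the Israel subset, where "Date" now holds ordinal days of the year.
def _example_load_israel(path: str = "../datasets/City_Temperature.csv") -> pd.DataFrame:
    df = load_data(path)
    return df[df["Country"] == "Israel"]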
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data("/home/alonbentzi/IML.HUJI/datasets/City_Temperature.csv")
# Question 2 - Exploring data for specific country
    Israel_data = data.loc[data['Country'] == "Israel"].copy()
    # convert "Year" to string so px.scatter treats it as a discrete color
Israel_data["Year"] = Israel_data["Year"].astype(str)
#plot Israel temp as function of Day of the year
fig = px.scatter(Israel_data, x="Date", y="Temp", color="Year",
title="Temp as a function of Day of the year | Israel")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Israel_data.png"))
# grouping by 'Month'
IL_by_month = Israel_data.groupby(['Month']).Temp.agg(std='std')
fig = px.bar(IL_by_month, x=IL_by_month.index, y="std",
title="STD Temp as a function of Month in Israel")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Month_tmp.png"))
# Question 3 - Exploring differences between countries
# grouping by 'Country & 'Month'
grouped_by_month_and_country = data.groupby(['Month','Country']).Temp.agg([np.mean, np.std])
grouped_by_month_and_country = grouped_by_month_and_country.reset_index('Country')
print(grouped_by_month_and_country.shape)
print(grouped_by_month_and_country.columns)
fig = px.line(grouped_by_month_and_country, y='mean' ,color='Country',
error_y= 'std',
labels={'x': "Month",
'Temp': "Temp (Avg)"},
title="std Temp as a function of Month")
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Month_tmp_with_err.png"))
# Question 4 - Fitting model for different values of `k`
train_x, train_y, test_x, test_y = split_train_test(Israel_data['Date'], Israel_data['Temp'])
    losses_array = np.empty((0, 2))
for k in range(1,11):
model = PolynomialFitting(k)
model._fit(train_x, train_y)
temp_loss = round(model._loss(test_x, test_y), 2)
losses_array = np.append(losses_array, [[k, temp_loss]], axis=0)
fig = px.bar(losses_array, x=losses_array[:,0], y=losses_array[:,1],
labels={'x': "K", "y": "Temp_loss"})
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "error_for_each_k.png"))
# Question 5 - Evaluating fitted model on different countries
BEST_K = 5
    countries = []
loss_countries = []
model_5 = PolynomialFitting(BEST_K)
model_5.fit(Israel_data["Date"], Israel_data["Temp"])
for country in data["Country"].unique():
if country == "Israel": continue
df_country = data[data["Country"] == country]
loss = model_5.loss(df_country['Date'], df_country['Temp'])
        countries.append(country)
loss_countries.append(loss)
    # convert lists to np.array
    countries = np.array(countries)
    loss_countries = np.array(loss_countries)
    fig = px.bar(x=countries, y=loss_countries,
labels={'x': "Countries", "y": "Temp_loss"})
fig.write_image(os.path.join("/home/alonbentzi/IML.HUJI/exercises/.plots", "Q5.png"))
|
the-stack_0_1969 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.channel import Channel
from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \
cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \
MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \
cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \
CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \
OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \
MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \
MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \
MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \
MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.kex_gex import KexGex
from paramiko.kex_group1 import KexGroup1
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (SSHException, BadAuthenticationType,
ChannelException, ProxyCommandFailure)
from paramiko.util import retry_on_signal
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
try:
from Crypto.Util import Counter
except ImportError:
from paramiko.util import Counter
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport (threading.Thread):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
`channels <.Channel>`, across the session. Multiple channels can be
multiplexed across a single session (and often are, in the case of port
forwardings).
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'paramiko_%s' % paramiko.__version__
_preferred_ciphers = ('aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc',
'aes256-cbc', '3des-cbc', 'arcfour128', 'arcfour256')
_preferred_macs = ('hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96')
_preferred_keys = ('ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256')
_preferred_kex = ('diffie-hellman-group1-sha1', 'diffie-hellman-group-exchange-sha1')
_preferred_compression = ('none',)
_cipher_info = {
'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16},
'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32},
'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16},
'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16},
'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32},
'3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24},
'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16},
'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32},
}
_mac_info = {
'hmac-sha1': {'class': sha1, 'size': 20},
'hmac-sha1-96': {'class': sha1, 'size': 12},
'hmac-md5': {'class': md5, 'size': 16},
'hmac-md5-96': {'class': md5, 'size': 12},
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
'ecdsa-sha2-nistp256': ECDSAKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group-exchange-sha1': KexGex,
}
_compression_info = {
# [email protected] is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'[email protected]': (ZlibCompressor, ZlibDecompressor),
'zlib': (ZlibCompressor, ZlibDecompressor),
'none': (None, None),
}
_modulus_pack = None
def __init__(self, sock):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the `.Transport` object; it doesn't begin the
SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
int representing the number of bytes written. Returns
0 or raises ``EOFError`` if the stream has been closed.
- ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
string. Returns 0 or raises ``EOFError`` if the stream has been
closed.
- ``close()``: Closes the socket.
- ``settimeout(n)``: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the ``sock`` argument. (A host string is a hostname with an
optional port (separated by ``":"``) which will be converted into a
tuple of ``(hostname, port)``.) A socket will be connected to this
address and used for communication. Exceptions from the ``socket``
call may be thrown in this case.
:param socket sock:
a socket or socket-like object to create the session over.
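
        A minimal sketch of typical use (the hostname and port below are
        placeholders, not part of this API)::

            import paramiko
            # passing an address tuple is equivalent to passing a connected socket
            t = paramiko.Transport(('ssh.example.com', 22))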
"""
self.active = False
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 1
self.window_size = 65536
self.max_packet_size = 34816
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'paramiko.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
out = '<paramiko.Transport at %s' % hex(long(id(self)) & xffffffff)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
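
        For example, to restrict the transport to a subset of the ciphers it
        already supports (an illustrative sketch; ``transport`` is an existing
        `.Transport` instance)::

            opts = transport.get_security_options()
            opts.ciphers = ('aes256-ctr', 'aes128-ctr')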
"""
return SecurityOptions(self)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new `.Transport`. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling `auth_password <Transport.auth_password>` or
`auth_publickey <Transport.auth_publickey>`.
.. note:: `connect` is a simpler method for connecting as a client.
.. note:: After calling this method (or `start_server` or `connect`),
you should no longer directly read from or write to the original
socket object.
:param .threading.Event event:
an event to trigger when negotiation is complete (optional)
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
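
        A typical synchronous client flow looks roughly like this (the
        hostname and credentials are placeholders)::

            t = paramiko.Transport(('ssh.example.com', 22))
            t.start_client()
            server_key = t.get_remote_server_key()
            # verify server_key against your known-hosts data here, then:
            t.auth_password('user', 'secret')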
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new `.Transport` and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods `get_allowed_auths
<.ServerInterface.get_allowed_auths>`, `check_auth_none
<.ServerInterface.check_auth_none>`, `check_auth_password
<.ServerInterface.check_auth_password>`, and `check_auth_publickey
<.ServerInterface.check_auth_publickey>` in the given ``server`` object
to control the authentication process.
After a successful authentication, the client should request to open a
channel. Override `check_channel_request
<.ServerInterface.check_channel_request>` in the given ``server``
object to allow channels to be opened.
.. note::
After calling this method (or `start_client` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete.
:param .ServerInterface server:
an object used to perform authentication and create `channels
<.Channel>`
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
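
        A rough server-side sketch (``MyServer`` is a hypothetical
        `.ServerInterface` subclass and ``client_sock`` an already-accepted
        socket)::

            t = paramiko.Transport(client_sock)
            t.add_server_key(paramiko.RSAKey.from_private_key_file('test_rsa.key'))
            t.start_server(server=MyServer())
            chan = t.accept(timeout=20)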
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
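
        For example (the key filename is a placeholder)::

            host_key = paramiko.RSAKey.from_private_key_file('test_rsa.key')
            t.add_server_key(host_key)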
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ['/etc/ssh/moduli', '/usr/local/etc/moduli']
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
load_server_moduli = staticmethod(load_server_moduli)
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises SSHException: if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(self):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('session')
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
            the source address (``(str, int)``) of the x11 server (port is the
            x11 port, i.e. 6010)
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"[email protected]"``.
This is just an alias for ``open_channel('[email protected]')``.
:return: a new `.Channel`
:raises SSHException:
if the request is rejected or the session ends prematurely
"""
return self.open_channel('[email protected]')
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``"forwarded-tcpip"``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel('forwarded-tcpip', dest_addr, src_addr)
def open_channel(self, kind, dest_addr=None, src_addr=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected or the session ends
prematurely
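
        For instance, a ``"direct-tcpip"`` channel tunnelling a connection to
        a remote web server might be requested like this (the addresses are
        purely illustrative; ``t`` is an authenticated `.Transport`)::

            chan = t.open_channel('direct-tcpip',
                                  dest_addr=('10.0.0.5', 80),
                                  src_addr=('127.0.0.1', 4000))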
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.isSet():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises SSHException: if the server refused the TCP forward request
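
        A sketch using a custom handler (the handler body is illustrative;
        ``t`` is an authenticated `.Transport`)::

            def on_forward(channel, origin, server):
                print('forwarded connection from %s:%d' % origin)
                channel.close()

            port = t.request_port_forward('', 8080, handler=on_forward)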
"""
if not self.active:
raise SSHException('SSH session not active')
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
                # src_addr is (origin_addr, origin_port) and dest_addr_port is
                # (server_addr, server_port); the default handler ignores both.
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
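
        For example (the remote and local paths are placeholders; ``t`` is an
        authenticated `.Transport`)::

            sftp = t.open_sftp_client()
            sftp.get('/remote/path/report.txt', '/tmp/report.txt')
            sftp.close()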
"""
return SFTPClient.from_transport(self)
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('[email protected]', wait=False))
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
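
        For example, the keepalive mechanism issues a request like this
        (shown purely for illustration)::

            t.global_request('[email protected]', wait=False)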
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, ``None``
is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
def connect(self, hostkey=None, username='', password=None, pkey=None):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for `start_client`, `get_remote_server_key`, and
`Transport.auth_password` or `Transport.auth_publickey`. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call `open_channel` or
`open_session` to get a `.Channel` object, which is used for data
transfer.
.. note::
If you fail to supply a password or private key, this method may
succeed, but a subsequent `open_channel` or `open_session` call may
fail because you haven't authenticated yet.
:param .PKey hostkey:
the host key expected from the server, or ``None`` if you don't
want to do host key verification.
:param str username: the username to authenticate as.
:param str password:
a password to use for authentication, if you want to use password
authentication; otherwise ``None``.
:param .PKey pkey:
a private key to use for authentication, if you want to use private
key authentication; otherwise ``None``.
:raises SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
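
        A compact client sketch (the host, username and password are
        placeholders)::

            import paramiko
            t = paramiko.Transport(('ssh.example.com', 22))
            t.connect(username='user', password='secret')
            sftp = t.open_sftp_client()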
"""
if hostkey is not None:
self._preferred_keys = [hostkey.get_name()]
self.start_client()
# check host key if we were given one
if hostkey is not None:
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes())))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes())))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None):
if password is not None:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
else:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param class handler:
subclass of `.SubsystemHandler` that handles this subsystem.
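
        For example, wiring up the bundled SFTP subsystem on a server-mode
        transport (``StubSFTPServer`` is a hypothetical `.SFTPServerInterface`
        subclass supplied by the application)::

            t.set_subsystem_handler('sftp', paramiko.SFTPServer, StubSFTPServer)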
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
        supplied, this method returns ``None``.
        :return: server-supplied banner (`str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
:raises SSHException: if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param basestring password: the password to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:param bool fallback:
``True`` if an attempt at an automated "interactive" password auth
should be made if the server doesn't support normal password auth
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType as e:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in e.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [password]
return self.auth_interactive(username, handler)
except SSHException:
# attempt failed; just raise the original exception
raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
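
        For example (the key path and username are placeholders)::

            pkey = paramiko.RSAKey.from_private_key_file('/home/user/.ssh/id_rsa')
            t.auth_publickey('user', pkey)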
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
        periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty).
        :raises BadAuthenticationType: if "keyboard-interactive" authentication
            isn't allowed by the server for this user
:raises AuthenticationException: if the authentication failed
:raises SSHException: if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
``"paramiko.transport"`` but it can be set to anything you want. (See
the `.logging` module for more info.) SSH Channels will log to a
sub-channel of the one specified.
:param str name: new channel name for logging
.. versionadded:: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
:return: channel name as a `str`
.. versionadded:: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
:param bool hexdump:
            ``True`` to log protocol traffic (in hex) to the log; ``False``
otherwise.
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return ``True`` if the transport is currently logging hex dumps of
protocol traffic.
:return: ``True`` if hex dumps are being logged, else ``False``.
.. versionadded:: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
        Turn on/off compression. This will only have an effect before starting
        the transport (i.e. before calling `connect`, etc.). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
self._preferred_compression = ('[email protected]', 'zlib', 'none')
else:
self._preferred_compression = ('none',)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``'getpeername'`` on the underlying
socket. If the socket-like object has no ``'getpeername'`` method,
then ``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return 'unknown', 0
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
while self.isAlive():
self.join(10)
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"""used by KexGex to find primes for group exchange"""
return self._modulus_pack
def _next_channel(self):
"""you are holding the lock"""
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"""used by a Channel to remove itself from the active channel list"""
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"""used by a kex object to set the K (root key) and H (exchange hash)"""
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
"""used by a kex object to register the next packet type it expects to see"""
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
def _compute_key(self, id, nbytes):
"""id is 'A' - 'F' for the various keys used by ssh"""
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(b(id))
m.add_bytes(self.session_id)
out = sofar = sha1(m.asbytes()).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = sha1(m.asbytes()).digest()
out += digest
sofar += digest
return out[:nbytes]
def _get_cipher(self, name, key, iv):
if name not in self._cipher_info:
raise SSHException('Unknown client cipher ' + name)
if name in ('arcfour128', 'arcfour256'):
# arcfour cipher
cipher = self._cipher_info[name]['class'].new(key)
# as per RFC 4345, the first 1536 bytes of keystream
# generated by the cipher MUST be discarded
            cipher.encrypt(b" " * 1536)
return cipher
elif name.endswith("-ctr"):
# CTR modes, we need a counter
counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
else:
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def run(self):
# (use the exposed "run" method, because if we specify a thread target
# of a private method, threading.Thread will keep a reference to it
# indefinitely, creating a GC cycle and not letting Transport ever be
# GC'd. it's a bug in Thread.)
# Hold reference to 'sys' so we can test sys.modules to detect
# interpreter shutdown.
self.sys = sys
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
if self.server_mode:
self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff))
else:
self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff))
try:
try:
self.packetizer.write_all(b(self.local_version + '\r\n'))
self._check_banner()
self._send_kex_init()
self._expect_packet(MSG_KEXINIT)
while self.active:
if self.packetizer.need_rekey() and not self.in_kex:
self._send_kex_init()
try:
ptype, m = self.packetizer.read_message()
except NeedRekeyException:
continue
if ptype == MSG_IGNORE:
continue
elif ptype == MSG_DISCONNECT:
self._parse_disconnect(m)
self.active = False
self.packetizer.close()
break
elif ptype == MSG_DEBUG:
self._parse_debug(m)
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 39):
self.kex_engine.parse_next(ptype, m)
continue
if ptype in self._handler_table:
self._handler_table[ptype](self, m)
elif ptype in self._channel_handler_table:
chanid = m.get_int()
chan = self._channels.get(chanid)
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
else:
self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
self.active = False
self.packetizer.close()
elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
self.auth_handler._handler_table[ptype](self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
msg.add_byte(cMSG_UNIMPLEMENTED)
msg.add_int(m.seqno)
self._send_message(msg)
except SSHException as e:
self._log(ERROR, 'Exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
except EOFError as e:
self._log(DEBUG, 'EOF in transport thread')
#self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error as e:
if type(e.args) is tuple:
if e.args:
emsg = '%s (%d)' % (e.args[1], e.args[0])
else: # empty tuple, e.g. socket.timeout
emsg = str(e) or repr(e)
else:
emsg = e.args
self._log(ERROR, 'Socket exception: ' + emsg)
self.saved_exception = e
except Exception as e:
self._log(ERROR, 'Unknown exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
_active_threads.remove(self)
for chan in list(self._channels.values()):
chan._unlink()
if self.active:
self.active = False
self.packetizer.close()
if self.completion_event is not None:
self.completion_event.set()
if self.auth_handler is not None:
self.auth_handler.abort()
for event in self.channel_events.values():
event.set()
try:
self.lock.acquire()
self.server_accept_cv.notify()
finally:
self.lock.release()
self.sock.close()
except:
# Don't raise spurious 'NoneType has no attribute X' errors when we
# wake up during interpreter shutdown. Or rather -- raise
# everything *if* sys.modules (used as a convenient sentinel)
# appears to still exist.
if self.sys.modules is not None:
raise
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init is None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
                raise SSHException('Error reading SSH protocol banner: ' + str(e))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = buf.find(' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(cMSG_KEXINIT)
m.add_bytes(os.urandom(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string(bytes())
m.add_string(bytes())
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = m.asbytes()
self._send_message(m)
def _parse_kex_init(self, m):
cookie = m.get_bytes(16)
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
server_encrypt_algo_list = m.get_list()
client_mac_algo_list = m.get_list()
server_mac_algo_list = m.get_list()
client_compress_algo_list = m.get_list()
server_compress_algo_list = m.get_list()
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
unused = m.get_int()
self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) +
' client encrypt:' + str(client_encrypt_algo_list) +
' server encrypt:' + str(server_encrypt_algo_list) +
' client mac:' + str(client_mac_algo_list) +
' server mac:' + str(server_mac_algo_list) +
' client compress:' + str(client_compress_algo_list) +
' server compress:' + str(server_compress_algo_list) +
' client lang:' + str(client_lang_list) +
' server lang:' + str(server_lang_list) +
' kex follows?' + str(kex_follows))
# as a server, we pick the first item in the client's list that we support.
# as a client, we pick the first item in our list that the server supports.
if self.server_mode:
agreed_kex = list(filter(self._preferred_kex.__contains__, kex_algo_list))
else:
agreed_kex = list(filter(kex_algo_list.__contains__, self._preferred_kex))
if len(agreed_kex) == 0:
raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
self.kex_engine = self._kex_info[agreed_kex[0]](self)
if self.server_mode:
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list))
else:
agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys))
if len(agreed_keys) == 0:
raise SSHException('Incompatible ssh peer (no acceptable host key)')
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
if self.server_mode:
agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__,
server_encrypt_algo_list))
agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__,
client_encrypt_algo_list))
else:
agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__,
self._preferred_ciphers))
agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__,
self._preferred_ciphers))
if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
raise SSHException('Incompatible ssh server (no acceptable ciphers)')
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
if self.server_mode:
agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list))
agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list))
else:
agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs))
agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs))
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
self.remote_mac = agreed_remote_macs[0]
if self.server_mode:
agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list))
agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list))
else:
agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression))
agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression))
if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
(agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
self.remote_mac, self.local_compression, self.remote_compression))
# save for computing hash later...
# now wait! openssh has a bug (and others might too) where there are
# actually some extra bytes (one NUL byte in openssh's case) added to
# the end of the packet but not parsed. turns out we need to throw
# away those bytes because they aren't part of the hash.
self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
def _activate_inbound(self):
"""switch on newly negotiated encryption parameters for inbound traffic"""
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
else:
IV_in = self._compute_key('B', block_size)
key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('E', mac_engine().digest_size)
else:
mac_key = self._compute_key('F', mac_engine().digest_size)
self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_in = self._compression_info[self.remote_compression][1]
if (compress_in is not None) and ((self.remote_compression != '[email protected]') or self.authenticated):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
"""switch on newly negotiated encryption parameters for outbound traffic"""
m = Message()
m.add_byte(cMSG_NEWKEYS)
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
engine = self._get_cipher(self.local_cipher, key_out, IV_out)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('F', mac_engine().digest_size)
else:
mac_key = self._compute_key('E', mac_engine().digest_size)
sdctr = self.local_cipher.endswith('-ctr')
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr)
compress_out = self._compression_info[self.local_compression][0]
if (compress_out is not None) and ((self.local_compression != '[email protected]') or self.authenticated):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
self.in_kex = False
# we always expect to receive NEWKEYS now
self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == '[email protected]':
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == '[email protected]':
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, 'Switch to new keys ...')
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event is not None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
def _parse_global_request(self, m):
kind = m.get_text()
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
ok = False
elif kind == 'tcpip-forward':
address = m.get_text()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok:
ok = (ok,)
elif kind == 'cancel-tcpip-forward':
address = m.get_text()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(cMSG_REQUEST_SUCCESS)
msg.add(*extra)
else:
msg.add_byte(cMSG_REQUEST_FAILURE)
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, 'Global request successful.')
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, 'Global request denied.')
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, 'Success for unrequested channel! [??]')
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
self._log(INFO, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_text()
lang = m.get_text()
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
self._log(INFO, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
            if chanid in self.channel_events:
                self._channels.delete(chanid)
                self.channel_events[chanid].set()
                del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open(self, m):
kind = m.get_text()
chanid = m.get_int()
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
if (kind == '[email protected]') and (self._forward_agent_handler is not None):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
server_addr = m.get_text()
server_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
if kind == 'direct-tcpip':
                # handle direct-tcpip requests coming from the client
dest_addr = m.get_text()
dest_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
my_chanid, (origin_addr, origin_port), (dest_addr, dest_port))
else:
reason = self.server_object.check_channel_request(kind, my_chanid)
if reason != OPEN_SUCCEEDED:
self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
msg.add_int(chanid)
msg.add_int(reason)
msg.add_string('')
msg.add_string('en')
self._send_message(msg)
return
chan = Channel(my_chanid)
self.lock.acquire()
try:
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
m.add_int(chanid)
m.add_int(my_chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
self._send_message(m)
self._log(INFO, 'Secsh channel %d (%s) opened.', my_chanid, kind)
if kind == '[email protected]':
self._forward_agent_handler(chan)
elif kind == 'x11':
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
always_display = m.get_boolean()
msg = m.get_string()
lang = m.get_string()
self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions (object):
"""
Simple object containing the security preferences of an ssh transport.
These are tuples of acceptable ciphers, digests, key types, and key
exchange algorithms, listed in order of preference.
Changing the contents and/or order of these fields affects the underlying
`.Transport` (but only if you change them before starting the session).
If you try to add an algorithm that paramiko doesn't recognize,
``ValueError`` will be raised. If you try to assign something besides a
tuple to one of the fields, ``TypeError`` will be raised.
"""
#__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
__slots__ = '_transport'
def __init__(self, transport):
self._transport = transport
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return '<paramiko.SecurityOptions for %s>' % repr(self._transport)
def _get_ciphers(self):
return self._transport._preferred_ciphers
def _get_digests(self):
return self._transport._preferred_macs
def _get_key_types(self):
return self._transport._preferred_keys
def _get_kex(self):
return self._transport._preferred_kex
def _get_compression(self):
return self._transport._preferred_compression
def _set(self, name, orig, x):
if type(x) is list:
x = tuple(x)
if type(x) is not tuple:
raise TypeError('expected tuple or list')
possible = list(getattr(self._transport, orig).keys())
forbidden = [n for n in x if n not in possible]
if len(forbidden) > 0:
raise ValueError('unknown cipher')
setattr(self._transport, name, x)
def _set_ciphers(self, x):
self._set('_preferred_ciphers', '_cipher_info', x)
def _set_digests(self, x):
self._set('_preferred_macs', '_mac_info', x)
def _set_key_types(self, x):
self._set('_preferred_keys', '_key_info', x)
def _set_kex(self, x):
self._set('_preferred_kex', '_kex_info', x)
def _set_compression(self, x):
self._set('_preferred_compression', '_compression_info', x)
ciphers = property(_get_ciphers, _set_ciphers, None,
"Symmetric encryption ciphers")
digests = property(_get_digests, _set_digests, None,
"Digest (one-way hash) algorithms")
key_types = property(_get_key_types, _set_key_types, None,
"Public-key algorithms")
kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
compression = property(_get_compression, _set_compression, None,
"Compression algorithms")
class ChannelMap (object):
def __init__(self):
# (id -> Channel)
self._map = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def put(self, chanid, chan):
self._lock.acquire()
try:
self._map[chanid] = chan
finally:
self._lock.release()
def get(self, chanid):
self._lock.acquire()
try:
return self._map.get(chanid, None)
finally:
self._lock.release()
def delete(self, chanid):
self._lock.acquire()
try:
try:
del self._map[chanid]
except KeyError:
pass
finally:
self._lock.release()
def values(self):
self._lock.acquire()
try:
return list(self._map.values())
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self._map)
finally:
self._lock.release()
|
the-stack_0_1970 | from functools import partial, wraps, update_wrapper
import copy
import re
import clize.errors
from .core import _accepts_context, _call_fragment_body, collect, DROP, many as _many
from .objects import Context
__all__ = ['accumulate', 'callable', 'filter', 'many', 'format', 'regex', 'keywords',
'focus', 'magnify', 'try_except']
decorators = []
def _get_lenses():
global lenses, _get_lenses
try:
import lenses
except ImportError: # pragma: no cover
lenses = None
_get_lenses = lambda: lenses
return lenses
def _get_formatter():
global _get_formatter
import string
formatter = string.Formatter()
_get_formatter = lambda: formatter
return formatter
def decorator(*names, doc=None, takes_string=False, prep=None, dec_args=()):
if prep is None:
if len(dec_args) == 1:
prep = lambda _, a: a
elif len(dec_args) > 1:
prep = lambda _, *a: a
def wrapperer(_spy_decorator):
@wraps(_spy_decorator)
def wrapper(fn, dec_args=()):
is_decorator = getattr(fn, '_spy_decorated', None)
if is_decorator:
xfn = fn
elif _accepts_context(fn):
xfn = partial(_call_fragment_body, fn)
else:
xfn = partial(_drop_context, fn)
if prep:
opaque = prep(fn, *dec_args)
def wrapped(v, context=None):
_spy_callable = fn # noqa: F841
_spy_value = v # noqa: F841
return _spy_decorator(xfn, v, context, opaque)
else:
def wrapped(v, context=None):
_spy_callable = fn # noqa: F841
_spy_value = v # noqa: F841
return _spy_decorator(xfn, v, context)
update_wrapper(wrapped, fn)
wrapped._spy_decorated = True
return wrapped
if dec_args:
orig_wrapper = wrapper
def wrapper(*a):
return partial(orig_wrapper, dec_args=a)
wrapper.decorator_names = names
wrapper.decorator_help = doc
wrapper.takes_string = takes_string
wrapper.dec_args = dec_args
decorators.append(wrapper)
return wrapper
return wrapperer
def _drop_context(fn, v, context):
return _call_fragment_body(fn, v)
@decorator('--accumulate', '-a', doc='Pass an iterator of yielded values to this fragment')
def accumulate(fn, v, context):
return fn(collect(context), context)
@decorator('--callable', '-c', doc='Call the result of this fragment')
def callable(fn, v, context):
result = fn(v, context)
return result(v)
@decorator('--filter', '-f', doc='Treat this fragment as a predicate to filter data')
def filter(fn, v, context):
result = fn(v, context)
return v if result else DROP
@decorator('--many', '-m', doc='Iterate over this fragment')
def many(fn, v, context):
result = fn(v, context)
return _many(result)
@decorator('--format', '-i', doc='Interpolate argument as a format string', takes_string=True, prep=lambda _: _get_formatter())
def format(fn, v, context, formatter):
env, x = fn(v, context)
return formatter.vformat(x, v, env)
@decorator('--regex', '--regexp', '-R', doc='Match argument as a regexp', takes_string=True)
def regex(fn, v, context):
env, x = fn(v, context)
return re.match(x, v)
def _kw_prep(fn):
base = fn
while hasattr(base, '__wrapped__'):
base = base.__wrapped__
if not hasattr(base, '_spy_setenv'):
raise ValueError("inappropriate function")
return base._spy_setenv
@decorator('--keywords', '-k', doc='Execute with the input value as the scope', prep=_kw_prep)
def keywords(fn, v, context, setenv):
setenv(v)
return fn(v, context)
def _convert_focus(s):
lenses = _get_lenses()
if lenses is not None and s.startswith('_'):
context = Context()
context['_'] = lenses.lens
return eval(s, context, {})
if s.startswith('.'):
return s[1:]
if s[:1] in '0123456789-' and (len(s) == 1 or s[1:].isdigit()):
return int(s)
if ':' in s:
if lenses is None:
raise clize.errors.ArgumentError("slice focusing requires `lenses`")
sbits = s.split(':')
bits = []
for x in sbits:
if x == '':
bits.append(None)
elif x.isdigit() or x[:1] == '-' and x[1:].isdigit():
bits.append(int(x))
else:
break
else:
if len(bits) in (2,3):
return lenses.lens[slice(*bits)].Each()
return s
_convert_focus.usage_name = 'ITEM'
def _focus_prep(fn, focus):
lenses = _get_lenses()
if lenses is None:
def apply(f, v):
v_ = copy.copy(v)
v_[focus] = f(v_[focus])
return v_
return apply
if not isinstance(focus, lenses.UnboundLens):
focus = lenses.lens[focus]
return lambda f, v: focus.modify(f)(v)
@decorator('--focus', '-o', doc='Operate on an item of the input in-place',
prep=_focus_prep, dec_args=[_convert_focus])
def focus(fn, v, context, f):
fn = partial(fn, context=context)
return f(fn, v)
def _magnify_prep(fn, focus):
lenses = _get_lenses()
if lenses is None:
def apply(f, v):
return f(v[focus])
return apply
if not isinstance(focus, lenses.UnboundLens):
focus = lenses.lens[focus]
return lambda f, v: f(focus.get()(v))
@decorator('--magnify', '-O', doc='Operate on and return an item of the input',
prep=_magnify_prep, dec_args=[_convert_focus])
def magnify(fn, v, context, f):
fn = partial(fn, context=context)
return f(fn, v)
@decorator('--try', '-t', doc='Filter out input that causes the fragment to raise an exception')
def try_except(fn, v, context):
try:
return fn(v, context)
except:
pass
return DROP
|
the-stack_0_1971 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CardFundInfo import CardFundInfo
from alipay.aop.api.domain.CardCreditInfo import CardCreditInfo
class AlipayAssetCardNewtemplateCreateModel(object):
def __init__(self):
self._account_model = None
self._assets_code = None
self._biz_from = None
self._card_fund_infos = None
self._card_model = None
self._card_name = None
self._creator = None
self._credit_info = None
self._extend_info = None
self._operator = None
self._out_biz_no = None
self._partner_id = None
self._period_type = None
self._product_code = None
self._settle_user_id = None
@property
def account_model(self):
return self._account_model
@account_model.setter
def account_model(self, value):
self._account_model = value
@property
def assets_code(self):
return self._assets_code
@assets_code.setter
def assets_code(self, value):
self._assets_code = value
@property
def biz_from(self):
return self._biz_from
@biz_from.setter
def biz_from(self, value):
self._biz_from = value
@property
def card_fund_infos(self):
return self._card_fund_infos
@card_fund_infos.setter
def card_fund_infos(self, value):
if isinstance(value, list):
self._card_fund_infos = list()
for i in value:
if isinstance(i, CardFundInfo):
self._card_fund_infos.append(i)
else:
self._card_fund_infos.append(CardFundInfo.from_alipay_dict(i))
@property
def card_model(self):
return self._card_model
@card_model.setter
def card_model(self, value):
self._card_model = value
@property
def card_name(self):
return self._card_name
@card_name.setter
def card_name(self, value):
self._card_name = value
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def credit_info(self):
return self._credit_info
@credit_info.setter
def credit_info(self, value):
if isinstance(value, CardCreditInfo):
self._credit_info = value
else:
self._credit_info = CardCreditInfo.from_alipay_dict(value)
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
self._extend_info = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def period_type(self):
return self._period_type
@period_type.setter
def period_type(self, value):
self._period_type = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def settle_user_id(self):
return self._settle_user_id
@settle_user_id.setter
def settle_user_id(self, value):
self._settle_user_id = value
def to_alipay_dict(self):
params = dict()
if self.account_model:
if hasattr(self.account_model, 'to_alipay_dict'):
params['account_model'] = self.account_model.to_alipay_dict()
else:
params['account_model'] = self.account_model
if self.assets_code:
if hasattr(self.assets_code, 'to_alipay_dict'):
params['assets_code'] = self.assets_code.to_alipay_dict()
else:
params['assets_code'] = self.assets_code
if self.biz_from:
if hasattr(self.biz_from, 'to_alipay_dict'):
params['biz_from'] = self.biz_from.to_alipay_dict()
else:
params['biz_from'] = self.biz_from
if self.card_fund_infos:
if isinstance(self.card_fund_infos, list):
for i in range(0, len(self.card_fund_infos)):
element = self.card_fund_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.card_fund_infos[i] = element.to_alipay_dict()
if hasattr(self.card_fund_infos, 'to_alipay_dict'):
params['card_fund_infos'] = self.card_fund_infos.to_alipay_dict()
else:
params['card_fund_infos'] = self.card_fund_infos
if self.card_model:
if hasattr(self.card_model, 'to_alipay_dict'):
params['card_model'] = self.card_model.to_alipay_dict()
else:
params['card_model'] = self.card_model
if self.card_name:
if hasattr(self.card_name, 'to_alipay_dict'):
params['card_name'] = self.card_name.to_alipay_dict()
else:
params['card_name'] = self.card_name
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.credit_info:
if hasattr(self.credit_info, 'to_alipay_dict'):
params['credit_info'] = self.credit_info.to_alipay_dict()
else:
params['credit_info'] = self.credit_info
if self.extend_info:
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.period_type:
if hasattr(self.period_type, 'to_alipay_dict'):
params['period_type'] = self.period_type.to_alipay_dict()
else:
params['period_type'] = self.period_type
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.settle_user_id:
if hasattr(self.settle_user_id, 'to_alipay_dict'):
params['settle_user_id'] = self.settle_user_id.to_alipay_dict()
else:
params['settle_user_id'] = self.settle_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayAssetCardNewtemplateCreateModel()
if 'account_model' in d:
o.account_model = d['account_model']
if 'assets_code' in d:
o.assets_code = d['assets_code']
if 'biz_from' in d:
o.biz_from = d['biz_from']
if 'card_fund_infos' in d:
o.card_fund_infos = d['card_fund_infos']
if 'card_model' in d:
o.card_model = d['card_model']
if 'card_name' in d:
o.card_name = d['card_name']
if 'creator' in d:
o.creator = d['creator']
if 'credit_info' in d:
o.credit_info = d['credit_info']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'operator' in d:
o.operator = d['operator']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'period_type' in d:
o.period_type = d['period_type']
if 'product_code' in d:
o.product_code = d['product_code']
if 'settle_user_id' in d:
o.settle_user_id = d['settle_user_id']
return o
|
the-stack_0_1972 | import pathlib
import requests
import json
import argparse
from folioclient.FolioClient import FolioClient
parser = argparse.ArgumentParser()
parser.add_argument("operation", help="backup or restore")
parser.add_argument("path", help="result file path (backup); take data from this file (restore)")
parser.add_argument("okapi_url", help="url of your FOLIO OKAPI endpoint.")
parser.add_argument("tenant_id", help="id of the FOLIO tenant")
parser.add_argument("username", help=("the api user"))
parser.add_argument("password", help=("the api users password"))
args = parser.parse_args()
folio_client = FolioClient(args.okapi_url, args.tenant_id, args.username, args.password)
okapiHeaders = folio_client.okapi_headers
if str(args.operation) == 'backup':
periods_query = "?withOpeningDays=true&showPast=true&showExceptional"
periods_path = "/calendar/periods/{}/period{}"
sp_request = requests.get(args.okapi_url + '/service-points',
headers=okapiHeaders)
sp_json = json.loads(sp_request.text)
service_points_ids = [sp['id'] for sp
in sp_json['servicepoints']]
periods_to_save = {}
for sp_id in service_points_ids:
query = periods_path.format(sp_id, periods_query)
period_req = requests.get(args.okapi_url + query,
headers=okapiHeaders)
periods_resp = json.loads(period_req.text)
periods_to_save[sp_id] = periods_resp
with open(args.path, 'w+') as settings_file:
settings_file.write(json.dumps(periods_to_save))
if args.operation == 'restore':
with open(args.path) as settings_file:
js = json.load(settings_file)
for sp_id, periods in js.items():
if any(periods['openingPeriods']):
period = periods['openingPeriods'][0]
periods_path = "/calendar/periods/{}/period".format(sp_id)
# print("{}, {}".format(sp_id, period['openingPeriods'][0]))
req = requests.post(args.okapi_url + periods_path,
data=json.dumps(period),
headers=okapiHeaders)
print(req.status_code)
print(req.text)
if str(req.status_code).startswith('4'):
print(req.text)
|
the-stack_0_1973 | #Modules
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
#Accuracy Threshold
ACCURACY_THRESHOLD = 0.95
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy', 0) > ACCURACY_THRESHOLD):
print("\nReached %2.2f%% accuracy, so stopping training!!" %(ACCURACY_THRESHOLD*100))
self.model.stop_training = True
callbacks = myCallback()
#Dividing the Dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer','dog', 'frog', 'horse', 'ship', 'truck']
#Sample Plotting of Dataset
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i][0]])
plt.show()
#Convolutional Layer, Pooling Layer
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
#Model Summary
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary()
#Compiling the model
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
#Deciding the training layers
epochs = 50
#Defining Variables
history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels), callbacks=[callbacks])
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(0,epochs)
#Plotting accuracy
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()
#Final Accuracy
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
#Saving the model
model.save('dataset.h5')
print("Saving the model as dataset.h5")
|
the-stack_0_1974 | import json
import sys
import iotbx.phil
import dials.util
from dials.algorithms.spot_finding import per_image_analysis
from dials.util import tabulate
from dials.util.options import OptionParser, reflections_and_experiments_from_files
help_message = """
Reports the number of strong spots and computes an estimate of the resolution
limit for each image, given the results of dials.find_spots. Optionally
generates a plot of the per-image statistics (plot=image.png).
Examples::
dials.spot_counts_per_image imported.expt strong.refl
dials.spot_counts_per_image imported.expt strong.refl plot=per_image.png
"""
phil_scope = iotbx.phil.parse(
"""\
resolution_analysis = True
.type = bool
plot = None
.type = path
json = None
.type = path
split_json = False
.type = bool
joint_json = True
.type = bool
id = None
.type = int(value_min=0)
"""
)
@dials.util.show_mail_handle_errors()
def run(args=None):
usage = "dials.spot_counts_per_image [options] imported.expt strong.refl"
parser = OptionParser(
usage=usage,
read_reflections=True,
read_experiments=True,
phil=phil_scope,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args, show_diff_phil=False)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if not reflections and not experiments:
parser.print_help()
return
# FIXME may want to change this to allow many to be passed i.e.
# from parallel runs
if len(reflections) != 1:
sys.exit("Only one reflection list may be passed")
reflections = reflections[0]
if "miller_index" in reflections:
sys.exit("Only unindexed reflections are currently supported")
if any(experiments.crystals()):
sys.exit("Only unindexed experiments are currently supported")
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
if params.id is not None:
reflections = reflections.select(reflections["id"] == params.id)
all_stats = []
for i, expt in enumerate(experiments):
refl = reflections.select(reflections["id"] == i)
stats = per_image_analysis.stats_per_image(
expt, refl, resolution_analysis=params.resolution_analysis
)
all_stats.append(stats)
# transpose stats
summary_table = {}
for s in all_stats:
for k, value in s._asdict().items():
summary_table.setdefault(k, [])
summary_table[k].extend(value)
stats = per_image_analysis.StatsMultiImage(**summary_table)
print(stats)
overall_stats = per_image_analysis.stats_for_reflection_table(
reflections, resolution_analysis=params.resolution_analysis
)
rows = [
("Overall statistics", ""),
("#spots", "%i" % overall_stats.n_spots_total),
("#spots_no_ice", "%i" % overall_stats.n_spots_no_ice),
("d_min", f"{overall_stats.estimated_d_min:.2f}"),
(
"d_min (distl method 1)",
"%.2f (%.2f)"
% (overall_stats.d_min_distl_method_1, overall_stats.noisiness_method_1),
),
(
"d_min (distl method 2)",
"%.2f (%.2f)"
% (overall_stats.d_min_distl_method_2, overall_stats.noisiness_method_2),
),
]
print(tabulate(rows, headers="firstrow"))
if params.json:
if params.split_json:
for k, v in stats._asdict().items():
start, end = params.json.split(".")
with open(f"{start}_{k}.{end}", "w") as fp:
json.dump(v, fp)
if params.joint_json:
with open(params.json, "w") as fp:
json.dump(stats._asdict(), fp)
if params.plot:
import matplotlib
matplotlib.use("Agg")
per_image_analysis.plot_stats(stats, filename=params.plot)
if __name__ == "__main__":
run()
|
the-stack_0_1975 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import paddle.v2 as paddle
import gzip
import sys
import data_provider
import numpy as np
def param():
return paddle.attr.Param(
initial_std=0.01,
initial_mean=0
)
def encoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def decoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def output(x_):
return paddle.layer.fc(
input=x_,
size=784,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(
name='x',
type=paddle.data_type.dense_vector(784)
)
y = encoder(x)
y = decoder(y)
y = output(y)
def train():
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
regularization=paddle.optimizer.L2Regularization(rate=8e-4)
)
loss = paddle.layer.mse_cost(label=x, input=y)
parameters = paddle.parameters.create(loss)
trainer = paddle.trainer.SGD(
cost=loss,
parameters=parameters,
update_equation=optimizer
)
feeding = {'x': 0}
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 50 == 0:
print ("\n pass %d, Batch: %d cost: %f"
% (event.pass_id, event.batch_id, event.cost))
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
with gzip.open('output/params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
parameters.to_tar(f)
reader = data_provider.create_reader('train', 60000)
trainer.train(
paddle.batch(
reader=reader,
batch_size=128
),
feeding=feeding,
num_passes=20,
event_handler=event_handler
)
def test(model_path):
with gzip.open(model_path, 'r') as openFile:
parameters = paddle.parameters.Parameters.from_tar(openFile)
testset = [[x] for x in data_provider.fetch_testingset()['images'][:10]]
    # Run inference with the trained parameters
result = paddle.infer(
input=testset,
parameters=parameters,
output_layer=y,
feeding={'x': 0}
)
return result, np.array(testset)
if __name__ == '__main__':
    # test() returns (reconstruction, original test inputs); unpack accordingly
    result, origin = test('output/params_pass_19.tar.gz')
np.save('origin.dat', origin)
np.save('result.dat', result)
|
the-stack_0_1978 | """ Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
"""Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
"""
# print("@@@@@@@@@@@@",data_path)
data_dict = np.load(data_path, encoding='latin1',allow_pickle=True).item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keepdims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3,name='prob1'))
(self.feed('PReLU3') #pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1,name='prob1'))
(self.feed('prelu4') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
if not model_path:
model_path,_ = os.path.split(os.path.realpath(__file__))
# print("!!!!!!!!!!!!!!!!!!!",model_path)
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = PNet({'data':data})
pnet.load(os.path.join(model_path, 'det1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = RNet({'data':data})
rnet.load(os.path.join(model_path, 'det2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = ONet({'data':data})
onet.load(os.path.join(model_path, 'det3.npy'), sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
# create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for scale in scales:
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
"""Detects faces in a list of images
images: list containing input images
detection_window_size_ratio: ratio of minimum face size to smallest image dimension
pnet, rnet, onet: caffemodel
threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1]
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
    # TODO: round pyramid dimensions to a multiple of 8 to increase the probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
                        return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
                    return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' not in image_obj:
ret.append(None)
continue
onet_input_count = image_obj['onet_input'].shape[0]
out0_per_image = out0[:, i:i + onet_input_count]
score_per_image = score[i:i + onet_input_count]
points_per_image = points[:, i:i + onet_input_count]
ipass = np.where(score_per_image > threshold[2])
points_per_image = points_per_image[:, ipass[0]]
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
image_obj['total_boxes'][:, 0], (5, 1)) - 1
points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
image_obj['total_boxes'][:, 1], (5, 1)) - 1
if image_obj['total_boxes'].shape[0] > 0:
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
points_per_image = points_per_image[:, pick]
ret.append((image_obj['total_boxes'], points_per_image))
else:
ret.append(None)
i += onet_input_count
return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox,reg):
"""Calibrate bounding boxes"""
if reg.shape[1]==1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:,2]-boundingbox[:,0]+1
h = boundingbox[:,3]-boundingbox[:,1]+1
b1 = boundingbox[:,0]+reg[:,0]*w
b2 = boundingbox[:,1]+reg[:,1]*h
b3 = boundingbox[:,2]+reg[:,2]*w
b4 = boundingbox[:,3]+reg[:,3]*h
boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride=2
cellsize=12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:,:,0])
dy1 = np.transpose(reg[:,:,1])
dx2 = np.transpose(reg[:,:,2])
dy2 = np.transpose(reg[:,:,3])
y, x = np.where(imap >= t)
if y.shape[0]==1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y,x)]
reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ]))
if reg.size==0:
reg = np.empty((0,3))
bb = np.transpose(np.vstack([y,x]))
q1 = np.fix((stride*bb+1)/scale)
q2 = np.fix((stride*bb+cellsize-1+1)/scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:,0].copy().astype(np.int32)
y = total_boxes[:,1].copy().astype(np.int32)
ex = total_boxes[:,2].copy().astype(np.int32)
ey = total_boxes[:,3].copy().astype(np.int32)
tmp = np.where(ex>w)
edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1)
ex[tmp] = w
tmp = np.where(ey>h)
edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1)
ey[tmp] = h
tmp = np.where(x<1)
dx.flat[tmp] = np.expand_dims(2-x[tmp],1)
x[tmp] = 1
tmp = np.where(y<1)
dy.flat[tmp] = np.expand_dims(2-y[tmp],1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
"""Convert bboxA to square."""
h = bboxA[:,3]-bboxA[:,1]
w = bboxA[:,2]-bboxA[:,0]
l = np.maximum(w, h)
bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5
bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
|
the-stack_0_1979 | # SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
import time
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Any, Optional
import metrics
from configuration.service_configs import load_service_config
from data_models.data_model import InvalidTrParamPath
from data_models.data_model_parameters import ParameterName
from device_config.configuration_init import build_desired_config
from exceptions import ConfigurationError, Tr069Error
from logger import EnodebdLogger as logger
from state_machines.acs_state_utils import (
does_inform_have_event,
get_all_objects_to_add,
get_all_objects_to_delete,
get_all_param_values_to_set,
get_obj_param_values_to_set,
get_object_params_to_get,
get_optional_param_to_check,
get_param_values_to_set,
get_params_to_get,
parse_get_parameter_values_response,
process_inform_message,
)
from state_machines.enb_acs import EnodebAcsStateMachine
from state_machines.timer import StateMachineTimer
from tr069 import models
AcsMsgAndTransition = namedtuple(
'AcsMsgAndTransition', ['msg', 'next_state'],
)
AcsReadMsgResult = namedtuple(
'AcsReadMsgResult', ['msg_handled', 'next_state'],
)
class EnodebAcsState(ABC):
"""
State class for the Enodeb state machine
States can transition after reading a message from the eNB, sending a
message out to the eNB, or when a timer completes. As such, some states
are only responsible for message sending, and others are only responsible
for reading incoming messages.
In the constructor, set up state transitions.
"""
def __init__(self):
self._acs = None
def enter(self) -> None:
"""
Set up your timers here. Call transition(..) on the ACS when the timer
completes or throw an error
"""
pass
def exit(self) -> None:
"""Destroy timers here"""
pass
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args: message: tr069 message
Returns: name of the next state, if transition required
"""
raise ConfigurationError(
'%s should implement read_msg() if it '
'needs to handle message reading' % self.__class__.__name__,
)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Produce a message to send back to the eNB.
Args:
message: TR-069 message which was already processed by read_msg
Returns: Message and possible transition
"""
raise ConfigurationError(
'%s should implement get_msg() if it '
'needs to produce messages' % self.__class__.__name__,
)
@property
def acs(self) -> EnodebAcsStateMachine:
return self._acs
@acs.setter
def acs(self, val: EnodebAcsStateMachine) -> None:
self._acs = val
@abstractmethod
def state_description(self) -> str:
""" Provide a few words about what the state represents """
pass
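# Illustrative sketch (added; not one of the real states below): a minimal
# concrete state wires an ACS instance and a transition name in its constructor,
# inspects incoming TR-069 messages in read_msg, and reports whether it handled
# the message plus the next state to move to:
#
#   class ExampleWaitState(EnodebAcsState):
#       def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
#           super().__init__()
#           self.acs = acs
#           self.done_transition = when_done
#
#       def read_msg(self, message: Any) -> AcsReadMsgResult:
#           if not isinstance(message, models.Inform):
#               return AcsReadMsgResult(False, None)  # not handled here
#           return AcsReadMsgResult(True, self.done_transition)
#
#       def state_description(self) -> str:
#           return 'Example placeholder state'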
class WaitInformState(EnodebAcsState):
"""
This state indicates that no Inform message has been received yet, or
that no Inform message has been received for a long time.
This state is used to handle an Inform message that arrived when enodebd
already believes that the eNB is connected. As such, it is unclear to
enodebd whether the eNB is just sending another Inform, or if a different
eNB was plugged into the same interface.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_boot: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.boot_transition = when_boot
self.has_enb_just_booted = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args:
message: models.Inform Tr069 Inform message
"""
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
# Switch enodeb status to connected
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="connected"
)
if does_inform_have_event(message, '1 BOOT'):
return AcsReadMsgResult(True, self.boot_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with InformResponse """
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, self.done_transition)
def state_description(self) -> str:
return 'Waiting for an Inform'
class GetRPCMethodsState(EnodebAcsState):
"""
After the first Inform message from boot, it is expected that the eNB
will try to learn the RPC methods of the ACS.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, when_skip: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
# If this is a regular Inform rather than one following a reboot, the eNB sends
# an empty request (DummyInput) instead of GetRPCMethods, so skip ahead.
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, self.skip_transition)
if not isinstance(message, models.GetRPCMethods):
return AcsReadMsgResult(False, self.done_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
resp = models.GetRPCMethodsResponse()
resp.MethodList = models.MethodList()
RPC_METHODS = ['Inform', 'GetRPCMethods', 'TransferComplete']
resp.MethodList.arrayType = 'xsd:string[%d]' \
% len(RPC_METHODS)
resp.MethodList.string = RPC_METHODS
return AcsMsgAndTransition(resp, self.done_transition)
def state_description(self) -> str:
return 'Waiting for incoming GetRPC Methods after boot'
class BaicellsRemWaitState(EnodebAcsState):
"""
We've already received an Inform message. This state is to handle a
Baicells eNodeB issue.
After eNodeB is rebooted, hold off configuring it for some time to give
time for REM to run. This is a BaiCells eNodeB issue that doesn't support
enabling the eNodeB during initial REM.
In this state, just hang at responding to Inform, and then ending the
TR-069 session.
"""
CONFIG_DELAY_AFTER_BOOT = 600
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.rem_timer = None
def enter(self):
self.rem_timer = StateMachineTimer(self.CONFIG_DELAY_AFTER_BOOT)
logger.info(
'Holding off of eNB configuration for %s seconds. '
'Will resume after eNB REM process has finished. ',
self.CONFIG_DELAY_AFTER_BOOT,
)
def exit(self):
self.rem_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.rem_timer.is_done():
return AcsMsgAndTransition(
models.DummyInput(),
self.done_transition,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
remaining = self.rem_timer.seconds_remaining()
return 'Waiting for eNB REM to run for %d more seconds before ' \
'resuming with configuration.' % remaining
class WaitEmptyMessageState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_missing: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.unknown_param_transition = when_missing
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
logger.debug("Ignoring message %s", str(type(message)))
return AcsReadMsgResult(msg_handled=False, next_state=None)
if self.unknown_param_transition:
if get_optional_param_to_check(self.acs.data_model):
return AcsReadMsgResult(
msg_handled=True,
next_state=self.unknown_param_transition,
)
return AcsReadMsgResult(
msg_handled=True,
next_state=self.done_transition,
)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Return a dummy message waiting for the empty message from CPE
"""
request = models.DummyInput()
return AcsMsgAndTransition(msg=request, next_state=None)
def state_description(self) -> str:
return 'Waiting for empty message from eNodeB'
class CheckOptionalParamsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.optional_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
self.optional_param = get_optional_param_to_check(self.acs.data_model)
if self.optional_param is None:
raise Tr069Error('Invalid State')
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[1]'
request.ParameterNames.string = []
path = self.acs.data_model.get_parameter(self.optional_param).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process either GetParameterValuesResponse or a Fault """
if type(message) == models.Fault:
self.acs.data_model.set_parameter_presence(
self.optional_param,
False,
)
elif type(message) == models.GetParameterValuesResponse:
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug(
'Received CPE parameter values: %s',
str(name_to_val),
)
for name, val in name_to_val.items():
self.acs.data_model.set_parameter_presence(
self.optional_param,
True,
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
else:
return AcsReadMsgResult(False, None)
if get_optional_param_to_check(self.acs.data_model) is not None:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Checking if some optional parameters exist in data model'
class SendGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy.
Some eNB parameters are read only and updated by the eNB itself.
"""
PARAMETERS = [
ParameterName.OP_STATE,
ParameterName.RF_TX_STATUS,
ParameterName.GPS_STATUS,
ParameterName.PTP_STATUS,
ParameterName.MME_STATUS,
ParameterName.GPS_LAT,
ParameterName.GPS_LONG,
]
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.string = []
for name in self.PARAMETERS:
# Not all data models have these parameters
if self.acs.data_model.is_parameter_present(name):
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
request.ParameterNames.arrayType = \
'xsd:string[%d]' % len(request.ParameterNames.string)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class WaitGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_get: str,
when_get_obj_params: str,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_get
self.get_obj_params_transition = when_get_obj_params
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_transition = when_set
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
# Current values of the fetched parameters
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Fetched Transient Params: %s', str(name_to_val))
# Update device configuration
for name in name_to_val:
magma_val = \
self.acs.data_model.transform_for_magma(
name,
name_to_val[name],
)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.get_next_state())
def get_next_state(self) -> str:
should_get_params = \
len(
get_params_to_get(
self.acs.device_cfg,
self.acs.data_model,
),
) > 0
if should_get_params:
return self.done_transition
should_get_obj_params = \
len(
get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0
if should_get_obj_params:
return self.get_obj_params_transition
elif len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.rm_obj_transition
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.add_obj_transition
return self.skip_transition
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class GetParametersState(EnodebAcsState):
"""
Get the value of most parameters of the eNB that are defined in the data
model. Object parameters are excluded.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
request_all_params: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
# Set to True if we want to request values of all parameters, even if
# the ACS state machine already has recorded values of them.
self.request_all_params = request_all_params
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Respond with GetParameterValuesRequest
Get the values of all parameters defined in the data model.
Also check which addable objects are present, and what the values of
parameters for those objects are.
"""
# Get the names of regular parameters
names = get_params_to_get(
self.acs.device_cfg, self.acs.data_model,
self.request_all_params,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
if path is not InvalidTrParamPath:
# Only get data elements backed by tr69 path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class WaitGetParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Received CPE parameter values: %s', str(name_to_val))
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class GetObjectParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Respond with GetParameterValuesRequest """
names = get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class WaitGetObjectParametersState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_params_transition = when_set
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
path_to_val = {}
if hasattr(message.ParameterList, 'ParameterValueStruct') and \
message.ParameterList.ParameterValueStruct is not None:
for param_value_struct in message.ParameterList.ParameterValueStruct:
path_to_val[param_value_struct.Name] = \
param_value_struct.Value.Data
logger.debug('Received object parameters: %s', str(path_to_val))
# Number of PLMN objects reported can be incorrect. Let's count them
num_plmns = 0
obj_to_params = self.acs.data_model.get_numbered_param_names()
while True:
obj_name = ParameterName.PLMN_N % (num_plmns + 1)
if obj_name not in obj_to_params or len(obj_to_params[obj_name]) == 0:
logger.warning(
"eNB has PLMN %s but not defined in model",
obj_name,
)
break
param_name_list = obj_to_params[obj_name]
obj_path = self.acs.data_model.get_parameter(param_name_list[0]).path
if obj_path not in path_to_val:
break
if not self.acs.device_cfg.has_object(obj_name):
self.acs.device_cfg.add_object(obj_name)
num_plmns += 1
for name in param_name_list:
path = self.acs.data_model.get_parameter(name).path
value = path_to_val[path]
magma_val = \
self.acs.data_model.transform_for_magma(name, value)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
num_plmns_reported = \
int(self.acs.device_cfg.get_parameter(ParameterName.NUM_PLMNS))
if num_plmns != num_plmns_reported:
logger.warning(
"eNB reported %d PLMNs but found %d",
num_plmns_reported, num_plmns,
)
self.acs.device_cfg.set_parameter(
ParameterName.NUM_PLMNS,
num_plmns,
)
# Now we can have the desired state
if self.acs.desired_cfg is None:
self.acs.desired_cfg = build_desired_config(
self.acs.mconfig,
self.acs.service_config,
self.acs.device_cfg,
self.acs.data_model,
self.acs.config_postprocessor,
)
if len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.rm_obj_transition)
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.add_obj_transition)
elif len(
get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0:
return AcsReadMsgResult(True, self.set_params_transition)
return AcsReadMsgResult(True, self.skip_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class DeleteObjectsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_add: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.deleted_param = None
self.add_obj_transition = when_add
self.skip_transition = when_skip
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
request = models.DeleteObject()
self.deleted_param = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
request.ObjectName = \
self.acs.data_model.get_parameter(self.deleted_param).path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
if type(message) == models.DeleteObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received DeleteObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to DeleteObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
self.acs.device_cfg.delete_object(self.deleted_param)
obj_list_to_delete = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_delete) > 0:
return AcsReadMsgResult(True, None)
if len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) == 0:
return AcsReadMsgResult(True, self.skip_transition)
return AcsReadMsgResult(True, self.add_obj_transition)
def state_description(self) -> str:
return 'Deleting objects'
class AddObjectsState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.added_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.AddObject()
self.added_param = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
desired_param = self.acs.data_model.get_parameter(self.added_param)
desired_path = desired_param.path
path_parts = desired_path.split('.')
# If adding enumerated object, ie. XX.N. we should add it to the
# parent object XX. so strip the index
if len(path_parts) > 2 and \
path_parts[-1] == '' and path_parts[-2].isnumeric():
logger.debug('Stripping index from path=%s', desired_path)
desired_path = '.'.join(path_parts[:-2]) + '.'
request.ObjectName = desired_path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.AddObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received AddObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to AddObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
instance_n = message.InstanceNumber
self.acs.device_cfg.add_object(self.added_param % instance_n)
obj_list_to_add = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_add) > 0:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Adding objects'
class SetParameterValuesState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
# TODO: Match key response when we support having multiple outstanding
# calls.
if self.acs.has_version_key:
request.ParameterKey = models.ParameterKeyType()
request.ParameterKey.Data =\
"SetParameter-{:10.0f}".format(self.acs.parameter_version_key)
request.ParameterKey.type = 'xsd:string'
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values'
class SetParameterValuesNotAdminState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
exclude_admin=True,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values excluding Admin Enable'
class WaitSetParameterValuesState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_apply_invasive: str,
status_non_zero_allowed: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.apply_invasive_transition = when_apply_invasive
# Set Params can legally return zero and non zero status
# Per tr-196, if there are errors the method should return a fault.
# Make flag optional to compensate for existing radios returning non
# zero on error.
self.status_non_zero_allowed = status_non_zero_allowed
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.SetParameterValuesResponse:
if not self.status_non_zero_allowed:
if message.Status != 0:
raise Tr069Error(
'Received SetParameterValuesResponse with '
'Status=%d' % message.Status,
)
self._mark_as_configured()
metrics.set_enb_last_configured_time(
self.acs.device_cfg.get_parameter("Serial number"),
self.acs.device_cfg.get_parameter("ip_address"),
int(time.time())
)
# Switch enodeb status to configured
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="configured"
)
if not self.acs.are_invasive_changes_applied:
return AcsReadMsgResult(True, self.apply_invasive_transition)
return AcsReadMsgResult(True, self.done_transition)
elif type(message) == models.Fault:
logger.error(
'Received Fault in response to SetParameterValues, '
'Code (%s), Message (%s)', message.FaultCode,
message.FaultString,
)
if message.SetParameterValuesFault is not None:
for fault in message.SetParameterValuesFault:
logger.error(
'SetParameterValuesFault Param: %s, '
'Code: %s, String: %s', fault.ParameterName,
fault.FaultCode, fault.FaultString,
)
return AcsReadMsgResult(False, None)
def _mark_as_configured(self) -> None:
"""
A successful attempt at setting parameter values means that we need to
update what we think the eNB's configuration is to match what we just
set the parameter values to.
"""
# Values of parameters
name_to_val = get_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
# Values of object parameters
obj_to_name_to_val = get_obj_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for obj_name, name_to_val in obj_to_name_to_val.items():
for name, val in name_to_val.items():
logger.debug(
'Set obj: %s, name: %s, val: %s', str(obj_name),
str(name), str(val),
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
logger.info('Successfully configured CPE parameters!')
def state_description(self) -> str:
return 'Setting parameter values'
class EndSessionState(EnodebAcsState):
""" To end a TR-069 session, send an empty HTTP response """
def __init__(self, acs: EnodebAcsStateMachine):
super().__init__()
self.acs = acs
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
No message is expected after enodebd sends the eNodeB
an empty HTTP response.
If a device sends an empty HTTP request, we can just
ignore it and send another empty response.
"""
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(False, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
# Switch enodeb status to disconnected
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="disconnected"
)
request = models.DummyInput()
return AcsMsgAndTransition(request, None)
def state_description(self) -> str:
return 'Completed provisioning eNB. Awaiting new Inform.'
class EnbSendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
self.acs.are_invasive_changes_applied = True
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class SendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitRebootResponseState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.RebootResponse):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with empty message """
return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitInformMRebootState(EnodebAcsState):
"""
After sending a reboot request, we expect an Inform request with a
specific 'inform event code'
"""
# Time to wait for eNodeB reboot. The measured time
# (on BaiCells indoor eNodeB)
# is ~110secs, so add healthy padding on top of this.
REBOOT_TIMEOUT = 300 # In seconds
# We expect that the Inform we receive tells us the eNB has rebooted
INFORM_EVENT_CODE = 'M Reboot'
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_timeout: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.timeout_transition = when_timeout
self.timeout_timer = None
self.timer_handle = None
def enter(self):
self.timeout_timer = StateMachineTimer(self.REBOOT_TIMEOUT)
def check_timer() -> None:
if self.timeout_timer.is_done():
self.acs.transition(self.timeout_transition)
raise Tr069Error(
'Did not receive Inform response after '
'rebooting',
)
self.timer_handle = \
self.acs.event_loop.call_later(
self.REBOOT_TIMEOUT,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.timeout_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
if not does_inform_have_event(message, self.INFORM_EVENT_CODE):
raise Tr069Error(
'Did not receive M Reboot event code in '
'Inform',
)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Waiting for M Reboot code from Inform'
class WaitRebootDelayState(EnodebAcsState):
"""
After receiving the Inform notifying us that the eNodeB has successfully
rebooted, wait a short duration to prevent unspecified race conditions
that may occur w.r.t reboot
"""
# Short delay timer to prevent race conditions w.r.t. reboot
SHORT_CONFIG_DELAY = 10
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.config_timer = None
self.timer_handle = None
def enter(self):
self.config_timer = StateMachineTimer(self.SHORT_CONFIG_DELAY)
def check_timer() -> None:
if self.config_timer.is_done():
self.acs.transition(self.done_transition)
self.timer_handle = \
self.acs.event_loop.call_later(
self.SHORT_CONFIG_DELAY,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.config_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Waiting after eNB reboot to prevent race conditions'
class DownloadState(EnodebAcsState):
"""
The eNB handler will enter this state when the firmware version is older than the desired version.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
# Switch enodeb status to firmware upgrading
metrics.set_enb_status(
self.acs.device_cfg.get_parameter("Serial number"),
status="firmware_upgrading"
)
request = models.Download()
request.CommandKey = "20220206215200"
request.FileType = "1 Firmware Upgrade Image"
request.URL = "http://10.128.250.131/firmware/Qproject_TEST3918_2102241222.ffw"
request.Username = ""
request.Password = ""
request.FileSize = 57208579
request.TargetFileName = "Qproject_TEST3918_2102241222.ffw"
request.DelaySeconds = 0
request.SuccessURL = ""
request.FailureURL = ""
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Upgrading the firmware to the desired version'
class WaitDownloadResponseState(EnodebAcsState):
"""
The eNB handler will enter this state after the Download command sent.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DownloadResponse):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with empty message """
logger.info("Received Download Response from eNodeB")
return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
def state_description(self) -> str:
return "Wait DownloadResponse message"
class WaitInformTransferCompleteState(EnodebAcsState):
"""
The eNB handler will enter this state after firmware upgraded and rebooted
"""
REBOOT_TIMEOUT = 300 # In seconds
INFORM_EVENT_CODE = "7 TRANSFER COMPLETE"
PERIODIC_EVENT_CODE = "2 PERIODIC"
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, when_periodic: str, when_timeout: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.periodic_update_transition = when_periodic
self.timeout_transition = when_timeout
self.timeout_timer = None
self.timer_handle = None
def enter(self):
print("Get into the TransferComplete State")
self.timeout_timer = StateMachineTimer(self.REBOOT_TIMEOUT)
def check_timer() -> None:
if self.timeout_timer.is_done():
self.acs.transition(self.timeout_transition)
raise Tr069Error("Didn't receive Inform response after rebooting")
self.timer_handle = self.acs.event_loop.call_later(
self.REBOOT_TIMEOUT,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.timeout_timer = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
return AcsMsgAndTransition(models.DummyInput(), None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
if does_inform_have_event(message, self.PERIODIC_EVENT_CODE):
logger.info("Receive Periodic update from enodeb")
return AcsReadMsgResult(True, self.periodic_update_transition)
if does_inform_have_event(message, self.INFORM_EVENT_CODE):
logger.info("Receive Transfer complete")
return AcsReadMsgResult(True, self.done_transition)
# Unhandled situation
return AcsReadMsgResult(False, None)
def state_description(self) -> str:
return "Wait DownloadResponse message"
class CheckStatusState(EnodebAcsState):
"""
Send a request to the eNodeB to fetch basic status from the device
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send with GetParameterValuesRequest
"""
self.PARAMETERS = [
ParameterName.OP_STATE,  # needed for the "Opstate" value reported below
ParameterName.RF_TX_STATUS,
ParameterName.GPS_STATUS,
ParameterName.GPS_LAT,
ParameterName.GPS_LONG,
]
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[1]'
request.ParameterNames.string = []
for name in self.PARAMETERS:
if self.acs.data_model.is_parameter_present(name):
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
request.ParameterNames.arrayType = \
'xsd:string[%d]' % len(request.ParameterNames.string)
return AcsMsgAndTransition(request, self.done_transition)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(msg_handled=False, next_state=None)
name_to_val = parse_get_parameter_values_response(self.acs.data_model, message, )
logger.info("CheckStatusState: %s", str(name_to_val))
# Call set_enb_gps_status to update the parameter in prometheus api
metrics.set_enb_gps_status(
self.acs.device_cfg.get_parameter("Serial number"),
name_to_val["GPS lat"], name_to_val["GPS long"],
name_to_val["gps_status"]
)
# Call set_enb_op_status to update the parameter in prometheus api
metrics.set_enb_op_status(
self.acs.device_cfg.get_parameter("Serial number"),
name_to_val["Opstate"]
)
# Sleep 1 minute and check status again
time.sleep(60)
return AcsReadMsgResult(msg_handled=True, next_state=self.done_transition)
def state_description(self) -> str:
return 'Getting eNB status parameters'
class ErrorState(EnodebAcsState):
"""
The eNB handler will enter this state when an unhandled Fault is received.
If the inform_transition_target constructor parameter is non-null, this
state will attempt to autoremediate by transitioning to the specified
target state when an Inform is received.
"""
def __init__(
self, acs: EnodebAcsStateMachine,
inform_transition_target: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.inform_transition_target = inform_transition_target
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if not self.inform_transition_target:
return AcsMsgAndTransition(models.DummyInput(), None)
if isinstance(message, models.Inform):
return AcsMsgAndTransition(
models.DummyInput(),
self.inform_transition_target,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Error state - awaiting manual restart of enodebd service or ' \
'an Inform to be received from the eNB' |
the-stack_0_1980 | from struct import Struct
from types import new_class
# class init => python type to obj, value = python type
# obj encode: obj to bytes/array
# classmethod decode: bytes array (array('B', [0, 2, 255, ..])) to python type
# str obj to str, mostly str(value)
# self.value is bytes on Base class
class MetaBluezFormat(type):
def __str__(self):
return "{}".format(self.__name__)
class MetaBluezFormatInt(type):
def __str__(self):
return "{}(len={},exponent={})".format(self.__name__, self.len, self.exponent)
class FormatBase(object, metaclass=MetaBluezFormat):
# __metaclass__ = MetaFormatInt
# 0 means variable length
len = 0
# for all numeric
exponent = 0
native_types = bytes
# init takes native python type as arg (depends on formatBase, base is 'bytes' type)
def __init__(self, value):
if not isinstance(value, self.native_types):
raise TypeError(
"{}, wrong type: {}, expected: {}".format(
self.__class__.__name__, type(value), self.native_types
)
)
self.value = value
try:
_ = self.encode()
except Exception as ex:
# keep exception raised by 'encode', but add this one
raise ValueError(f"{self.__class__.__name__}: {str(ex)}")
@classmethod
def decode(cls, value):
return cls(bytes(value))
def encode(self):
return self.value
def __str__(self):
return str(self.value)
def __eq__(self, other):
if isinstance(other, FormatBase):
return self.value == other.value
return self.value == other
# alias
class FormatRaw(FormatBase):
pass
# base only for non-power two uints
class FormatUint(FormatBase):
exponent = 0
len = 1
native_types = (int, float)
@classmethod
def decode(cls, value):
acc = 0
for idx, v in enumerate(value):
if idx == cls.len:
break
acc += int(v) * pow(2, 8 * idx)
if cls.exponent:
n = float(acc) * pow(10, cls.exponent)
if cls.exponent:
n = round(n, cls.exponent * -1)
return cls(n)
return cls(acc)
def encode(self):
if self.exponent:
v = int(self.value / pow(10, self.exponent))
else:
v = self.value
b = []
for idx in range(0, self.len):
b.append(v % 256)
v = int(v / 256)
return bytes(b)
class FormatUint24(FormatUint):
len = 3
class FormatUint40(FormatUint):
len = 5
class FormatUint48(FormatUint):
len = 6
_endian = "<"
# works only as base for powers of 2 sints
class FormatPacked(FormatBase):
exponent = 0
len = 1
# adds float for native type (self.value), but pack/unpack always the/to int
native_types = (int, float)
pck_fmt = Struct(_endian + "B")
@classmethod
def decode(cls, value):
v = bytes(value)
if len(v) < cls.len:
v = bytes(value) + bytes([0] * (cls.len - len(v)))
# acc = unpack(cls.endian + cls.pck_fmt, v)
acc = cls.pck_fmt.unpack(v)
if cls.exponent:
return cls(round(float(acc[0]) * pow(10, cls.exponent), cls.exponent * -1))
return cls(acc[0])
def encode(self):
if self.exponent:
v = int(self.value / pow(10, self.exponent))
else:
v = int(self.value)
return self.pck_fmt.pack(v)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
class FormatUint8(FormatPacked):
pck_fmt = Struct(_endian + "B")
class FormatUint8Enum(FormatUint8):
pass
class FormatUint16(FormatPacked):
len = 2
pck_fmt = Struct(_endian + "H")
class FormatUint32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "I")
class FormatUint64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "Q")
class FormatSint8(FormatPacked):
pck_fmt = Struct(_endian + "b")
class FormatSint16(FormatPacked):
len = 2
pck_fmt = Struct(_endian + "h")
class FormatSint32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "i")
class FormatSint64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "q")
class FormatFloat32(FormatPacked):
len = 4
pck_fmt = Struct(_endian + "f")
class FormatFloat64(FormatPacked):
len = 8
pck_fmt = Struct(_endian + "d")
class FormatUtf8s(FormatBase):
# native 'value' format is unicode string
native_types = str
@classmethod
def decode(cls, value):
s = bytes(value).decode("utf-8")
l = len(s)
# remove trailing NUL
if l > 0 and s[l - 1] == "\x00":
s = s[:-1]
return cls(s)
def encode(self):
return self.value.encode("utf-8")
class FormatBitfield(FormatUint8):
len = 1
native_types = (int,)
def __str__(self):
return "0b{:08b}".format(self.value)
class FormatBitfield16(FormatUint16):
len = 2
def __str__(self):
return "0b{:016b}".format(self.value)
class FormatTuple(FormatBase):
sub_cls = []
sub_cls_names = []
native_types = (list, tuple)
# here we have a list/tuple as value
def __init__(self, value):
try:
if len(self.sub_cls) != len(value):
raise ValueError(
(
f"Expected {len(self.sub_cls)} number of values for format:"
"{self.__class__.__name__} ({self._sub_str()}}"
)
)
except TypeError:
raise TypeError(
"Expected iterable with {} number of values for format: {} ({})".format(
len(self.sub_cls), self.__class__.__name__, self._sub_str()
)
) from None
self.value = value
def _sub_str(self):
scn = self.sub_cls_names if self._is_named() else None
if scn and len(scn) == len(self):
d = {}
for idx, n in enumerate(scn):
d[n] = self.sub_cls[idx]
return str(d)
return "({})".format(",".join([sub_c.__name__ for sub_c in self.sub_cls]))
def _is_named(self):
try:
_ = self.sub_cls_names
except AttributeError:
return False
return bool(self.sub_cls_names)
# del is not supported; unclear whether we need it
# def __delitem__(self, key):
# self.__delattr__(key)
def __len__(self):
return len(self.sub_cls)
def __getitem__(self, key):
if isinstance(key, int):
return self.value[key]
elif isinstance(key, str):
if not self._is_named():
raise TypeError("index must be int")
try:
idx = self.sub_cls_names.index(key)
except ValueError:
raise KeyError(key)
return self.value[idx]
raise TypeError("index must be str or int")
def __setitem__(self, key, sub_value):
if isinstance(key, int):
try:
# create sub class instance for type checking (raises Type/Value)
# resulting value should be original sub_value on success
self.value[key] = self.sub_cls[key](sub_value).value
except IndexError:
raise IndexError(
f"{self.__class__.__name__} assignment index out of range"
)
elif isinstance(key, str):
if not self._is_named():
raise TypeError("index must be int")
try:
idx = self.sub_cls_names.index(key)
except ValueError:
raise KeyError(key)
self.value[idx] = self.sub_cls[idx](sub_value).value
else:
raise TypeError("index must be str or int")
def keys(self):
if not self._is_named():
return []
return self.sub_cls_names
def values(self):
return self.value
def items(self):
if not self._is_named():
return []
return [
(self.sub_cls_names[idx], value) for idx, value in enumerate(self.value)
]
@classmethod
def decode(cls, value):
dec_vals = []
for sub in cls.sub_cls:
# consume bytes suitable for class, or all
len_get = len(value) if sub.len == 0 else sub.len
v = value[:len_get]
value = value[len_get:]
dec_vals.append(sub.decode(v))
return cls(cls.native_types[0](dec_vals))
def encode(self):
enc_vals = b""
for idx, val in enumerate(self.value):
# add bytes for all classes in order, or all
if isinstance(val, FormatBase):
enc_vals += val.encode()
else:
enc_vals += self.sub_cls[idx](val).encode()
return enc_vals
def __str__(self):
return "(" + ",".join([str(v) for v in self.value]) + ")"
def __eq__(self, other):
if isinstance(other, FormatTuple):
if len(other) != len(self):
return False
for idx, value in enumerate(self.values()):
if value != other[idx]:
return False
return True
elif not isinstance(other, FormatBase):
for idx, value in enumerate(self.values()):
if value != other[idx]:
return False
return True
return False
__all__ = (
"FormatBase",
"FormatRaw",
"FormatUint",
"FormatUint8",
"FormatUint8Enum",
"FormatUint16",
"FormatUint24",
"FormatUint32",
"FormatUint40",
"FormatUint48",
"FormatUint64",
"FormatSint8",
"FormatSint16",
"FormatSint32",
"FormatSint64",
"FormatUtf8s",
"FormatBitfield",
"FormatTuple",
)
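# Usage sketch (added for illustration; ExamplePair is a hypothetical subclass,
# not part of this module): the fixed-size formats round-trip between Python
# values and little-endian bytes, and FormatTuple subclasses compose them.
#   assert FormatUint16(258).encode() == b"\x02\x01"
#   assert FormatUint16.decode(b"\x02\x01") == 258
#
#   class ExamplePair(FormatTuple):
#       sub_cls = [FormatUint8, FormatUint16]
#       sub_cls_names = ["flags", "value"]
#
#   assert ExamplePair([7, 513]).encode() == b"\x07\x01\x02"
#   assert ExamplePair.decode(b"\x07\x01\x02")["value"] == 513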
|
the-stack_0_1985 | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']])
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split=False
self.sync_all()
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'])
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("bitozd exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
WalletHDTest().main ()
|
the-stack_0_1986 | from math import log
def tfidf(t, d, d_list):
return tf(t, d) * idf(t, d_list)
def tf(t, d):
r = 0
for term in d.split():
if t == term:
r += 1
return r
def idf(t, d_list):
d_with_t = 0
for d in d_list:
if t in d.split():
d_with_t += 1
return log(len(d_list) / d_with_t)
if __name__ == "__main__":
d1 = "snow in my shoe abandoned sparrow's nest"
d2 = "whitecaps on the bay a broken signboard banging in the April wind"
d3 = "lily out of the water out of itself bass picking bugs off the moon"
d4 = "an aging willow its image unsteady in the flowing stream"
d5 = "just friends he watches my gauze dress blowing on the line"
d6 = "little spider will you outlive me"
d7 = "meteor shower a gentle wave wets our sandals"
d_list = [d1, d2, d3, d4, d5, d6, d7]
print(tfidf("a", d2, d_list))
print(tfidf("out", d1, d_list))
print(tfidf("out", d3, d_list))
|
the-stack_0_1987 | # pylint: disable=line-too-long, no-member
from __future__ import print_function
import pytz
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from ...decorators import handle_lock
from ...models import DataPoint, DataSource, DataSourceAlert, DataSourceReference, DataGeneratorDefinition
GENERATOR = 'pdk-remote-nudge'
CRITICAL_LEVEL = 12 * 60 * 60
WARNING_LEVEL = 6 * 60 * 60
class Command(BaseCommand):
help = 'Determines if mobile devices are receiving silent push notifications.'
@handle_lock
def handle(self, *args, **options): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
try:
if (GENERATOR in settings.PDK_ENABLED_CHECKS) is False:
DataSourceAlert.objects.filter(generator_identifier=GENERATOR, active=True).update(active=False)
return
except AttributeError:
print('Did not find PDK_ENABLED_CHECKS in Django settings. Please define with a list of generators with status checks to enable.')
print('Example: PDK_ENABLED_CHECKS = (\'' + GENERATOR + '\',)')
here_tz = pytz.timezone(settings.TIME_ZONE)
for source in DataSource.objects.all(): # pylint: disable=too-many-nested-blocks
now = timezone.now()
source_reference = DataSourceReference.reference_for_source(source.identifier)
generator_definition = DataGeneratorDefinition.definition_for_identifier('pdk-app-event')
if source.should_suppress_alerts():
DataSourceAlert.objects.filter(data_source=source, generator_identifier=GENERATOR, active=True).update(active=False)
else:
secondary_query = Q(secondary_identifier='app_recv_remote_notification') | Q(secondary_identifier='pdk-received-firebase-message')
last_event = DataPoint.objects.filter(source_reference=source_reference, generator_definition=generator_definition).filter(secondary_query).order_by('-created').first()
last_alert = DataSourceAlert.objects.filter(data_source=source, generator_identifier=GENERATOR, active=True).order_by('-created').first()
alert_name = None
alert_details = {}
alert_level = 'info'
if last_event is not None:
delta = now - last_event.created
when = last_event.created.astimezone(here_tz)
if delta.total_seconds() > CRITICAL_LEVEL:
alert_name = 'Push Notifications Delayed'
                        alert_details['message'] = 'Device has not received push notifications since ' + when.strftime('%H:%M on %b %d, %Y') + '.'
alert_level = 'critical'
elif delta.total_seconds() > WARNING_LEVEL:
alert_name = 'Push Notifications Delayed'
                        alert_details['message'] = 'Device has not received push notifications since ' + when.strftime('%H:%M on %b %d, %Y') + '.'
alert_level = 'warning'
else:
alert_name = 'Push Notifications Never Received'
alert_details['message'] = 'Device has never received push notifications.'
if alert_name is not None:
if last_alert is None or last_alert.alert_name != alert_name or last_alert.alert_level != alert_level:
if last_alert is not None:
last_alert.active = False
last_alert.updated = timezone.now()
last_alert.save()
new_alert = DataSourceAlert(alert_name=alert_name, data_source=source, generator_identifier=GENERATOR)
new_alert.alert_level = alert_level
new_alert.update_alert_details(alert_details)
new_alert.created = timezone.now()
new_alert.updated = timezone.now()
new_alert.active = True
new_alert.save()
else:
last_alert.updated = timezone.now()
last_alert.update_alert_details(alert_details)
last_alert.save()
elif last_alert is not None:
last_alert.updated = timezone.now()
last_alert.active = False
last_alert.save()
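# Hedged usage note (the command/module name is not shown in this file, so it is
# left as a placeholder): Django management commands like this one are typically
# scheduled from cron so the check runs periodically, e.g.
#   */15 * * * * python manage.py <this_command_module>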
|
the-stack_0_1989 | # model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFLevelHead',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 32),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=(128, 128),
wh_head_channels=(64, 64),
hm_head_conv_num=(2, 2),
wh_head_conv_num=(1, 1),
num_classes=81,
wh_scale_factor_b1=16.,
wh_scale_factor_b2=16.,
shortcut_cfg=(1, 2, 3),
alpha=0.54,
beta=0.54,
max_objs=128,
hm_weight_b1=1.,
wh_weight_b1=5.,
hm_weight_b2=1.,
wh_weight_b2=5.,
b1_min_length=48,
b2_max_length=64,
mdcn_before_s8=True,
mdcn_before_s8_bn=False,
inf_branch=['b1', 'b2'],
use_simple_nms=False,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
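# Hedged usage note (paths and flags are assumptions, not part of this config):
# mmdetection-style configs such as this one are normally passed to the training
# entry point, e.g.
#   python tools/train.py configs/ttfv2net_r18_1x.py --work_dir ./work_dirs/ttfv2net_r18_1x
# or, for multi-GPU runs, through the equivalent dist_train.sh wrapper with the
# same config path.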
|
the-stack_0_1990 | """Task I/O specifications."""
import attr
from pathlib import Path
import typing as ty
import inspect
import re
from glob import glob
from .helpers_file import template_update_single
def attr_fields(spec, exclude_names=()):
return [field for field in spec.__attrs_attrs__ if field.name not in exclude_names]
def attr_fields_dict(spec, exclude_names=()):
return {
field.name: field
for field in spec.__attrs_attrs__
if field.name not in exclude_names
}
class File:
"""An :obj:`os.pathlike` object, designating a file."""
class Directory:
"""An :obj:`os.pathlike` object, designating a folder."""
class MultiInputObj:
"""A ty.List[ty.Any] object, converter changes a single values to a list"""
@classmethod
def converter(cls, value):
from .helpers import ensure_list
if value == attr.NOTHING:
return value
else:
return ensure_list(value)
class MultiOutputObj:
"""A ty.List[ty.Any] object, converter changes an 1-el list to the single value"""
@classmethod
def converter(cls, value):
if isinstance(value, list) and len(value) == 1:
return value[0]
else:
return value
class MultiInputFile(MultiInputObj):
"""A ty.List[File] object, converter changes a single file path to a list"""
class MultiOutputFile(MultiOutputObj):
"""A ty.List[File] object, converter changes an 1-el list to the single value"""
@attr.s(auto_attribs=True, kw_only=True)
class SpecInfo:
"""Base data structure for metadata of specifications."""
name: str
"""A name for the specification."""
fields: ty.List[ty.Tuple] = attr.ib(factory=list)
"""List of names of fields (can be inputs or outputs)."""
bases: ty.Tuple[ty.Type] = attr.ib(factory=tuple)
"""Keeps track of specification inheritance.
Should be a tuple containing at least one BaseSpec """
@attr.s(auto_attribs=True, kw_only=True)
class BaseSpec:
"""The base dataclass specs for all inputs and outputs."""
def __attrs_post_init__(self):
self.files_hash = {
field.name: {}
for field in attr_fields(
self, exclude_names=("_graph_checksums", "bindings", "files_hash")
)
if field.metadata.get("output_file_template") is None
}
def __setattr__(self, name, value):
"""changing settatr, so the converter and validator is run
if input is set after __init__
"""
if inspect.stack()[1][3] == "__init__" or name in [
"inp_hash",
"changed",
"files_hash",
]:
super().__setattr__(name, value)
else:
tp = attr.fields_dict(self.__class__)[name].type
# if the type has a converter, e.g., MultiInputObj
if hasattr(tp, "converter"):
value = tp.converter(value)
self.files_hash[name] = {}
super().__setattr__(name, value)
# validate all fields that have set a validator
attr.validate(self)
def collect_additional_outputs(self, inputs, output_dir, outputs):
"""Get additional outputs."""
return {}
@property
def hash(self):
"""Compute a basic hash for any given set of fields."""
from .helpers import hash_value, hash_function
inp_dict = {}
for field in attr_fields(
self, exclude_names=("_graph_checksums", "bindings", "files_hash")
):
if field.metadata.get("output_file_template"):
continue
# removing values that are not set from hash calculation
if getattr(self, field.name) is attr.NOTHING:
continue
value = getattr(self, field.name)
inp_dict[field.name] = hash_value(
value=value,
tp=field.type,
metadata=field.metadata,
precalculated=self.files_hash[field.name],
)
inp_hash = hash_function(inp_dict)
if hasattr(self, "_graph_checksums"):
inp_hash = hash_function((inp_hash, self._graph_checksums))
return inp_hash
def retrieve_values(self, wf, state_index=None):
"""Get values contained by this spec."""
temp_values = {}
for field in attr_fields(self):
value = getattr(self, field.name)
if isinstance(value, LazyField):
value = value.get_value(wf, state_index=state_index)
temp_values[field.name] = value
for field, value in temp_values.items():
setattr(self, field, value)
def check_fields_input_spec(self):
"""
        Check fields from the input spec based on the metadata,
        e.g., whether xor/requires constraints are fulfilled and whether a value is provided when mandatory.
"""
fields = attr_fields(self)
names = []
require_to_check = {}
for fld in fields:
mdata = fld.metadata
# checking if the mandatory field is provided
if getattr(self, fld.name) is attr.NOTHING:
if mdata.get("mandatory"):
raise AttributeError(
f"{fld.name} is mandatory, but no value provided"
)
else:
continue
names.append(fld.name)
# checking if fields meet the xor and requires are
if "xor" in mdata:
if [el for el in mdata["xor"] if (el in names and el != fld.name)]:
raise AttributeError(
f"{fld.name} is mutually exclusive with {mdata['xor']}"
)
if "requires" in mdata:
if [el for el in mdata["requires"] if el not in names]:
# will check after adding all fields to names
require_to_check[fld.name] = mdata["requires"]
if (
fld.type in [File, Directory]
or "pydra.engine.specs.File" in str(fld.type)
or "pydra.engine.specs.Directory" in str(fld.type)
):
self._file_check_n_bindings(fld)
for nm, required in require_to_check.items():
required_notfound = [el for el in required if el not in names]
if required_notfound:
raise AttributeError(f"{nm} requires {required_notfound}")
def _file_check_n_bindings(self, field):
"""for tasks without container, this is simple check if the file exists"""
if isinstance(getattr(self, field.name), list):
# if value is a list and type is a list of Files/Directory, checking all elements
if field.type in [ty.List[File], ty.List[Directory]]:
for el in getattr(self, field.name):
file = Path(el)
if not file.exists() and field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from the {field.name} input does not exist"
)
else:
file = Path(getattr(self, field.name))
# error should be raised only if the type is strictly File or Directory
if not file.exists() and field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from the {field.name} input does not exist"
)
def check_metadata(self):
"""Check contained metadata."""
def template_update(self):
"""Update template."""
def copyfile_input(self, output_dir):
"""Copy the file pointed by a :class:`File` input."""
@attr.s(auto_attribs=True, kw_only=True)
class Runtime:
"""Represent run time metadata."""
rss_peak_gb: ty.Optional[float] = None
"""Peak in consumption of physical RAM."""
vms_peak_gb: ty.Optional[float] = None
"""Peak in consumption of virtual memory."""
cpu_peak_percent: ty.Optional[float] = None
"""Peak in cpu consumption."""
@attr.s(auto_attribs=True, kw_only=True)
class Result:
"""Metadata regarding the outputs of processing."""
output: ty.Optional[ty.Any] = None
runtime: ty.Optional[Runtime] = None
errored: bool = False
def __getstate__(self):
state = self.__dict__.copy()
if state["output"] is not None:
fields = tuple((el.name, el.type) for el in attr_fields(state["output"]))
state["output_spec"] = (state["output"].__class__.__name__, fields)
state["output"] = attr.asdict(state["output"], recurse=False)
return state
def __setstate__(self, state):
if "output_spec" in state:
spec = list(state["output_spec"])
del state["output_spec"]
klass = attr.make_class(
spec[0], {k: attr.ib(type=v) for k, v in list(spec[1])}
)
state["output"] = klass(**state["output"])
self.__dict__.update(state)
def get_output_field(self, field_name):
"""Used in get_values in Workflow
Parameters
----------
field_name : `str`
Name of field in LazyField object
"""
if field_name == "all_":
return attr.asdict(self.output, recurse=False)
else:
return getattr(self.output, field_name)
@attr.s(auto_attribs=True, kw_only=True)
class RuntimeSpec:
"""
Specification for a task.
From CWL::
InlineJavascriptRequirement
SchemaDefRequirement
DockerRequirement
SoftwareRequirement
InitialWorkDirRequirement
EnvVarRequirement
ShellCommandRequirement
ResourceRequirement
InlineScriptRequirement
"""
outdir: ty.Optional[str] = None
container: ty.Optional[str] = "shell"
network: bool = False
@attr.s(auto_attribs=True, kw_only=True)
class FunctionSpec(BaseSpec):
"""Specification for a process invoked from a shell."""
def check_metadata(self):
"""
Check the metadata for fields in input_spec and fields.
Also sets the default values when available and needed.
"""
supported_keys = {
"allowed_values",
"copyfile",
"help_string",
"mandatory",
# "readonly", #likely not needed
# "output_field_name", #likely not needed
# "output_file_template", #likely not needed
"requires",
"keep_extension",
"xor",
"sep",
}
for fld in attr_fields(self, exclude_names=("_func", "_graph_checksums")):
mdata = fld.metadata
# checking keys from metadata
if set(mdata.keys()) - supported_keys:
raise AttributeError(
f"only these keys are supported {supported_keys}, but "
f"{set(mdata.keys()) - supported_keys} provided"
)
# checking if the help string is provided (required field)
if "help_string" not in mdata:
raise AttributeError(f"{fld.name} doesn't have help_string field")
# not allowing for default if the field is mandatory
if not fld.default == attr.NOTHING and mdata.get("mandatory"):
raise AttributeError(
"default value should not be set when the field is mandatory"
)
# setting default if value not provided and default is available
if getattr(self, fld.name) is None:
if not fld.default == attr.NOTHING:
setattr(self, fld.name, fld.default)
@attr.s(auto_attribs=True, kw_only=True)
class ShellSpec(BaseSpec):
"""Specification for a process invoked from a shell."""
executable: ty.Union[str, ty.List[str]] = attr.ib(
metadata={
"help_string": "the first part of the command, can be a string, "
"e.g. 'ls', or a list, e.g. ['ls', '-l', 'dirname']"
}
)
args: ty.Union[str, ty.List[str], None] = attr.ib(
None,
metadata={
"help_string": "the last part of the command, can be a string, "
"e.g. <file_name>, or a list"
},
)
def retrieve_values(self, wf, state_index=None):
"""Parse output results."""
temp_values = {}
for field in attr_fields(self):
# retrieving values that do not have templates
if not field.metadata.get("output_file_template"):
value = getattr(self, field.name)
if isinstance(value, LazyField):
value = value.get_value(wf, state_index=state_index)
temp_values[field.name] = value
for field, value in temp_values.items():
value = path_to_string(value)
setattr(self, field, value)
def check_metadata(self):
"""
Check the metadata for fields in input_spec and fields.
Also sets the default values when available and needed.
"""
supported_keys = {
"allowed_values",
"argstr",
"container_path",
"copyfile",
"help_string",
"mandatory",
"readonly",
"output_field_name",
"output_file_template",
"position",
"requires",
"keep_extension",
"xor",
"sep",
"formatter",
}
for fld in attr_fields(self, exclude_names=("_func", "_graph_checksums")):
mdata = fld.metadata
# checking keys from metadata
if set(mdata.keys()) - supported_keys:
raise AttributeError(
f"only these keys are supported {supported_keys}, but "
f"{set(mdata.keys()) - supported_keys} provided"
)
# checking if the help string is provided (required field)
if "help_string" not in mdata:
raise AttributeError(f"{fld.name} doesn't have help_string field")
# assuming that fields with output_file_template shouldn't have default
if fld.default not in [attr.NOTHING, True, False] and mdata.get(
"output_file_template"
):
raise AttributeError(
"default value should not be set together with output_file_template"
)
# not allowing for default if the field is mandatory
if not fld.default == attr.NOTHING and mdata.get("mandatory"):
raise AttributeError(
"default value should not be set when the field is mandatory"
)
# setting default if value not provided and default is available
if getattr(self, fld.name) is None:
if not fld.default == attr.NOTHING:
setattr(self, fld.name, fld.default)
@attr.s(auto_attribs=True, kw_only=True)
class ShellOutSpec:
"""Output specification of a generic shell process."""
return_code: int
"""The process' exit code."""
stdout: ty.Union[File, str]
"""The process' standard output."""
stderr: ty.Union[File, str]
"""The process' standard input."""
def collect_additional_outputs(self, inputs, output_dir, outputs):
"""Collect additional outputs from shelltask output_spec."""
additional_out = {}
for fld in attr_fields(self, exclude_names=("return_code", "stdout", "stderr")):
if fld.type not in [
File,
MultiOutputFile,
Directory,
int,
float,
bool,
str,
list,
]:
raise Exception("not implemented (collect_additional_output)")
# assuming that field should have either default or metadata, but not both
if (
fld.default is None or fld.default == attr.NOTHING
) and not fld.metadata: # TODO: is it right?
raise AttributeError("File has to have default value or metadata")
elif fld.default != attr.NOTHING:
additional_out[fld.name] = self._field_defaultvalue(fld, output_dir)
elif fld.metadata:
if (
fld.type in [int, float, bool, str, list]
and "callable" not in fld.metadata
):
raise AttributeError(
f"{fld.type} has to have a callable in metadata"
)
additional_out[fld.name] = self._field_metadata(
fld, inputs, output_dir, outputs
)
return additional_out
def generated_output_names(self, inputs, output_dir):
"""Returns a list of all outputs that will be generated by the task.
Takes into account the task input and the requires list for the output fields.
TODO: should be in all Output specs?
"""
# checking the input (if all mandatory fields are provided, etc.)
inputs.check_fields_input_spec()
output_names = ["return_code", "stdout", "stderr"]
for fld in attr_fields(self, exclude_names=("return_code", "stdout", "stderr")):
if fld.type is not File:
raise Exception("not implemented (collect_additional_output)")
# assuming that field should have either default or metadata, but not both
if (
fld.default in (None, attr.NOTHING) and not fld.metadata
): # TODO: is it right?
raise AttributeError("File has to have default value or metadata")
elif fld.default != attr.NOTHING:
output_names.append(fld.name)
elif (
fld.metadata
and self._field_metadata(
fld, inputs, output_dir, outputs=None, check_existance=False
)
!= attr.NOTHING
):
output_names.append(fld.name)
return output_names
def _field_defaultvalue(self, fld, output_dir):
"""Collect output file if the default value specified."""
if not isinstance(fld.default, (str, Path)):
raise AttributeError(
f"{fld.name} is a File, so default value "
f"should be a string or a Path, "
f"{fld.default} provided"
)
default = fld.default
if isinstance(default, str):
default = Path(default)
default = output_dir / default
if "*" not in str(default):
if default.exists():
return default
else:
raise AttributeError(f"file {default} does not exist")
else:
all_files = [Path(el) for el in glob(str(default.expanduser()))]
if len(all_files) > 1:
return all_files
elif len(all_files) == 1:
return all_files[0]
else:
raise AttributeError(f"no file matches {default.name}")
def _field_metadata(
self, fld, inputs, output_dir, outputs=None, check_existance=True
):
"""Collect output file if metadata specified."""
if self._check_requires(fld, inputs) is False:
return attr.NOTHING
if "value" in fld.metadata:
return output_dir / fld.metadata["value"]
# this block is only run if "output_file_template" is provided in output_spec
# if the field is set in input_spec with output_file_template,
# than the field already should have value
elif "output_file_template" in fld.metadata:
value = template_update_single(
fld, inputs=inputs, output_dir=output_dir, spec_type="output"
)
if fld.type is MultiOutputFile and type(value) is list:
# TODO: how to deal with mandatory list outputs
ret = []
for val in value:
val = Path(val)
if check_existance and not val.exists():
ret.append(attr.NOTHING)
else:
ret.append(val)
return ret
else:
val = Path(value)
# checking if the file exists
if check_existance and not val.exists():
# if mandatory raise exception
if "mandatory" in fld.metadata:
if fld.metadata["mandatory"]:
raise Exception(
f"mandatory output for variable {fld.name} does not exist"
)
return attr.NOTHING
return val
elif "callable" in fld.metadata:
            call_args = inspect.getfullargspec(fld.metadata["callable"])
call_args_val = {}
for argnm in call_args.args:
if argnm == "field":
call_args_val[argnm] = fld
elif argnm == "output_dir":
call_args_val[argnm] = output_dir
elif argnm == "inputs":
call_args_val[argnm] = inputs
elif argnm == "stdout":
call_args_val[argnm] = outputs["stdout"]
elif argnm == "stderr":
call_args_val[argnm] = outputs["stderr"]
else:
try:
call_args_val[argnm] = getattr(inputs, argnm)
except AttributeError:
raise AttributeError(
f"arguments of the callable function from {fld.name} "
f"has to be in inputs or be field or output_dir, "
f"but {argnm} is used"
)
return fld.metadata["callable"](**call_args_val)
else:
raise Exception("(_field_metadata) is not a current valid metadata key.")
def _check_requires(self, fld, inputs):
"""checking if all fields from the requires and template are set in the input
if requires is a list of list, checking if at least one list has all elements set
"""
from .helpers import ensure_list
if "requires" in fld.metadata:
# if requires is a list of list it is treated as el[0] OR el[1] OR...
if all([isinstance(el, list) for el in fld.metadata["requires"]]):
field_required_OR = fld.metadata["requires"]
# if requires is a list of tuples/strings - I'm creating a 1-el nested list
elif all([isinstance(el, (str, tuple)) for el in fld.metadata["requires"]]):
field_required_OR = [fld.metadata["requires"]]
else:
raise Exception(
f"requires field can be a list of list, or a list "
f"of strings/tuples, but {fld.metadata['requires']} "
f"provided for {fld.name}"
)
else:
field_required_OR = [[]]
for field_required in field_required_OR:
# if the output has output_file_template field,
# adding all input fields from the template to requires
if "output_file_template" in fld.metadata:
inp_fields = re.findall(r"{\w+}", fld.metadata["output_file_template"])
field_required += [
el[1:-1] for el in inp_fields if el[1:-1] not in field_required
]
        # it's a flag; if a field from the list is not set in the input it will be changed to False
required_found = True
for field_required in field_required_OR:
required_found = True
# checking if the input fields from requires have set values
for inp in field_required:
if isinstance(inp, str): # name of the input field
if not hasattr(inputs, inp):
raise Exception(
f"{inp} is not a valid input field, can't be used in requires"
)
elif getattr(inputs, inp) in [attr.NOTHING, None]:
required_found = False
break
elif isinstance(inp, tuple): # (name, allowed values)
inp, allowed_val = inp[0], ensure_list(inp[1])
if not hasattr(inputs, inp):
raise Exception(
f"{inp} is not a valid input field, can't be used in requires"
)
elif getattr(inputs, inp) not in allowed_val:
required_found = False
break
else:
raise Exception(
f"each element of the requires element should be a string or a tuple, "
f"but {inp} is found in {field_required}"
)
# if the specific list from field_required_OR has all elements set, no need to check more
if required_found:
break
if required_found:
return True
else:
return False
@attr.s(auto_attribs=True, kw_only=True)
class ContainerSpec(ShellSpec):
"""Refine the generic command-line specification to container execution."""
image: ty.Union[File, str] = attr.ib(
metadata={"help_string": "image", "mandatory": True}
)
"""The image to be containerized."""
container: ty.Union[File, str, None] = attr.ib(
metadata={"help_string": "container"}
)
"""The container."""
container_xargs: ty.Optional[ty.List[str]] = attr.ib(
default=None, metadata={"help_string": "todo"}
)
"""Execution arguments to run the image."""
bindings: ty.Optional[
ty.List[
ty.Tuple[
Path, # local path
Path, # container path
ty.Optional[str], # mount mode
]
]
] = attr.ib(default=None, metadata={"help_string": "bindings"})
"""Mount points to be bound into the container."""
def _file_check_n_bindings(self, field):
if field.name == "image":
return
file = Path(getattr(self, field.name))
if field.metadata.get("container_path"):
# if the path is in a container the input should be treated as a str (hash as a str)
# field.type = "str"
# setattr(self, field.name, str(file))
pass
# if this is a local path, checking if the path exists
elif file.exists():
if self.bindings is None:
self.bindings = []
self.bindings.append((file.parent, f"/pydra_inp_{field.name}", "ro"))
# error should be raised only if the type is strictly File or Directory
elif field.type in [File, Directory]:
raise FileNotFoundError(
f"the file {file} from {field.name} input does not exist, "
f"if the file comes from the container, "
f"use field.metadata['container_path']=True"
)
@attr.s(auto_attribs=True, kw_only=True)
class DockerSpec(ContainerSpec):
"""Particularize container specifications to the Docker engine."""
container: str = attr.ib("docker", metadata={"help_string": "container"})
@attr.s(auto_attribs=True, kw_only=True)
class SingularitySpec(ContainerSpec):
"""Particularize container specifications to Singularity."""
container: str = attr.ib("singularity", metadata={"help_string": "container type"})
class LazyField:
"""Lazy fields implement promises."""
def __init__(self, node, attr_type):
"""Initialize a lazy field."""
self.name = node.name
if attr_type == "input":
self.fields = [field[0] for field in node.input_spec.fields]
elif attr_type == "output":
self.fields = node.output_names
else:
raise ValueError(f"LazyField: Unknown attr_type: {attr_type}")
self.attr_type = attr_type
self.field = None
def __getattr__(self, name):
if name in self.fields or name == "all_":
self.field = name
return self
if name in dir(self):
return self.__getattribute__(name)
raise AttributeError(
f"Task {self.name} has no {self.attr_type} attribute {name}"
)
def __getstate__(self):
state = self.__dict__.copy()
state["name"] = self.name
state["fields"] = self.fields
state["field"] = self.field
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __repr__(self):
return f"LF('{self.name}', '{self.field}')"
def get_value(self, wf, state_index=None):
"""Return the value of a lazy field."""
if self.attr_type == "input":
return getattr(wf.inputs, self.field)
elif self.attr_type == "output":
node = getattr(wf, self.name)
result = node.result(state_index=state_index)
if isinstance(result, list):
if len(result) and isinstance(result[0], list):
results_new = []
for res_l in result:
res_l_new = []
for res in res_l:
if res.errored:
raise ValueError("Error from get_value")
else:
res_l_new.append(res.get_output_field(self.field))
results_new.append(res_l_new)
return results_new
else:
results_new = []
for res in result:
if res.errored:
raise ValueError("Error from get_value")
else:
results_new.append(res.get_output_field(self.field))
return results_new
else:
if result.errored:
raise ValueError("Error from get_value")
return result.get_output_field(self.field)
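    # Hedged illustration (task and field names are assumptions): inside a
    # workflow, wiring such as
    #   wf.add(my_task(name="t1", x=wf.lzin.x))
    #   wf.set_output([("out", wf.t1.lzout.out)])
    # stores LazyField objects; get_value() above resolves them against the
    # upstream results only when the workflow actually runs.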
def donothing(*args, **kwargs):
return None
@attr.s(auto_attribs=True, kw_only=True)
class TaskHook:
"""Callable task hooks."""
pre_run_task: ty.Callable = donothing
post_run_task: ty.Callable = donothing
pre_run: ty.Callable = donothing
post_run: ty.Callable = donothing
def __setattr__(cls, attr, val):
if attr not in ["pre_run_task", "post_run_task", "pre_run", "post_run"]:
raise AttributeError("Cannot set unknown hook")
super().__setattr__(attr, val)
def reset(self):
for val in ["pre_run_task", "post_run_task", "pre_run", "post_run"]:
setattr(self, val, donothing)
def path_to_string(value):
"""Convert paths to strings."""
if isinstance(value, Path):
value = str(value)
elif isinstance(value, list) and len(value) and isinstance(value[0], Path):
value = [str(val) for val in value]
return value
|
the-stack_0_1991 | """
Decorator usage examples
"""
import time
import functools
import decorator
def cost(func):
@functools.wraps(func)
def wapper(*args):
t1 = time.time()
res = func(*args)
t2 = time.time()
        print(f'Elapsed time: {str(t2 - t1)}')
return res
    # return wapper() would return the result of calling the function, so no parentheses here
return wapper
@cost
def excuteBll(a: int, b: int) -> int:
'''
    Return the sum of the two numbers
    :param a: first parameter
    :param b: second parameter
:return:
'''
time.sleep(1)
print(a + b)
def retry(retry_count=3, sleep_time=1):
'''
    Retry decorator
    :param retry_count: number of retries, default 3
    :param sleep_time: wait time between retries in seconds, default 1
:return:
'''
def inner(func):
        print('Step 1')
@functools.wraps(func)
def wapper(*args, **kwargs):
            print('Step 2')
for i in range(retry_count):
                print('Step 3')
try:
                    print('Step 6')
                    res = func(*args, **kwargs)
                    print('Last step')
return res
except:
                    print('Step 7')
time.sleep(sleep_time)
continue
return None
return wapper
return inner
@cost
@retry(retry_count=2, sleep_time=3)
def requestNameHttp(ip, address):
    print('Step 4')
    print('Sending request...')
    time.sleep(1)
    print('Request succeeded')
return
class Cust(object):
'''
    Class decorator; the key point is the __call__ method
'''
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
        print('Decorator wrapping')
        f = self.func(*args, **kwargs)
        print('Decoration finished')
return f
@Cust
def test02(a: int, b: int) -> int:
'''
    Return the result of adding the two numbers
:param a:
:param b:
:return:
'''
    print('Inside the function')
print(f'a+b={a + b}')
@decorator.decorator
def costss(func, time_sleep=3, *args, **kw):
    print('Started')
f = func(*args, **kw)
    print('Finished')
return f
@costss
def costssTest(a, b):
print(f'a+b={a + b}')
if __name__ == '__main__':
# excuteBll(3, 4)
# print(excuteBll.__name__)
# print(excuteBll.__doc__)
print('=====================')
requestNameHttp('', '')
print('=====================')
test02(3, 4)
# print(test02.__name__)
# print(test02.__doc__)
print('=====================')
costssTest(3, 4)
|
the-stack_0_1996 | import datetime
from dataclasses import dataclass
from typing import Dict, List, Tuple
import appdaemon.plugins.hass.hassapi as hass
@dataclass
class Preferences:
input_time: str
input_temperature: str
target_area: str
@classmethod
def from_args(cls, prefs: Dict[str, Dict[str, str]]) -> Dict[str, "Preferences"]:
ret = {}
for k, v in prefs.items():
ret[k] = cls(**v)
return ret
class Climate(hass.Hass):
"""Hacs class."""
def initialize(self):
try:
self.thermostat = self.args["thermostat"]
except KeyError:
self.log("missing required argument: thermostat")
raise
self.mode_switching_enabled = self.args.get("mode_switching_enabled", False)
try:
self.prefs = Preferences.from_args(self.args["preferences"])
except KeyError:
self.log("missing required argument: preferences")
raise
self.log(f"preferences: {self.prefs}")
self.time_pref = self.create_pref_time_dict()
try:
self._outside_temperature_sensor = self.args["weather_sensor"]
except KeyError:
self.log("missing required argument: weather_sensor")
raise
self.run_minutely(self.temperature_check, datetime.time(0, 0, 0))
@property
def outside_temperature(self) -> float:
return float(self.get_state(self._outside_temperature_sensor))
@property
def max_temperature(self) -> int:
return int(self.args.get("max_temperature", 80))
@property
def min_temperature(self) -> int:
return int(self.args.get("min_temperature", 60))
@property
def thermostat_temperature(self) -> int:
return int(self.get_state(
self.thermostat, attribute="current_temperature"
))
def temperature_check(self, kwargs):
self.log("Checking temperature")
pref = self.nearest(self.time_pref.keys(), self.get_now())
preference = self.time_pref.get(pref)
self.log(f"using preference: {preference}")
self._set_temp(preference)
def _set_temp(self, preference: Preferences):
temp_to_set = float(self.get_state(preference.input_temperature))
current_outside_temp = self.outside_temperature
current_state = self.get_state(self.thermostat)
thermostat_temp = self.thermostat_temperature
sensors = self.args.get("inside_temperature_sensors", {})
current_temps = self.get_current_temperatures(sensors)
target_area = preference.target_area
if target_area in current_temps:
target_area_temp = current_temps[target_area]
self.log(
f"Target area: {target_area} adjusted temperature: {target_area_temp}, actual: {current_temps[target_area]}"
)
else:
self.log("Target area not currently in current temperatures")
target_area_temp = thermostat_temp
try:
adjustment = thermostat_temp - current_temps[target_area]
except KeyError:
self.log(
f"Could not find target area: {target_area} in current temperatures"
)
adjustment = 0
temp_to_set += adjustment
if temp_to_set > self.max_temperature:
self.log(f"temp: {temp_to_set} was too high, using max temperature: {self.max_temperature}")
temp_to_set = self.max_temperature
elif temp_to_set < self.min_temperature:
self.log(f"temp: {temp_to_set} was too low, using min temperature: {self.min_temperature}")
temp_to_set = self.min_temperature
else:
self.log(f"temp_to_set: {temp_to_set} within temperature boundaries")
self.log(
f"adj_temp: {temp_to_set}, thermostat_temp: {thermostat_temp}, current_outside_temp: {current_outside_temp}"
)
if target_area_temp > current_outside_temp:
mode = "heat"
else:
mode = "cool"
self.log(f"Current mode: {current_state}, desired mode: {mode}")
if mode == "cool" and self.min_temperature == temp_to_set and self.mode_switching_enabled and current_state == "heat":
self.log(f"Changing climate mode from {current_state} to {mode}")
self.call_service(
"climate/set_hvac_mode", hvac_mode=mode, entity_id=self.thermostat
)
if current_state != mode and self.mode_switching_enabled:
self.log(f"Changing climate mode from {current_state} to {mode}")
self.call_service(
"climate/set_hvac_mode", hvac_mode=mode, entity_id=self.thermostat
)
self.log(
f"Current Temp Outside: {current_outside_temp}, current indoor temp: {thermostat_temp} setting indoor temp to: {temp_to_set}, using mode: {mode}"
)
self.call_service(
"climate/set_temperature", entity_id=self.thermostat, temperature=temp_to_set
)
def get_current_temperatures(self, sensors):
current_temps = {}
for k, v in sensors.items():
temps = []
for x in v["sensors"]:
inside_temp = self.get_state(x)
try:
temps.append(float(inside_temp))
except ValueError:
self.log(f"could not parse {inside_temp}")
if temps:
current_temps[k] = sum(temps) / len(temps)
self.log(f"Current temperature: {k} {current_temps[k]}")
return current_temps
def nearest(self, items, pivot):
date_items = [
datetime.datetime.combine(datetime.date.today(), x, tzinfo=pivot.tzinfo)
for x in items
]
date_items = [x for x in date_items if x < pivot]
if not date_items:
return min(items)
return min(date_items, key=lambda x: abs(x - pivot)).time()
def create_pref_time_dict(self) -> Dict[datetime.time, Preferences]:
ret = {}
for val in self.prefs.values():
state = self.get_state(val.input_time)
try:
ret[self.parse_time(state, aware=True)] = val
except TypeError:
self.log(f"Error parsing: {state}")
return ret |
the-stack_0_1997 | from torch import nn
from transformers import BertModel, BertTokenizer, BertConfig
import json
from typing import List, Dict, Optional
import os
import torch
from collections import OrderedDict
import numpy as np
import logging
class BioBERT(nn.Module):
"""Huggingface AutoModel to generate token embeddings.
Loads the correct class, e.g. BERT / RoBERTa etc.
"""
def __init__(self, max_seq_length: int = 128, model_args: Dict = {}, cache_dir: Optional[str] = None ):
super(BioBERT, self).__init__()
self.config_keys = ['max_seq_length']
self.max_seq_length = max_seq_length
config = BertConfig.from_json_file('/mnt/nas2/jaimeen/COVID/BioBERT/config.json')
self.auto_model = BertModel(config=config)
self.vocab = self.load_bert_vocab('/mnt/nas2/jaimeen/COVID/BioBERT/vocab.txt')
self.tokenizer = BertTokenizer(vocab_file='/mnt/nas2/jaimeen/COVID/BioBERT/vocab.txt', max_length=max_seq_length)
def load_bert_vocab(self, vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def load_pretrained(self, config):
state_dict = torch.load('/mnt/nas2/jaimeen/COVID/BioBERT/pytorch_model.bin')
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('bert.'):
k = k.replace('bert.', '')
new_state_dict[k] = v
elif k.startswith('cls.'):
continue
else:
new_state_dict[k] = v
self.model = BertModel(config)
self.model.load_state_dict(new_state_dict)
def forward(self, features):
"""Returns token_embeddings, cls_token"""
output_states = self.auto_model(**features)
output_tokens = output_states[0]
cls_tokens = output_tokens[:, 0, :] # CLS token is first token
features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})
if self.auto_model.config.output_hidden_states:
all_layer_idx = 2
if len(output_states) < 3: #Some models only output last_hidden_states and all_hidden_states
all_layer_idx = 1
hidden_states = output_states[all_layer_idx]
features.update({'all_layer_embeddings': hidden_states})
return features
def get_word_embedding_dimension(self) -> int:
return self.auto_model.config.hidden_size
def tokenize(self, text: str) -> List[int]:
"""
Tokenizes a text and maps tokens to token-ids
"""
return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
"""
Convert tokenized sentence in its embedding ids, segment ids and mask
:param tokens:
a tokenized sentence
:param pad_seq_length:
the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
:return: embedding ids, segment ids and mask for the sentence
"""
pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3 #Add space for special tokens
return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, pad_to_max_length=True, return_tensors='pt')
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str):
self.auto_model.save_pretrained(output_path)
self.tokenizer.save_pretrained(output_path)
with open(os.path.join(output_path, '/mnt/nas2/jaimeen/COVID/BioBERT/config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
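    # Hedged usage sketch (sentence text and lengths are assumptions): the class
    # is meant to be driven in the sentence-transformers style, roughly
    #   model = BioBERT(max_seq_length=128)
    #   ids = model.tokenize("covid-19 transmission dynamics")
    #   features = model.get_sentence_features(ids, pad_seq_length=128)
    #   features = model(features)  # adds 'token_embeddings' / 'cls_token_embeddings'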
|
the-stack_0_1999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import random
import sys
from inky import InkyWHAT
from PIL import Image, ImageFont, ImageDraw
from font_source_serif_pro import SourceSerifProSemibold
from font_source_sans_pro import SourceSansProSemibold
print("""Inky wHAT: Quotes
Display quotes on Inky wHAT.
""")
try:
import wikiquotes
except ImportError:
print("""This script requires the wikiquotes module.
Install with:
sudo apt install python-lxml
sudo pip install wikiquotes
""")
sys.exit(1)
# Command line arguments to set display type and colour, and enter your name
parser = argparse.ArgumentParser()
parser.add_argument('--colour', '-c', type=str, required=True, choices=["red", "black", "yellow"], help="ePaper display colour")
args = parser.parse_args()
colour = args.colour
# This function will take a quote as a string, a width to fit
# it into, and a font (one that's been loaded) and then reflow
# that quote with newlines to fit into the space required.
def reflow_quote(quote, width, font):
words = quote.split(" ")
reflowed = '"'
line_length = 0
for i in range(len(words)):
word = words[i] + " "
word_length = font.getsize(word)[0]
line_length += word_length
if line_length < width:
reflowed += word
else:
line_length = word_length
reflowed = reflowed[:-1] + "\n " + word
reflowed = reflowed.rstrip() + '"'
return reflowed
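# Hedged illustration (actual pixel widths depend on the loaded font): calling
# reflow_quote("Science is a way of thinking", 200, quote_font) returns the text
# wrapped onto lines no wider than roughly 200 px and enclosed in double quotes.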
# Set up the correct display and scaling factors
inky_display = InkyWHAT(colour)
inky_display.set_border(inky_display.WHITE)
# inky_display.set_rotation(180)
w = inky_display.WIDTH
h = inky_display.HEIGHT
# Create a new canvas to draw on
img = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(img)
# Load the fonts
font_size = 24
author_font = ImageFont.truetype(SourceSerifProSemibold, font_size)
quote_font = ImageFont.truetype(SourceSansProSemibold, font_size)
# A list of famous scientists to search for quotes from
# on https://en.wikiquote.org. Change them to your
# favourite people, if you like!
people = [
"Ada Lovelace",
"Carl Sagan",
"Charles Darwin",
"Dorothy Hodgkin",
"Edith Clarke",
"Grace Hopper",
"Hedy Lamarr",
"Isaac Newton",
"James Clerk Maxwell",
"Margaret Hamilton",
"Marie Curie",
"Michael Faraday",
"Niels Bohr",
"Nikola Tesla",
"Rosalind Franklin",
"Stephen Hawking"
]
# The amount of padding around the quote. Note that
# a value of 30 means 15 pixels padding left and 15
# pixels padding right.
#
# Also define the max width and height for the quote.
padding = 50
max_width = w - padding
max_height = h - padding - author_font.getsize("ABCD ")[1]
below_max_length = False
# Only pick a quote that will fit in our defined area
# once rendered in the font and size defined.
while not below_max_length:
person = random.choice(people) # Pick a random person from our list
quote = wikiquotes.random_quote(person, "english")
reflowed = reflow_quote(quote, max_width, quote_font)
p_w, p_h = quote_font.getsize(reflowed) # Width and height of quote
p_h = p_h * (reflowed.count("\n") + 1) # Multiply through by number of lines
if p_h < max_height:
below_max_length = True # The quote fits! Break out of the loop.
else:
continue
# x- and y-coordinates for the top left of the quote
quote_x = (w - max_width) / 2
quote_y = ((h - max_height) + (max_height - p_h - author_font.getsize("ABCD ")[1])) / 2
# x- and y-coordinates for the top left of the author
author_x = quote_x
author_y = quote_y + p_h
author = "- " + person
# Draw red rectangles top and bottom to frame quote
draw.rectangle((padding / 4, padding / 4, w - (padding / 4), quote_y - (padding / 4)), fill=inky_display.RED)
draw.rectangle((padding / 4, author_y + author_font.getsize("ABCD ")[1] + (padding / 4) + 5, w - (padding / 4), h - (padding / 4)), fill=inky_display.RED)
# Add some white hatching to the red rectangles to make
# it look a bit more interesting
hatch_spacing = 12
for x in range(0, 2 * w, hatch_spacing):
draw.line((x, 0, x - w, h), fill=inky_display.WHITE, width=3)
# Write our quote and author to the canvas
draw.multiline_text((quote_x, quote_y), reflowed, fill=inky_display.BLACK, font=quote_font, align="left")
draw.multiline_text((author_x, author_y), author, fill=inky_display.RED, font=author_font, align="left")
print(reflowed + "\n" + author + "\n")
# Display the completed canvas on Inky wHAT
inky_display.set_image(img)
inky_display.show()
|
the-stack_0_2000 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for litecoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a litecoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
if extra_conf != None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
            # address, privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("litecoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the litecoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'litecoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; litecoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to litecoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
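    # Hedged usage sketch (the log fragment and RPC call are assumptions): tests
    # typically use this context manager as
    #   with node.assert_debug_log(expected_msgs=['Loaded best chain']):
    #       node.loadwallet(wallet_name)
    # and it raises if the fragment never appears in debug.log while the block runs.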
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to litecoind
expected_msg: regex that stderr should match when litecoind fails
Will throw if litecoind starts without an error.
        Will throw if an expected_msg is provided and it does not match litecoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('litecoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "litecoind should have exited with an error"
else:
assert_msg = "litecoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes litecoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to litecoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run litecoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same litecoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running litecoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
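# Hypothetical usage sketch (not part of the original file); the binary path,
# datadir and RPC names below are illustrative only:
#
#     cli = TestNodeCLI("litecoin-cli", "/tmp/test_node0")
#     height = cli.getblockcount()                  # attribute access builds the RPC command
#     info = cli("-rpcwallet=w1").getwalletinfo()   # per-call command-line options
#     results = cli.batch([cli.getblockcount.get_request(),
#                          cli.getbestblockhash.get_request()])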
|
the-stack_0_2001 | import sys
import unittest
from argparse import Namespace
from .fixtures import set_up_cluster, set_up_subparser
from kafka.tools.assigner.exceptions import ConfigurationException
from kafka.tools.assigner.actions.clone import ActionClone
from kafka.tools.assigner.models.broker import Broker
class ActionCloneTests(unittest.TestCase):
def setUp(self):
self.cluster = set_up_cluster()
(self.parser, self.subparsers) = set_up_subparser()
self.args = Namespace(exclude_topics=[])
def test_create_class(self):
self.args.brokers = [1]
self.args.to_broker = 2
action = ActionClone(self.args, self.cluster)
assert isinstance(action, ActionClone)
def test_create_class_bad_target(self):
self.args.brokers = [1]
self.args.to_broker = 3
self.assertRaises(ConfigurationException, ActionClone, self.args, self.cluster)
def test_create_class_bad_source(self):
self.args.brokers = [3]
self.args.to_broker = 2
self.assertRaises(ConfigurationException, ActionClone, self.args, self.cluster)
def test_configure_args(self):
ActionClone.configure_args(self.subparsers)
sys.argv = ['kafka-assigner', 'clone', '-b', '1', '-t', '2']
parsed_args = self.parser.parse_args()
assert parsed_args.action == 'clone'
def test_process_cluster_clean_target(self):
self.cluster.add_broker(Broker(3, "brokerhost3.example.com"))
self.args.brokers = [1]
self.args.to_broker = 3
action = ActionClone(self.args, self.cluster)
action.process_cluster()
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = self.cluster.brokers[3]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [b3, b1, b2]
assert self.cluster.topics['testTopic1'].partitions[1].replicas == [b2, b3, b1]
assert self.cluster.topics['testTopic2'].partitions[0].replicas == [b2, b3, b1]
assert self.cluster.topics['testTopic2'].partitions[1].replicas == [b3, b1, b2]
def test_process_cluster_duplicates(self):
self.args.brokers = [1]
self.args.to_broker = 2
action = ActionClone(self.args, self.cluster)
action.process_cluster()
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [b2, b1]
assert self.cluster.topics['testTopic1'].partitions[1].replicas == [b2, b1]
assert self.cluster.topics['testTopic2'].partitions[0].replicas == [b2, b1]
assert self.cluster.topics['testTopic2'].partitions[1].replicas == [b2, b1]
def test_process_cluster_no_change(self):
self.cluster.add_broker(Broker(3, "brokerhost3.example.com"))
self.args.brokers = [3]
self.args.to_broker = 1
action = ActionClone(self.args, self.cluster)
action.process_cluster()
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [b1, b2]
assert self.cluster.topics['testTopic1'].partitions[1].replicas == [b2, b1]
assert self.cluster.topics['testTopic2'].partitions[0].replicas == [b2, b1]
assert self.cluster.topics['testTopic2'].partitions[1].replicas == [b1, b2]
|
the-stack_0_2003 | from dotenv import load_dotenv
load_dotenv("config.env")
BOT_TOKEN = "1840298314:AAFUMtMNiJpyBBt4tyGfuq_yO3ZXl88jxwk"
API_ID = 5119765
API_HASH = "ab310ff746864c1a33f3c590f1598c06"
USERBOT_PREFIX = "."
PHONE_NUMBER = "+16465640536" # Need for Userbot # Sudo users have full access to everything, don't trust anyone
LOG_GROUP_ID = -100125431255
GBAN_LOG_GROUP_ID = -1001263664495
MESSAGE_DUMP_CHAT = -1001263664495
FERNET_ENCRYPTION_KEY = "iKMq0WZMnJKjMQxZWKtv-cplMuF_LoyshXj0XbTGGWM=" # Leave this as it is
WELCOME_DELAY_KICK_SEC = 300
MONGO_DB_URI = "mongodb+srv://Satyal:[email protected]/myFirstDatabase?retryWrites=true&w=majority"
ARQ_API_KEY = "NFXKWF-UYMFGH-OVWYFN-VXDNSM-ARQ"
ARQ_API_URL = "https://thearq.tech"
LOG_MENTIONS = True
RSS_DELAY = 300 # In seconds
PM_PERMIT = False
SUDO_USERS_ID = 1741347822
|
the-stack_0_2004 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction."""
# This file was originally under tf/python/feature_column, and was moved to
# Keras package in order to remove the reverse dependency from TF to Keras.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
class _BaseFeaturesLayer(Layer):
"""Base class for DenseFeatures and SequenceFeatures.
Defines common methods and helpers.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model.
expected_column_type: Expected class for provided feature columns.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` doesn't match
`expected_column_type`.
"""
def __init__(self,
feature_columns,
expected_column_type,
trainable,
name,
partitioner=None,
**kwargs):
super(_BaseFeaturesLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = feature_column_v2._normalize_feature_columns( # pylint: disable=protected-access
feature_columns)
self._state_manager = feature_column_v2._StateManagerImpl( # pylint: disable=protected-access
self, self.trainable)
self._partitioner = partitioner
for column in self._feature_columns:
if not isinstance(column, expected_column_type):
raise ValueError(
'Items of feature_columns must be a {}. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(
expected_column_type, column))
def build(self, _):
for column in self._feature_columns:
with variable_scope.variable_scope(
self.name, partitioner=self._partitioner):
with variable_scope.variable_scope(
_sanitize_column_name_for_variable_scope(column.name)):
column.create_state(self._state_manager)
super(_BaseFeaturesLayer, self).build(None)
  def _target_shape(self, input_shape, num_elements):
"""Computes expected output shape of the layer or a column's dense tensor.
Args:
input_shape: Tensor or array with batch shape.
num_elements: Size of the last dimension of the output.
Returns:
Tuple with output shape.
"""
raise NotImplementedError('Calling an abstract method.')
def compute_output_shape(self, input_shape):
total_elements = 0
for column in self._feature_columns:
total_elements += column.variable_shape.num_elements()
return self._target_shape(input_shape, total_elements)
def _process_dense_tensor(self, column, tensor):
"""Reshapes the dense tensor output of a column based on expected shape.
Args:
column: A DenseColumn or SequenceDenseColumn object.
tensor: A dense tensor obtained from the same column.
Returns:
Reshaped dense tensor.
"""
num_elements = column.variable_shape.num_elements()
target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
return array_ops.reshape(tensor, shape=target_shape)
def _verify_and_concat_tensors(self, output_tensors):
"""Verifies and concatenates the dense output of several columns."""
_verify_static_batch_size_equality(output_tensors, self._feature_columns)
return array_ops.concat(output_tensors, -1)
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = [serialization.serialize_feature_column(fc)
for fc in self._feature_columns]
config = {'feature_columns': column_configs}
config['partitioner'] = generic_utils.serialize_keras_object(
self._partitioner)
base_config = super( # pylint: disable=bad-super-call
_BaseFeaturesLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
columns_by_name = {}
config_cp['feature_columns'] = [serialization.deserialize_feature_column(
c, custom_objects, columns_by_name) for c in config['feature_columns']]
config_cp['partitioner'] = generic_utils.deserialize_keras_object(
config['partitioner'], custom_objects)
return cls(**config_cp)
def _sanitize_column_name_for_variable_scope(name):
"""Sanitizes user-provided feature names for use as variable scopes."""
invalid_char = re.compile('[^A-Za-z0-9_.\\-]')
return invalid_char.sub('_', name)
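# For example (illustrative only), _sanitize_column_name_for_variable_scope('price/usd')
# would return 'price_usd', since '/' falls outside the allowed character set above.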
def _verify_static_batch_size_equality(tensors, columns):
"""Verify equality between static batch sizes.
Args:
tensors: iterable of input tensors.
columns: Corresponding feature columns.
Raises:
ValueError: in case of mismatched batch sizes.
"""
expected_batch_size = None
for i in range(0, len(tensors)):
    # batch_size is a Dimension object.
batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
tensors[i].shape[0]))
if batch_size.value is not None:
if expected_batch_size is None:
        batch_size_column_index = i
expected_batch_size = batch_size
elif not expected_batch_size.is_compatible_with(batch_size):
raise ValueError(
'Batch size (first dimension) of each feature must be same. '
'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
expected_batch_size, batch_size))
|
the-stack_0_2008 | """Example on regression using YearPredictionMSD."""
import time
import torch
import numbers
import torch.nn as nn
from torch.nn import functional as F
from sklearn.preprocessing import scale
from sklearn.datasets import load_svmlight_file
from torch.utils.data import TensorDataset, DataLoader
from torchensemble.fusion import FusionRegressor
from torchensemble.voting import VotingRegressor
from torchensemble.bagging import BaggingRegressor
from torchensemble.gradient_boosting import GradientBoostingRegressor
from torchensemble.snapshot_ensemble import SnapshotEnsembleRegressor
from torchensemble.utils.logging import set_logger
def load_data(batch_size):
# The dataset can be downloaded from:
# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html#YearPredictionMSD
if not isinstance(batch_size, numbers.Integral):
msg = "`batch_size` should be an integer, but got {} instead."
raise ValueError(msg.format(batch_size))
# MODIFY THE PATH IF YOU WANT
train_path = "../../Dataset/LIBSVM/yearpredictionmsd_training"
test_path = "../../Dataset/LIBSVM/yearpredictionmsd_testing"
train = load_svmlight_file(train_path)
test = load_svmlight_file(test_path)
# Numpy array -> Tensor
X_train, X_test = (
torch.FloatTensor(train[0].toarray()),
torch.FloatTensor(test[0].toarray()),
)
y_train, y_test = (
torch.FloatTensor(scale(train[1]).reshape(-1, 1)),
torch.FloatTensor(scale(test[1]).reshape(-1, 1)),
)
# Tensor -> Data loader
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
return train_loader, test_loader
def display_records(records, logger):
msg = (
"{:<28} | Testing MSE: {:.2f} | Training Time: {:.2f} s |"
" Evaluating Time: {:.2f} s"
)
print("\n")
for method, training_time, evaluating_time, mse in records:
logger.info(msg.format(method, mse, training_time, evaluating_time))
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.linear1 = nn.Linear(90, 128)
self.linear2 = nn.Linear(128, 128)
self.linear3 = nn.Linear(128, 1)
def forward(self, x):
x = x.view(x.size()[0], -1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
if __name__ == "__main__":
# Hyper-parameters
n_estimators = 10
lr = 1e-3
weight_decay = 5e-4
epochs = 50
# Utils
batch_size = 512
records = []
torch.manual_seed(0)
# Load data
train_loader, test_loader = load_data(batch_size)
print("Finish loading data...\n")
logger = set_logger("regression_YearPredictionMSD_mlp")
# FusionRegressor
model = FusionRegressor(
estimator=MLP,
n_estimators=n_estimators,
cuda=True
)
# Set the optimizer
model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)
tic = time.time()
model.fit(train_loader, epochs=epochs)
toc = time.time()
training_time = toc - tic
tic = time.time()
testing_mse = model.predict(test_loader)
toc = time.time()
evaluating_time = toc - tic
records.append(("FusionRegressor", training_time, evaluating_time,
testing_mse))
# VotingRegressor
model = VotingRegressor(
estimator=MLP,
n_estimators=n_estimators,
cuda=True
)
# Set the optimizer
model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)
tic = time.time()
model.fit(train_loader, epochs=epochs)
toc = time.time()
training_time = toc - tic
tic = time.time()
testing_mse = model.predict(test_loader)
toc = time.time()
evaluating_time = toc - tic
records.append(("VotingRegressor", training_time, evaluating_time,
testing_mse))
# BaggingRegressor
model = BaggingRegressor(
estimator=MLP,
n_estimators=n_estimators,
cuda=True
)
# Set the optimizer
model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)
tic = time.time()
model.fit(train_loader, epochs=epochs)
toc = time.time()
training_time = toc - tic
tic = time.time()
testing_mse = model.predict(test_loader)
toc = time.time()
evaluating_time = toc - tic
records.append(("BaggingRegressor", training_time, evaluating_time,
testing_mse))
# GradientBoostingRegressor
model = GradientBoostingRegressor(
estimator=MLP,
n_estimators=n_estimators,
cuda=True
)
# Set the optimizer
model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)
tic = time.time()
model.fit(train_loader, epochs=epochs)
toc = time.time()
training_time = toc - tic
tic = time.time()
testing_mse = model.predict(test_loader)
toc = time.time()
evaluating_time = toc - tic
records.append(("GradientBoostingRegressor", training_time,
evaluating_time, testing_mse))
# SnapshotEnsembleRegressor
model = SnapshotEnsembleRegressor(
estimator=MLP,
n_estimators=n_estimators,
cuda=True
)
# Set the optimizer
model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)
tic = time.time()
model.fit(train_loader, epochs=epochs)
toc = time.time()
training_time = toc - tic
tic = time.time()
    testing_mse = model.predict(test_loader)
toc = time.time()
evaluating_time = toc - tic
records.append(("SnapshotEnsembleRegressor", training_time,
evaluating_time, testing_acc))
# Print results on different ensemble methods
display_records(records, logger)
|
the-stack_0_2010 | import asyncio
import os
import sys
import traceback
import disnake
from disnake.ext import commands
if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def fancy_traceback(exc: Exception) -> str:
"""May not fit the message content limit"""
text = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
return f"```py\n{text[-4086:]}\n```"
class TestBot(commands.Bot):
def __init__(self):
super().__init__(
command_prefix="..",
intents=disnake.Intents.all(),
help_command=None, # type: ignore
sync_commands_debug=True,
sync_permissions=True,
test_guilds=[
570841314200125460,
768247229840359465,
808030843078836254,
723976264511389746,
],
)
def load_all_extensions(self, folder: str) -> None:
py_path = f"test_bot.{folder}"
folder = f"test_bot/{folder}"
for name in os.listdir(folder):
if name.endswith(".py") and os.path.isfile(f"{folder}/{name}"):
self.load_extension(f"{py_path}.{name[:-3]}")
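    # Hypothetical folder layout assumed by load_all_extensions (illustrative only):
    #
    #     test_bot/
    #         cogs/
    #             misc.py    -> loaded as the extension "test_bot.cogs.misc"
    #             slash.py   -> loaded as the extension "test_bot.cogs.slash"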
async def on_ready(self):
# fmt: off
print(
f"\n"
f"The bot is ready.\n"
f"User: {self.user}\n"
f"ID: {self.user.id}\n"
)
# fmt: on
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
embed = disnake.Embed(
title=f"Command `{ctx.command}` failed due to `{error}`",
description=fancy_traceback(error),
color=disnake.Color.red(),
)
await ctx.send(embed=embed)
async def on_slash_command_error(
self,
inter: disnake.AppCmdInter,
error: commands.CommandError,
) -> None:
embed = disnake.Embed(
title=f"Slash command `{inter.data.name}` failed due to `{error}`",
description=fancy_traceback(error),
color=disnake.Color.red(),
)
if inter.response._responded:
send = inter.channel.send
else:
send = inter.response.send_message
await send(embed=embed)
async def on_user_command_error(
self,
inter: disnake.AppCmdInter,
error: commands.CommandError,
) -> None:
embed = disnake.Embed(
title=f"User command `{inter.data.name}` failed due to `{error}`",
description=fancy_traceback(error),
color=disnake.Color.red(),
)
if inter.response._responded:
send = inter.channel.send
else:
send = inter.response.send_message
await send(embed=embed)
async def on_message_command_error(
self,
inter: disnake.AppCmdInter,
error: commands.CommandError,
) -> None:
embed = disnake.Embed(
title=f"Message command `{inter.data.name}` failed due to `{error}`",
description=fancy_traceback(error),
color=disnake.Color.red(),
)
if inter.response._responded:
send = inter.channel.send
else:
send = inter.response.send_message
await send(embed=embed)
print(f"disnake: {disnake.__version__}\n")
bot = TestBot()
bot.load_all_extensions("cogs")
bot.run(os.environ.get("BOT_TOKEN"))
|
the-stack_0_2011 | import numpy as np
from sklearn.ensemble import RandomForestClassifier as SKRandomForestClassifier
from sklearn.feature_selection import SelectFromModel as SkSelect
from skopt.space import Real
from .feature_selector import FeatureSelector
class RFClassifierSelectFromModel(FeatureSelector):
"""Selects top features based on importance weights using a Random Forest classifier."""
name = 'RF Classifier Select From Model'
hyperparameter_ranges = {
"percent_features": Real(.01, 1),
"threshold": ['mean', -np.inf]
}
def __init__(self, number_features=None, n_estimators=10, max_depth=None,
percent_features=0.5, threshold=-np.inf, n_jobs=-1, random_seed=0, **kwargs):
parameters = {"number_features": number_features,
"n_estimators": n_estimators,
"max_depth": max_depth,
"percent_features": percent_features,
"threshold": threshold,
"n_jobs": n_jobs}
parameters.update(kwargs)
estimator = SKRandomForestClassifier(random_state=random_seed,
n_estimators=n_estimators,
max_depth=max_depth,
n_jobs=n_jobs)
max_features = max(1, int(percent_features * number_features)) if number_features else None
feature_selection = SkSelect(estimator=estimator,
max_features=max_features,
threshold=threshold,
**kwargs)
super().__init__(parameters=parameters,
component_obj=feature_selection,
random_seed=random_seed)
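# Hypothetical sketch (not part of the original file) of what the wrapped
# scikit-learn objects do on their own; the array shapes are illustrative only:
#
#     import numpy as np
#     from sklearn.ensemble import RandomForestClassifier
#     from sklearn.feature_selection import SelectFromModel
#
#     X, y = np.random.rand(100, 20), np.random.randint(0, 2, 100)
#     estimator = RandomForestClassifier(n_estimators=10, random_state=0)
#     selector = SelectFromModel(estimator, max_features=10, threshold=-np.inf)
#     X_selected = selector.fit_transform(X, y)  # keeps the 10 most important columns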
|
the-stack_0_2014 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
dcc.Graph(id='graph-with-slider'),
dcc.Slider(
id='year-slider',
min=df['year'].min(),
max=df['year'].max(),
value=df['year'].min(),
marks={str(year): str(year) for year in df['year'].unique()},
step=None
)
])
@app.callback(
Output('graph-with-slider', 'figure'),
Input('year-slider', 'value'))
def update_figure(selected_year):
filtered_df = df[df.year == selected_year]
fig = px.scatter(filtered_df, x="gdpPercap", y="lifeExp",
size="pop", color="continent", hover_name="country",
log_x=False, size_max=55)
fig.update_xaxes(range=[-5000, 60000])
fig.update_yaxes(range=[20, 100])
fig.update_layout(transition_duration=500)
return fig
if __name__ == '__main__':
app.run_server(debug=True)
|
the-stack_0_2015 | """
Chombo frontend tests
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.testing import \
requires_file, \
assert_equal, \
units_override_check
from yt.utilities.answer_testing.framework import \
requires_ds, \
small_patch_amr, \
data_dir_load
from yt.frontends.chombo.api import \
ChomboDataset, \
Orion2Dataset, \
PlutoDataset
_fields = ("density", "velocity_magnitude", # "velocity_divergence",
"magnetic_field_x")
gc = "GaussianCloud/data.0077.3d.hdf5"
@requires_ds(gc)
def test_gc():
ds = data_dir_load(gc)
yield assert_equal, str(ds), "data.0077.3d.hdf5"
for test in small_patch_amr(gc, _fields):
test_gc.__name__ = test.description
yield test
tb = "TurbBoxLowRes/data.0005.3d.hdf5"
@requires_ds(tb)
def test_tb():
ds = data_dir_load(tb)
yield assert_equal, str(ds), "data.0005.3d.hdf5"
for test in small_patch_amr(tb, _fields):
test_tb.__name__ = test.description
yield test
iso = "IsothermalSphere/data.0000.3d.hdf5"
@requires_ds(iso)
def test_iso():
ds = data_dir_load(iso)
yield assert_equal, str(ds), "data.0000.3d.hdf5"
for test in small_patch_amr(iso, _fields):
test_iso.__name__ = test.description
yield test
_zp_fields = ("rhs", "phi")
zp = "ZeldovichPancake/plt32.2d.hdf5"
@requires_ds(zp)
def test_zp():
ds = data_dir_load(zp)
yield assert_equal, str(ds), "plt32.2d.hdf5"
for test in small_patch_amr(zp, _zp_fields, input_center="c",
input_weight="rhs"):
test_zp.__name__ = test.description
yield test
kho = "KelvinHelmholtz/data.0004.hdf5"
@requires_ds(kho)
def test_kho():
ds = data_dir_load(kho)
yield assert_equal, str(ds), "data.0004.hdf5"
for test in small_patch_amr(kho, _fields):
test_kho.__name__ = test.description
yield test
@requires_file(zp)
def test_ChomboDataset():
assert isinstance(data_dir_load(zp), ChomboDataset)
@requires_file(gc)
def test_Orion2Dataset():
assert isinstance(data_dir_load(gc), Orion2Dataset)
@requires_file(kho)
def test_PlutoDataset():
assert isinstance(data_dir_load(kho), PlutoDataset)
@requires_file(zp)
def test_units_override_zp():
for test in units_override_check(zp):
yield test
@requires_file(gc)
def test_units_override_gc():
for test in units_override_check(gc):
yield test
@requires_file(kho)
def test_units_override_kho():
for test in units_override_check(kho):
yield test
|
the-stack_0_2017 | from jcudc24ingesterapi.models.sampling import _Sampling
from jcudc24ingesterapi import typed, APIDomainObject
from simplesos.client import SOSVersions
from simplesos.varients import _52North, SOSVariants, getSOSVariant
"""
Defines all possible data sources or in other words data input methods that can be provisioned.
"""
__author__ = 'Casey Bajema'
class _DataSource(APIDomainObject):
"""
Base data source class that does nothing beyond defining a known type.
Data sources are known types that provide a known set of information but are unrelated to the data type.
The ingester platform will need to implement data type specific ingesters for each data source.
"""
processing_script = typed("_processing_script", str, "Script to run after download")
def __init__(self, processing_script=None):
self.processing_script = processing_script
class DatasetDataSource(_DataSource):
"""
Uses the resulting data_entry from another dataset and processes it further.
"""
__xmlrpc_class__ = "dataset_data_source"
dataset_id = typed("_dataset_id", int, "")
def __init__(self, dataset_id=None, processing_script=None):
self.dataset_id = dataset_id
self.processing_script = processing_script
class PullDataSource(_DataSource):
"""
A data source that polls a URI for data of the dataset's data type.
"""
__xmlrpc_class__ = "pull_data_source"
url = typed("_url", (str,unicode), "URL of the directory to scan")
pattern = typed("_pattern", (str,unicode), "Pattern for identifying files, regex")
recursive = typed("_recursive", bool, "Should the URL be treated as an index page")
mime_type = typed("_mime_type", (str,unicode), "Mime type of the file")
field = typed("_field", (str,unicode), "Field name to ingest into")
sampling = typed("_sampling", _Sampling, "Script to run to determine when to sample")
def __init__(self, url=None, pattern=None, recursive=False, mime_type=None, field=None, processing_script=None, sampling=None):
"""Initialise the PullDataSource with a URI for the source file, and the field that
the uri will be saved to.
"""
self.url = url
self.field = field
self.pattern = pattern
self.mime_type = mime_type
self.processing_script = processing_script
self.sampling = sampling
self.recursive = recursive
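    # Hypothetical construction example (not part of the original file); the URL,
    # pattern and field name are illustrative only:
    #
    #     source = PullDataSource(url="http://example.com/data/", pattern=r".*\.csv",
    #                             recursive=True, mime_type="text/csv", field="file",
    #                             sampling=None)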
class PushDataSource(_DataSource):
"""
A data source where the external application will use the ingester platform API to pass data into.
"""
__xmlrpc_class__ = "push_data_source"
path = typed("_path", (str,unicode), "Path to monitor for new files")
pattern = typed("_pattern", (str,unicode), "Pattern for identifying files, regex")
archive = typed("_archive", (str,unicode), "Path where processed files are archived")
field = typed("_field", (str,unicode), "Field name to ingest into")
sampling = typed("_sampling", _Sampling, "Script to run to determine when to sample")
def __init__(self, path=None, pattern=None, archive=None, field=None, sampling=None):
self.path = path
self.pattern = pattern
self.archive = archive
self.field = field
self.sampling = sampling
class SOSScraperDataSource(_DataSource):
__xmlrpc_class__ = "sos_scraper_data_source"
url = typed("_url", (str,unicode), "URL of the directory to scan")
field = typed("_field", (str,unicode), "Field name to ingest into")
sampling = typed("_sampling", _Sampling, "Script to run to determine when to sample")
variant = typed("_variant", (str,unicode), "The SOS varient.")
version = typed("_version", (str,unicode), "The SOS API version to use.")
def __init__(self, url=None, field=None, sampling=None, processing_script=None, version=SOSVersions.v_1_0_0, variant="52North"):
self.url = url
self.field = field
self.sampling = sampling
self.variant = variant
self.version = version
self.processing_script = processing_script
class SOSDataSource(_DataSource):
"""
A data source that provides a Sensor Observation Service accessible over the web.
SOS standards will be followed such as:
* No authentication required
* Invalid data is dropped
""" # TODO: Work out the exact implementation details
sensor_id = None # Need to check the sensor_id type
sensorml = None
pass
class UploadDataSource(_DataSource):
"""
A data source where the user manually uploads a file using the provisioning system.
This data source will be very similar to PushDataSource but:
* Won't require authentication as it is using the standard provisioning system API by passing a data_entry object
* The provisioning system will setup an upload form.
"""
pass
class FormDataSource(_DataSource):
"""
A data source where the user manually enters data into a form within the provisioning interface
The data entry's will be passed to the ingester platform through the API as data_entry objects.
"""
__xmlrpc_class__ = "form_data_source"
pass
class DataTurbineDataSource(_DataSource):
"""
A data source that implements a data turbine sink.
"""
__xmlrpc_class__ = "data_turbine_data_source"
url = typed("_url", (str,unicode), "URL of the directory to scan")
mime_type = typed("_mime_type", (str,unicode), "Mime type of the channels to read from.")
data_type = typed("_data_type", (str,unicode), "What type data will be read from data turbine as (eg Float32)")
field = typed("_field", (str,unicode), "Field name to ingest into")
sampling = typed("_sampling", _Sampling, "Script to run to determine when to sample")
def __init__(self, url=None, data_type=False, mime_type=None, field=None, processing_script=None, sampling=None):
"""Initialise the PullDataSource with a URI for the source file, and the field that
the uri will be saved to.
"""
self.url = url
self.field = field
self.mime_type = mime_type
self.data_type = data_type
self.processing_script = processing_script
self.sampling = sampling
|
the-stack_0_2018 | import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
import dataset
import simple_net
def train_one_epoch(network, criterion, trainloader, optimizer):
network.train()
losses = []
correct = 0
total = 0
for idx, (feature, label) in enumerate(trainloader):
optimizer.zero_grad()
output = network(feature)
_, ind = torch.max(output, dim = 1)
correct += (ind == label).sum().item()
total += len(label)
loss = criterion(output, label)
losses.append(loss.item())
loss.backward()
optimizer.step()
message = '\r[{:5d}/{:5d}({:3.0%})] train loss: {:.2f}\ttrain acc: {:.2%}'.format(len(label) * idx, 40000, len(label) * idx / 40000, loss, correct / total)
print(message, end = '')
print()
message = 'Train Avg loss: {:.2f}\tTrain Acc: {:.2%}'.format(sum(losses) / len(losses), correct / total)
print(message)
def valid(network, validloader):
network.eval()
correct = 0
total = 0
with torch.no_grad():
for (feature, label) in validloader:
output = network(feature)
_, idx = torch.max(output, dim = 1)
correct += (idx == label).sum().item()
total += len(label)
message = 'Valid Acc: {:.2%}'.format(correct / total)
print(message)
def train(network, criterion, trainloader, validloader, optimizer, scheduler, start_epoch = 0, n_epochs = 20):
for _ in range(start_epoch):
scheduler.step()
for epoch in range(start_epoch, n_epochs):
train_one_epoch(network, criterion, trainloader, optimizer)
scheduler.step()
if (epoch + 1) % 3 == 0:
valid(network, validloader)
torch.save({'state_dict': network,
'optimizer': optimizer.state_dict()},
'checkpoint.pth')
def main():
trainset = dataset.Trainset()
validset = dataset.Trainset(training = False)
trainloader = DataLoader(trainset, batch_size = 64, shuffle = True, num_workers = 4)
validloader = DataLoader(validset, batch_size = 16, shuffle = True, num_workers = 4)
network = simple_net.SimpleNet()
optimizer = optim.SGD(network.parameters(), lr = 0.001, momentum = 0.9, weight_decay = 0.00001)
criterion = torch.nn.CrossEntropyLoss()
scheduler = lr_scheduler.StepLR(optimizer, step_size = 5, gamma = 0.5, last_epoch = -1)
train(network, criterion, trainloader, validloader, optimizer, scheduler)
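# Hypothetical resume sketch (not part of the original file): the checkpoint written
# in train() stores the whole model object under 'state_dict' plus the optimizer
# state, so a later run could continue from a given epoch like this:
#
#     checkpoint = torch.load('checkpoint.pth')
#     network = checkpoint['state_dict']               # the full nn.Module was saved, not just weights
#     optimizer.load_state_dict(checkpoint['optimizer'])
#     train(network, criterion, trainloader, validloader, optimizer, scheduler,
#           start_epoch=10, n_epochs=20)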
if __name__ == "__main__":
    main()
|
the-stack_0_2019 | from typing import List, Optional, Callable, Union, Any, Tuple
import re
import copy
import warnings
import numpy as np
import os.path as osp
from collections.abc import Sequence
import torch.utils.data
from torch import Tensor
from .data import Data
from .utils import makedirs
IndexType = Union[slice, Tensor, np.ndarray, Sequence]
class Dataset(torch.utils.data.Dataset):
r"""Dataset base class for creating graph datasets.
See `here <https://pytorch-geometric.readthedocs.io/en/latest/notes/
create_dataset.html>`__ for the accompanying tutorial.
Args:
root (string, optional): Root directory where the dataset should be
saved. (optional: :obj:`None`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
"""
@property
def raw_file_names(self) -> Union[str, List[str], Tuple]:
r"""The name of the files to find in the :obj:`self.raw_dir` folder in
order to skip the download."""
raise NotImplementedError
@property
def processed_file_names(self) -> Union[str, List[str], Tuple]:
r"""The name of the files to find in the :obj:`self.processed_dir`
folder in order to skip the processing."""
raise NotImplementedError
def download(self):
r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
raise NotImplementedError
def process(self):
r"""Processes the dataset to the :obj:`self.processed_dir` folder."""
raise NotImplementedError
def len(self) -> int:
raise NotImplementedError
def get(self, idx: int) -> Data:
r"""Gets the data object at index :obj:`idx`."""
raise NotImplementedError
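    # Hypothetical minimal subclass sketch (not part of the original file), modelled
    # on the tutorial referenced in the class docstring; file names and tensor
    # contents are illustrative only:
    #
    #     class MyDataset(Dataset):
    #         @property
    #         def raw_file_names(self):
    #             return []                      # nothing to download
    #         @property
    #         def processed_file_names(self):
    #             return ['data_0.pt']
    #         def process(self):
    #             data = Data(x=torch.randn(4, 3))
    #             torch.save(data, osp.join(self.processed_dir, 'data_0.pt'))
    #         def len(self):
    #             return 1
    #         def get(self, idx):
    #             return torch.load(osp.join(self.processed_dir, f'data_{idx}.pt'))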
def __init__(
self,
root: Optional[str] = None,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
pre_filter: Optional[Callable] = None,
):
super().__init__()
if isinstance(root, str):
root = osp.expanduser(osp.normpath(root))
self.root = root
self.transform = transform
self.pre_transform = pre_transform
self.pre_filter = pre_filter
self._indices: Optional[Sequence] = None
if "download" in self.__class__.__dict__.keys():
self._download()
if "process" in self.__class__.__dict__.keys():
self._process()
def indices(self) -> Sequence:
return range(self.len()) if self._indices is None else self._indices
@property
def raw_dir(self) -> str:
return osp.join(self.root, "raw")
@property
def processed_dir(self) -> str:
return osp.join(self.root, "processed")
@property
def num_node_features(self) -> int:
r"""Returns the number of features per node in the dataset."""
data = self[0]
if hasattr(data, "num_node_features"):
return data.num_node_features
raise AttributeError(
f"'{data.__class__.__name__}' object has no "
f"attribute 'num_node_features'"
)
@property
def num_features(self) -> int:
r"""Alias for :py:attr:`~num_node_features`."""
return self.num_node_features
@property
def num_edge_features(self) -> int:
r"""Returns the number of features per edge in the dataset."""
data = self[0]
if hasattr(data, "num_edge_features"):
return data.num_edge_features
raise AttributeError(
f"'{data.__class__.__name__}' object has no "
f"attribute 'num_edge_features'"
)
@property
def raw_paths(self) -> List[str]:
r"""The filepaths to find in order to skip the download."""
files = to_list(self.raw_file_names)
return [osp.join(self.raw_dir, f) for f in files]
@property
def processed_paths(self) -> List[str]:
r"""The filepaths to find in the :obj:`self.processed_dir`
folder in order to skip the processing."""
files = to_list(self.processed_file_names)
return [osp.join(self.processed_dir, f) for f in files]
def _download(self):
if files_exist(self.raw_paths): # pragma: no cover
return
makedirs(self.raw_dir)
self.download()
def _process(self):
f = osp.join(self.processed_dir, "pre_transform.pt")
if osp.exists(f) and torch.load(f) != _repr(self.pre_transform):
warnings.warn(
f"The `pre_transform` argument differs from the one used in "
f"the pre-processed version of this dataset. If you want to "
f"make use of another pre-processing technique, make sure to "
f"sure to delete '{self.processed_dir}' first"
)
f = osp.join(self.processed_dir, "pre_filter.pt")
if osp.exists(f) and torch.load(f) != _repr(self.pre_filter):
warnings.warn(
"The `pre_filter` argument differs from the one used in the "
"pre-processed version of this dataset. If you want to make "
"use of another pre-fitering technique, make sure to delete "
"'{self.processed_dir}' first"
)
if files_exist(self.processed_paths): # pragma: no cover
return
print("Processing...")
makedirs(self.processed_dir)
self.process()
path = osp.join(self.processed_dir, "pre_transform.pt")
torch.save(_repr(self.pre_transform), path)
path = osp.join(self.processed_dir, "pre_filter.pt")
torch.save(_repr(self.pre_filter), path)
print("Done!")
def __len__(self) -> int:
r"""The number of examples in the dataset."""
return len(self.indices())
def __getitem__(
self,
idx: Union[int, np.integer, IndexType],
) -> Union["Dataset", Data]:
r"""In case :obj:`idx` is of type integer, will return the data object
at index :obj:`idx` (and transforms it in case :obj:`transform` is
present).
In case :obj:`idx` is a slicing object, *e.g.*, :obj:`[2:5]`, a list, a
tuple, a PyTorch :obj:`LongTensor` or a :obj:`BoolTensor`, or a numpy
:obj:`np.array`, will return a subset of the dataset at the specified
indices."""
if (
isinstance(idx, (int, np.integer))
or (isinstance(idx, Tensor) and idx.dim() == 0)
or (isinstance(idx, np.ndarray) and np.isscalar(idx))
):
data = self.get(self.indices()[idx])
data = data if self.transform is None else self.transform(data)
return data
else:
return self.index_select(idx)
def index_select(self, idx: IndexType) -> "Dataset":
indices = self.indices()
if isinstance(idx, slice):
indices = indices[idx]
elif isinstance(idx, Tensor) and idx.dtype == torch.long:
return self.index_select(idx.flatten().tolist())
elif isinstance(idx, Tensor) and idx.dtype == torch.bool:
idx = idx.flatten().nonzero(as_tuple=False)
return self.index_select(idx.flatten().tolist())
elif isinstance(idx, np.ndarray) and idx.dtype == np.int64:
return self.index_select(idx.flatten().tolist())
        elif isinstance(idx, np.ndarray) and idx.dtype == np.bool_:
idx = idx.flatten().nonzero()[0]
return self.index_select(idx.flatten().tolist())
elif isinstance(idx, Sequence) and not isinstance(idx, str):
indices = [indices[i] for i in idx]
else:
raise IndexError(
f"Only integers, slices (':'), list, tuples, torch.tensor and "
f"np.ndarray of dtype long or bool are valid indices (got "
f"'{type(idx).__name__}')"
)
dataset = copy.copy(self)
dataset._indices = indices
return dataset
def shuffle(
self,
return_perm: bool = False,
) -> Union["Dataset", Tuple["Dataset", Tensor]]:
r"""Randomly shuffles the examples in the dataset.
Args:
return_perm (bool, optional): If set to :obj:`True`, will return
the random permutation used to shuffle the dataset in addition.
(default: :obj:`False`)
"""
perm = torch.randperm(len(self))
dataset = self.index_select(perm)
return (dataset, perm) if return_perm is True else dataset
def __repr__(self) -> str:
arg_repr = str(len(self)) if len(self) > 1 else ""
return f"{self.__class__.__name__}({arg_repr})"
def to_list(value: Any) -> Sequence:
if isinstance(value, Sequence) and not isinstance(value, str):
return value
else:
return [value]
def files_exist(files: List[str]) -> bool:
# NOTE: We return `False` in case `files` is empty, leading to a
# re-processing of files on every instantiation.
return len(files) != 0 and all([osp.exists(f) for f in files])
def _repr(obj: Any) -> str:
if obj is None:
return "None"
return re.sub("(<.*?)\\s.*(>)", r"\1\2", obj.__repr__())
|
the-stack_0_2022 | from time import sleep
import pyautogui
from textblob import TextBlob
from yandex_music_parser import YandexMusicParser
# add your Yandex mail, password and full link to your VK music page
YANDEX_MAIL = "*@yandex.com"
PASSWORD = "*"
VK_MUSIC_LINK = "https://vk.com/audios240917398"
CHROME_ICON = (215, 1055)
CHROME_URL = (410, 70)
SEARCH = (901, 406)
ADD_TRACK = (1462, 525)
SWITCH_LANGUAGE_step1 = (1732, 1059)
SWITCH_LANGUAGE_RUS = (1817, 834)
SWITCH_LANGUAGE_ENG = (1835, 919)
# used to determine the location of the cursor
screenWidth, screenHeight = pyautogui.size()
x, y = pyautogui.position()
print((x, y))
def open_browser():
print("Opening Google Chrome browser")
pyautogui.click(CHROME_ICON)
sleep(1)
def add_track(track_fullname):
sleep(1)
pyautogui.click(SEARCH)
sleep(1)
pyautogui.hotkey('ctrl', 'a')
sleep(1)
pyautogui.keyDown('backspace')
sleep(1)
pyautogui.typewrite(track_fullname)
sleep(1)
pyautogui.keyDown('enter')
sleep(1)
start = None
count = 5
while not start:
if not start:
start = pyautogui.locateCenterOnScreen('images/pattern_screenshot.png')
count -= 1
if count == 0:
break
pyautogui.moveTo(start)
x, y = pyautogui.position()
print((x, y))
ADD_TRACK = (x + 417, y + 74)
pyautogui.moveTo(ADD_TRACK)
pyautogui.click(ADD_TRACK)
sleep(1)
def fix_layout(track_fullname):
eng_chars = u"~!@#$%^&qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"|ZXCVBNM<>?"
rus_chars = u"ё!\"№;%:?йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ,"
trans_table = dict(zip(rus_chars, eng_chars))
return ''.join([trans_table.get(c, c) for c in track_fullname])
if __name__ == "__main__":
data = YandexMusicParser(YANDEX_MAIL, PASSWORD)
tracks_fullnames = data.parse_tracks()
open_browser()
for track_fullname in tracks_fullnames[::-1]:
language = TextBlob(track_fullname).detect_language()
if language == "ru":
pyautogui.moveTo(SWITCH_LANGUAGE_step1)
pyautogui.click(SWITCH_LANGUAGE_step1)
pyautogui.moveTo(SWITCH_LANGUAGE_RUS)
pyautogui.click(SWITCH_LANGUAGE_RUS)
add_track(fix_layout(track_fullname))
continue
else:
pyautogui.moveTo(SWITCH_LANGUAGE_step1)
pyautogui.click(SWITCH_LANGUAGE_step1)
pyautogui.moveTo(SWITCH_LANGUAGE_ENG)
pyautogui.click(SWITCH_LANGUAGE_ENG)
add_track(track_fullname)
sleep(1)
|
the-stack_0_2023 | from typing import Any
import requests
import pytest
from _pytest.monkeypatch import MonkeyPatch
from unittest.mock import Mock
from weather.libs.api.open_weather_map import OpenWeatherMap
from weather.libs.api.request_flow_controller import RequestFlowController
class TestOpenWeatherMap:
def test_init(self, fake_token: str, fake_owm: OpenWeatherMap) -> None:
assert fake_owm._token == fake_token
assert fake_owm._BASE_URL == 'https://api.openweathermap.org/data/'
assert fake_owm._VERSION == '2.5'
assert fake_owm.units == 'metric'
assert isinstance(fake_owm.flow_ctrl, RequestFlowController)
def test__url(self, fake_owm: OpenWeatherMap) -> None:
assert fake_owm._url == 'https://api.openweathermap.org/data/2.5/'
def test__get(
self,
fake_owm: OpenWeatherMap,
location_fake_data: dict[str, Any],
monkeypatch: MonkeyPatch,
) -> None:
class ResponsePatch:
def raise_for_status(self) -> None:
pass
def json(self) -> None:
return {'hello': 'world!'}
fake_get: Mock = Mock(return_value=ResponsePatch())
monkeypatch.setattr(requests, 'get', fake_get)
params: dict[str, Any] = location_fake_data
res: dict[str, Any] = fake_owm._get(
url=fake_owm._url + 'weather', params=params
)
assert res == fake_get.return_value.json()
def test_get_weather_by_coord(
self,
fake_owm: OpenWeatherMap,
location_fake_data: dict[str, Any],
monkeypatch: MonkeyPatch,
) -> None:
fake_get: Mock = Mock(return_value={'weather': 'Good'})
monkeypatch.setattr(OpenWeatherMap, '_get', fake_get)
res: dict[str, Any] = fake_owm.get_weather_by_coord(
**location_fake_data
)
assert res == fake_get.return_value
def test_sub_map(self, fake_owm: OpenWeatherMap) -> None:
assert len(list(fake_owm.sub_map(5))) == 2_592
|
the-stack_0_2025 | from unittest import TestCase
from unittest import main as unittest_main
from offconf import funcs, get_func, pipe
class TestUtils(TestCase):
expr = "foo|prepend('bar_')|append('_can')"
expect = "bar_foo_can"
def test_pipe(self):
self.assertEqual(pipe("foo|b64encode", funcs), "Zm9v")
self.assertEqual(pipe(self.expr, funcs), self.expect)
def test_get_func(self):
for idx, expr in enumerate(self.expr.split("|")):
if idx == 0:
self.assertEqual(get_func(funcs, expr, "null"), "foo")
else:
self.assertTrue(callable(get_func(funcs, expr, None)))
if __name__ == "__main__":
unittest_main()
|
the-stack_0_2026 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# oneflow.python.onnx.oneflow.python.onnx - rewrite oneflow graph to onnx graph
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import collections
import itertools
import logging
import os
import os.path
import sys
import traceback
from typing import Text, Optional, Dict, Callable, List
import numpy as np
from onnx import helper, onnx_pb
import oneflow
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.onnx
from oneflow.python.onnx import constants, schemas, util, handler, optimizer
from oneflow.python.onnx.graph import Graph
import oneflow.python.onnx.onnx_opset # pylint: disable=unused-import
logger = logging.getLogger(__name__)
def FlowToOnnxNaive(graph, shape_override):
"""
Convert node from oneflow format to onnx format.
Convert the oneflow nodes into an onnx graph with minimal rewrites so
we can use the onnx graph as intermediate graph.
The input/output/attr of each node are kept here and will be converted in other
following functions.
"""
dtypes = {}
for lbn in graph.helper.lbn2logical_blob_desc:
lbd = graph.helper.lbn2logical_blob_desc[lbn]
if lbn not in shape_override:
shape_override[lbn] = list(lbd.body.shape.dim)
dtypes[lbn] = util.Flow2OnnxDtype(lbd.body.data_type)
# some stats
op_cnt = collections.Counter()
attr_cnt = collections.Counter()
onnx_nodes = []
def is_user_op(node):
return node.WhichOneof("op_type") == "user_conf"
def get_op_conf(node):
conf_type = node.WhichOneof("op_type")
conf = getattr(node, conf_type)
return conf
def get_op_type(node):
if is_user_op(node):
return node.user_conf.op_type_name
return node.WhichOneof("op_type")[:-5]
def get_inputs(node):
if is_user_op(node):
ibns = handler.flow_op.ibn4op_type(get_op_type(node))
if ibns is None:
return list(
itertools.chain(*[x.s for x in node.user_conf.input.values()])
)
ipts = []
for ibn in ibns:
for key, val in node.user_conf.input.items():
if key == ibn:
assert len(val.s) == 1
ipts.append(val.s[0])
break
else:
raise ValueError(
"ibn {} of node {} (type {}) not found".format(
ibn, node.name, get_op_type(node)
)
)
return ipts
else:
conf = get_op_conf(node)
# it cannot cover all legacy op but it's enough
if hasattr(conf, "in"):
op_in = getattr(conf, "in")
if isinstance(op_in, str):
return [op_in]
else:
return op_in
else:
return []
def get_outputs(node):
if is_user_op(node):
obns = handler.flow_op.obn4op_type(get_op_type(node))
if obns is None:
assert all([len(x.s) == 1 for x in node.user_conf.output.values()])
return [x.s[0] for x in node.user_conf.output.values()]
outputs = []
for obn in obns:
for key, val in node.user_conf.output.items():
if key == obn:
assert len(val.s) == 1
outputs.append(val.s[0])
break
else:
raise ValueError(
"obn {} of node {} (type {}) not found".format(
obn, node.name, get_op_type(node)
)
)
else:
conf = get_op_conf(node)
# it cannot cover all legacy op but it's enough
if hasattr(conf, "out"):
out = getattr(conf, "out")
if isinstance(out, str):
outputs = [out]
else:
outputs = out
else:
outputs = []
outputs = ["{}/{}".format(node.name, output) for output in outputs]
return outputs
# minimal conversion of attributes
for node in graph.net.op:
attr = {}
op_cnt[get_op_type(node)] += 1
attrs = node.user_conf.attr.keys() if is_user_op(node) else []
for a in attrs:
attr_cnt[a] += 1
if a == "dtype":
attr[a] = util.Flow2OnnxDtype(util.get_flow_node_attr(node, "dtype"))
else:
attr[a] = util.get_flow_node_attr(node, a)
try:
op_type = get_op_type(node)
input_names = get_inputs(node)
output_names = get_outputs(node)
onnx_node = helper.make_node(
op_type, input_names, output_names, name=node.name, **attr
)
onnx_nodes.append(onnx_node)
except Exception as ex:
logger.error("pass1 convert failed for %s, ex=%s", node, ex)
raise
return onnx_nodes, op_cnt, attr_cnt, dtypes, shape_override
def FlowOnnxMapping(g, ops_mapping):
logger.debug("Mapping Oneflow node to ONNX node(s)")
mapped_op = collections.Counter()
unmapped_op = collections.Counter()
exceptions = []
ops = list(g.get_nodes())
for node in ops:
logger.debug("Process node: %s\n%s", node.name, node.summary)
if node.skip_conversion:
logger.debug("explicitly skip node " + node.name)
continue
op = node.op_type
map_info = ops_mapping.get(op)
if map_info is None:
unmapped_op[op] += 1
logger.error("oneflow op [%s: %s] is not supported", node.name, op)
continue
mapped_op[op] += 1
func, onnx_op, kwargs = map_info
if onnx_op is not None:
node.op_type = onnx_op
try:
func(g, node, **kwargs)
node.skip_conversion = True
except Exception as ex:
logger.error(
"Failed to convert node %s\n%s", node.name, node.summary, exc_info=1
)
exceptions.append(ex)
return mapped_op, unmapped_op, exceptions
def TopologicalSort(g, continue_on_error):
ops = g.get_nodes()
if not continue_on_error:
g.TopologicalSort(ops)
else:
try:
g.TopologicalSort(ops)
except: # pylint: disable=bare-except
# if we continue on error, ignore graph cycles so we can report all missing ops
pass
@session_ctx.try_init_default_session
@oneflow_export("onnx.export")
def Export(
job_func: Callable,
model_save_dir: Text,
onnx_filename: Text,
continue_on_error: bool = False,
opset: Optional[int] = None,
extra_opset: Optional[int] = None,
shape_override: Optional[Dict[Text, List[int]]] = None,
external_data: bool = False,
):
r"""Export a oneflow model into ONNX format.
Args:
job_func: The job function
model_save_dir: The directory containing oneflow model weights. Users are expected to call check_point.save(dir), wait for the model saving finishing, and pass the argument 'dir' as model_save_dir.
onnx_filename: a string for the output filename
continue_on_error: if an op can't be processed (aka there is no mapping), continue
opset: the opset to be used (int, default is oneflow.python.onnx.constants.PREFERRED_OPSET)
extra_opset: list of extra opset's, for example the opset's used by custom ops
shape_override: dict with inputs that override the shapes given by oneflow
external_data: Save weights as ONNX external data, usually to bypass the 2GB file size limit of protobuf.
"""
assert os.getenv("ENABLE_USER_OP") != "False"
assert os.path.isdir(model_save_dir)
job_set = c_api_util.GetJobSet()
job_name = job_func.__name__
for job in job_set.job:
if job.job_conf.job_name == job_name:
onnx_graph = ProcessFlowGraph(
job,
model_save_dir,
continue_on_error=continue_on_error,
opset=opset,
extra_opset=extra_opset,
shape_override=shape_override,
)
onnx_graph = optimizer.OptimizeGraph(onnx_graph)
model_proto = onnx_graph.MakeModel(
job_name, onnx_filename, external_data=external_data
)
with open(onnx_filename, "wb") as f:
try:
f.write(model_proto.SerializeToString())
except ValueError as e:
raise ValueError(
"Error occured when running model_proto.SerializeToString(). If the model is larger than 2GB, please specify external_data=True when calling flow.onnx.export. Original error message:\n{}".format(
e
)
)
return
raise ValueError('Cannot find job "{}" in jobset'.format(job_name))
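# Hypothetical usage sketch (not part of the original file); the job function name,
# checkpoint directory and output file are illustrative only:
#
#     # `lenet_job` is an oneflow job function whose weights were saved beforehand
#     # with check_point.save("./lenet_models"), as required by the docstring above.
#     oneflow.onnx.export(lenet_job, "./lenet_models", "lenet.onnx", opset=10)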
def ProcessFlowGraph(
flow_graph,
model_save_dir,
continue_on_error=False,
opset=None,
extra_opset=None,
shape_override=None,
):
opset = util.FindOpset(opset)
logger.info("Using opset <onnx, %s>", opset)
if opset > schemas.get_max_supported_opset_version():
logger.warning(
"Currently installed onnx package %s is too low to support opset %s, "
"please upgrade onnx package to avoid potential conversion issue.",
util.get_onnx_version(),
opset,
)
if shape_override is None:
shape_override = {}
(onnx_nodes, op_cnt, attr_cnt, dtypes, output_shapes,) = FlowToOnnxNaive(
flow_graph, shape_override
)
g = Graph(onnx_nodes, model_save_dir, output_shapes, dtypes, opset, extra_opset,)
# create ops mapping for the desired opsets
ops_mapping = handler.flow_op.CreateMapping(g.opset, g.extra_opset)
# some nodes may already copied into inner Graph, so remove them from main Graph.
TopologicalSort(g, continue_on_error)
mapped_op, unmapped_op, exceptions = FlowOnnxMapping(g, ops_mapping)
if unmapped_op:
logger.error("Unsupported ops: %s", unmapped_op)
if exceptions and not continue_on_error:
raise exceptions[0]
# onnx requires topological sorting
TopologicalSort(g, continue_on_error)
g.UpdateProto()
logger.debug(
"Summay Stats:\n"
"\toneflow ops: {}\n"
"\toneflow attr: {}\n"
"\tonnx mapped: {}\n"
"\tonnx unmapped: {}".format(op_cnt, attr_cnt, mapped_op, unmapped_op)
)
return g
|
the-stack_0_2028 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str):
while filename.endswith('\\'):
filename = filename[:-1]
return filename
def maybe_add_0000_to_all_niigz(folder):
nii_gz = subfiles(folder, suffix='.nii.gz')
for n in nii_gz:
n = remove_trailing_slash(n)
if not n.endswith('_0000.nii.gz'):
os.rename(n, n[:-7] + '_0000.nii.gz')
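# For example (illustrative only), a folder containing "case_001.nii.gz" would have
# that file renamed to "case_001_0000.nii.gz", while files already ending in
# "_0000.nii.gz" are left untouched.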
|
the-stack_0_2032 | # coding=utf-8
"""TEC === Tools to calculate total electron content value in the ionosphere
using data derived from global navigation satellite systems."""
# Shortcut
from .glo import collect_freq_nums
from .gnss import BAND_PRIORITY
from .rinex import ObsFileV2
from .rinex import ObsFileV3
# General information
__version__ = '1.1.1'
__author__ = __maintainer__ = 'Ilya Zhivetiev'
__email__ = '[email protected]'
def rnx(file, band_priority=BAND_PRIORITY, glo_freq_nums=None):
"""Return a reader object which will iterate over observation records in
the given file. Each iteration will return Tec object. The file can be any
object which supports iterator protocol.
Parameters
----------
file : file-like object
band_priority : dict
glo_freq_nums : dict
Returns
-------
reader : iterator
Yields Tec object for each satellite of the epoch.
"""
if glo_freq_nums is None:
glo_freq_nums = {}
try:
row = next(file)
rinex_version = float(row[:9])
rinex_type = row[20]
# rinex_sat_system = row[40]
except StopIteration:
raise ValueError("rnx: Empty input file")
except ValueError:
raise ValueError("rnx: Unknown file type")
if rinex_type.upper() != 'O':
raise Exception('rnx: Not an observation file')
rinex_reader = {
(2.0, 2.1, 2.11, 2.12): ObsFileV2,
(3.0, 3.01, 3.02, 3.03): ObsFileV3
}
reader = None
for ver in rinex_reader:
if rinex_version in ver:
reader = rinex_reader[ver]
if reader is None:
raise Exception('Unknown RINEX version: {}'.format(rinex_version))
return reader(
file,
version=rinex_version,
band_priority=band_priority,
glo_freq_nums=glo_freq_nums,
)
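# Hypothetical usage sketch (not part of the original file); the observation file
# name is illustrative only:
#
#     with open('site0010.17o') as obs_file:
#         reader = rnx(obs_file)
#         for tec in reader:          # each iteration yields a Tec object per satellite
#             print(tec)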
|
the-stack_0_2034 | import random, time
from UNP.Core import Account, ActTable
class Loginer:
def __init__(self):
self.mode = "001"
self.timer = -1
self.file = ""
def __str__(self):
string = "mode-" + self.mode + "_timer-" + str(self.timer)
if self.file != "":
string = string + "_filename-" + self.file
return string
def _iterator(self):
userlist_201901 = []
userlist_201906 = []
userlist_201907 = []
userlist = []
if self.mode[0] == '1':
userlist = userlist + userlist_201901
if self.mode[1] == '1':
userlist = userlist + userlist_201906
if self.mode[2] == '1':
userlist = userlist + userlist_201907
random.shuffle(userlist)
for user in userlist:
yield Account(username=user[0], name=user[1])
def active(self):
if self.mode[0].lower() in ['f', 't']:
for account in ActTable(input("Enter filename:")).iterator():
account.load()
if account.accessibility:
print("Welcome! " + account.name)
if self.timer == -1:
if input("Enter Y to stop:").lower() == 'y':
return
else:
print("refresh in " + str(self.timer) + " seconds...")
time.sleep(self.timer)
elif self.mode[0].lower() in ['p', 'c']:
for account in ActTable("customize.csv").iterator():
account.load()
if account.accessibility:
print("Welcome! " + account.name)
if self.timer == -1:
if input("Enter Y to stop:").lower() == 'y':
return
else:
print("refresh in " + str(self.timer) + " seconds...")
time.sleep(self.timer)
else:
for account in self._iterator():
account.load()
if account.accessibility:
print("Welcome! " + account.name)
if self.timer == -1:
if input("Enter Y to stop:").lower() == 'y':
return
else:
print("refresh in " + str(self.timer) + " seconds...")
time.sleep(self.timer)
if self.timer == -1:
input("accounts out")
else:
print("...another run...")
self.active()
def passive(self):
for account in self._iterator():
account.load()
if account.accessibility:
return
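# Usage sketch (hypothetical values; Account and ActTable come from UNP.Core):
#
#     loginer = Loginer()
#     loginer.mode = "101"   # pick the 2019-01 and 2019-07 user lists
#     loginer.timer = 60     # refresh every 60 seconds instead of prompting
#     loginer.active()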
|
the-stack_0_2036 | #!/usr/bin/env python
import logging
from contextlib import redirect_stdout
from io import StringIO
from itertools import count
from unittest import main
from unittest.mock import Mock
from cli_command_parser import Command, Action, no_exit_handler, ActionFlag, ParamGroup
from cli_command_parser.actions import help_action
from cli_command_parser.context import Context
from cli_command_parser.parameters import before_main, after_main, action_flag
from cli_command_parser.exceptions import CommandDefinitionError, ParameterDefinitionError, ParamConflict
from cli_command_parser.testing import ParserTest
log = logging.getLogger(__name__)
class ActionFlagTest(ParserTest):
def test_help_action(self):
mock = Mock(__name__='bar')
class Foo(Command, error_handler=no_exit_handler):
action = Action()
action.register(mock)
sio = StringIO()
with redirect_stdout(sio):
foo = Foo.parse(['bar', '-h'])
foo()
self.assertTrue(sio.getvalue().startswith('usage: '))
self.assertEqual(mock.call_count, 0)
def test_af_func_missing(self):
class Foo(Command):
foo = ActionFlag()
with self.assertRaisesRegex(ParameterDefinitionError, 'No function was registered'):
Foo.parse([])
def test_af_order_conflict(self):
class Foo(Command):
foo = ActionFlag()(Mock())
bar = ActionFlag()(Mock())
with self.assertRaisesRegex(CommandDefinitionError, 'different order values'):
Foo.parse([])
def test_af_non_me_group_conflict(self):
class Foo(Command):
with ParamGroup() as group:
foo = ActionFlag()(Mock())
bar = ActionFlag()(Mock())
with self.assertRaisesRegex(CommandDefinitionError, 'different order values'):
Foo.parse([])
def test_af_md_group_conflict(self):
class Foo(Command):
with ParamGroup(mutually_dependent=True) as group:
foo = ActionFlag()(Mock())
bar = ActionFlag()(Mock())
with self.assertRaisesRegex(CommandDefinitionError, 'different order values'):
Foo.parse([])
def test_af_me_group_ok(self):
class Foo(Command):
with ParamGroup(mutually_exclusive=True) as group:
foo = ActionFlag()(Mock())
bar = ActionFlag()(Mock())
self.assert_parse_results(Foo, [], {'foo': False, 'bar': False})
def test_af_mixed_grouping_rejected(self):
class Foo(Command):
with ParamGroup(mutually_exclusive=True) as group:
foo = ActionFlag()(Mock())
bar = ActionFlag()(Mock())
baz = ActionFlag()(Mock())
with self.assertRaisesRegex(CommandDefinitionError, 'different order values'):
Foo.parse([])
def test_af_mixed_grouping_ordered_ok(self):
attrs = ('foo', 'bar', 'baz')
for i, attr in enumerate(attrs):
with self.subTest(attr=attr):
mocks = [Mock(), Mock(), Mock()]
class Foo(Command):
with ParamGroup(mutually_exclusive=True) as group:
foo = ActionFlag()(mocks[0])
bar = ActionFlag()(mocks[1])
baz = ActionFlag(order=2)(mocks[2])
foo = Foo.parse([f'--{attr}'])
foo()
self.assertTrue(mocks[i].called)
for j in {0, 1, 2} - {i}:
self.assertFalse(mocks[j].called)
parsed = foo.ctx.get_parsed()
self.assertTrue(parsed[attr])
for a in set(attrs) - {attr}:
self.assertFalse(parsed[a])
def test_no_reassign(self):
with self.assertRaises(CommandDefinitionError):
class Foo(Command):
foo = ActionFlag()(Mock())
@foo
def bar(self):
pass
def test_short_option_conflict_rejected(self):
class Foo(Command):
bar = ActionFlag('-b', order=1)(Mock())
baz = ActionFlag('-b', order=2)(Mock())
with self.assertRaises(CommandDefinitionError):
Foo.parse([])
def test_extra_flags_provided_cause_error(self):
mocks = [Mock(), Mock()]
class Foo(Command, error_handler=None, multiple_action_flags=False):
foo = ActionFlag('-f', order=1)(mocks[0])
bar = ActionFlag('-b', order=2)(mocks[1])
expected_error_text = r'--foo / -f, --bar / -b \(combining multiple action flags is disabled\)'
with self.assertRaisesRegex(ParamConflict, expected_error_text):
Foo.parse_and_run(['-fb'])
with self.assertRaisesRegex(ParamConflict, expected_error_text):
Foo.parse_and_run(['--foo', '--bar'])
def test_multi_flag_order_followed(self):
class Foo(Command, multiple_action_flags=True):
def __init__(self):
self.call_order = {}
self.counter = count()
@action_flag('-f', order=1)
def foo(self):
self.call_order['foo'] = next(self.counter)
@action_flag('-b', order=2)
def bar(self):
self.call_order['bar'] = next(self.counter)
for case, args in {'combined': ['-fb'], 'split': ['-b', '-f']}.items():
with self.subTest(case=case):
foo = Foo.parse_and_run(args)
self.assertLess(foo.call_order['foo'], foo.call_order['bar'])
def test_before_and_after_flags(self):
class Foo(Command, multiple_action_flags=True):
def __init__(self):
self.call_order = {}
self.counter = count()
@before_main('-f', order=1)
def foo(self):
self.call_order['foo'] = next(self.counter)
def main(self):
super().main()
self.call_order['main'] = next(self.counter)
@after_main('-b', order=2)
def bar(self):
self.call_order['bar'] = next(self.counter)
for case, args in {'combined': ['-fb'], 'split': ['-b', '-f']}.items():
with self.subTest(case=case):
foo = Foo.parse_and_run(args)
self.assertLess(foo.call_order['foo'], foo.call_order['main'])
self.assertLess(foo.call_order['main'], foo.call_order['bar'])
self.assertEqual(2, foo.ctx.actions_taken) # 2 because no non-flag Actions
with self.subTest(case='only after'):
foo = Foo.parse_and_run(['-b'])
self.assertNotIn('foo', foo.call_order)
self.assertLess(foo.call_order['main'], foo.call_order['bar'])
self.assertEqual(1, foo.ctx.actions_taken) # 1 because no non-flag Actions
with self.subTest(case='only before'):
foo = Foo.parse_and_run(['-f'])
self.assertLess(foo.call_order['foo'], foo.call_order['main'])
self.assertNotIn('bar', foo.call_order)
self.assertEqual(1, foo.ctx.actions_taken) # 1 because no non-flag Actions
def test_af_before_and_after_with_action(self):
class Foo(Command):
action = Action()
def __init__(self):
self.call_order = {}
self.counter = count()
@action(default=True)
def default_action(self):
self.call_order['default_action'] = next(self.counter)
@before_main('-f')
def foo(self):
self.call_order['foo'] = next(self.counter)
@after_main('-b')
def bar(self):
self.call_order['bar'] = next(self.counter)
foo = Foo.parse_and_run(['-fb'])
self.assertLess(foo.call_order['foo'], foo.call_order['default_action'])
self.assertLess(foo.call_order['default_action'], foo.call_order['bar'])
self.assertEqual(3, foo.ctx.actions_taken)
def test_bad_action(self):
with self.assertRaises(ParameterDefinitionError):
class Foo(Command):
action_flag(action='store')(Mock())
def test_equals(self):
self.assertEqual(help_action, help_action)
def test_dunder_get(self):
mock = Mock()
class Foo(Command):
@action_flag('-f')
def foo(self):
mock()
Foo.parse(['-f']).foo()
self.assertTrue(mock.called)
def test_no_result(self):
mock = Mock()
class Foo(Command):
@action_flag('-b')
def bar(self):
mock()
foo = Foo.parse(['-b'])
self.assertIsInstance(Foo.bar, ActionFlag)
with foo.ctx:
self.assertFalse(Foo.bar.result()(foo))
def test_no_func(self):
flag = ActionFlag()
with Context() as ctx:
flag.store_const()
with self.assertRaises(ParameterDefinitionError):
flag.result()
def test_not_provided(self):
flag = ActionFlag()
with Context() as ctx:
self.assertFalse(flag.result())
def test_before_main_sorts_before_after_main(self):
a, b = ActionFlag(before_main=False), ActionFlag(before_main=True)
expected = [b, a]
self.assertListEqual(expected, sorted([a, b]))
def test_after_main_always_available(self):
with self.assertRaisesRegex(ParameterDefinitionError, 'cannot be combined with'):
ActionFlag(before_main=False, always_available=True)
def test_nargs_not_allowed(self):
with self.assertRaises(TypeError):
ActionFlag(nargs='+')
def test_type_not_allowed(self):
with self.assertRaises(TypeError):
ActionFlag(type=int)
def test_choices_not_allowed(self):
with self.assertRaises(TypeError):
ActionFlag(choices=(1, 2))
if __name__ == '__main__':
try:
main(warnings='ignore', verbosity=2, exit=False)
except KeyboardInterrupt:
print()
|
the-stack_0_2038 | #!/usr/bin/env python3
"""Fetch RSS feed from phpBB forum and post it to Slack channel.
2017/Nov/15 @ Zdenek Styblik <[email protected]>
"""
import argparse
import logging
import sys
import time
import traceback
from typing import Dict, List
import feedparser
import rss2irc
import rss2slack
CACHE_EXPIRATION = 86400 # seconds
HTTP_TIMEOUT = 30 # seconds
def format_message(
url: str, msg_attrs: Dict[str, str], handle: str = ''
) -> Dict:
"""Return formatted message as Slack's BlockKit section.
:raises: `KeyError`
"""
if handle:
if 'category' in msg_attrs and msg_attrs['category']:
tag = '[{:s}-{:s}] '.format(handle, msg_attrs['category'])
else:
tag = '[{:s}] '.format(handle)
else:
tag = ''
return {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '{:s}<{:s}|{:s}> ({:d})'.format(
tag, url, msg_attrs['title'], msg_attrs['comments_cnt']
)
}
}
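# Example (hypothetical values): for handle 'forum', a thread in category
# 'news' with 12 comments yields a BlockKit section such as
#     {'type': 'section',
#      'text': {'type': 'mrkdwn',
#               'text': '[forum-news] <https://forum.example.org/viewtopic.php?t=1|New release> (12)'}}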
def get_authors_from_file(logger: logging.Logger, fname: str) -> List[str]:
"""Return list of authors of interest from given file."""
if not fname:
return []
try:
with open(fname, 'rb') as fhandle:
authors = [
line.decode('utf-8').strip()
for line in fhandle.readlines()
if line.decode('utf-8').strip() != ''
]
except Exception:
logger.error(traceback.format_exc())
authors = []
return authors
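# The authors file read above is expected to hold one author name per line;
# blank lines are ignored (hypothetical contents shown):
#     alice
#     bob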
def main():
"""Fetch phpBB RSS feed and post RSS news to Slack."""
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
logger = logging.getLogger('phpbb2slack')
args = parse_args()
if args.verbosity:
logger.setLevel(logging.DEBUG)
if args.cache_expiration < 0:
logger.error("Cache expiration can't be less than 0.")
sys.exit(1)
try:
slack_token = rss2slack.get_slack_token()
authors = get_authors_from_file(logger, args.authors_file)
data = rss2irc.get_rss(logger, args.rss_url, args.rss_http_timeout)
if not data:
logger.error('Failed to get RSS from %s', args.rss_url)
sys.exit(1)
news = parse_news(data, authors)
if not news:
logger.info('No news?')
sys.exit(0)
cache = rss2irc.read_cache(logger, args.cache)
scrub_cache(logger, cache)
for key in list(news.keys()):
if key not in cache.items:
continue
logger.debug('Key %s found in cache', key)
comments_cached = int(cache.items[key]['comments_cnt'])
comments_actual = int(news[key]['comments_cnt'])
if comments_cached == comments_actual:
cache.items[key]['expiration'] = (
int(time.time()) + args.cache_expiration
)
news.pop(key)
slack_client = rss2slack.get_slack_web_client(
slack_token, args.slack_base_url, args.slack_timeout
)
if not args.cache_init:
for url in list(news.keys()):
msg_blocks = [
format_message(url, news[url], args.handle)
]
try:
rss2slack.post_to_slack(
logger, msg_blocks, slack_client, args.slack_channel,
)
except ValueError:
news.pop(url)
finally:
time.sleep(args.sleep)
expiration = int(time.time()) + args.cache_expiration
update_cache(cache, news, expiration)
rss2irc.write_cache(cache, args.cache)
except Exception:
logger.debug(traceback.format_exc())
# TODO(zstyblik):
# 1. touch error file
# 2. send error message to the channel
finally:
sys.exit(0)
def parse_args() -> argparse.Namespace:
"""Return parsed CLI args."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--authors-of-interest',
dest='authors_file', type=str, default=None,
help='Path to file which contains list of authors, one per line. '
'Only threads which are started by one of the authors on the '
'list will be pushed.'
)
parser.add_argument(
'--cache',
dest='cache', type=str, default=None,
help='Path to cache file.'
)
parser.add_argument(
'--cache-expiration',
dest='cache_expiration', type=int,
default=CACHE_EXPIRATION,
help='Time, in seconds, for how long to keep items in cache.'
)
parser.add_argument(
'--cache-init',
dest='cache_init', action='store_true', default=False,
        help='Prevents posting news to Slack. This is useful '
             'when bootstrapping a new RSS feed.'
)
parser.add_argument(
'--handle',
dest='handle', type=str, default=None,
help='Handle/callsign of this feed.'
)
parser.add_argument(
'--rss-url',
dest='rss_url', type=str, required=True,
help='URL of RSS Feed.'
)
parser.add_argument(
'--rss-http-timeout',
dest='rss_http_timeout', type=int,
default=HTTP_TIMEOUT,
help='HTTP Timeout. Defaults to {:d} seconds.'.format(HTTP_TIMEOUT)
)
parser.add_argument(
'--slack-base-url',
dest='slack_base_url', type=str,
default=rss2slack.SLACK_BASE_URL,
help='Base URL for Slack client.'
)
parser.add_argument(
'--slack-channel',
dest='slack_channel', type=str, required=True,
help='Name of Slack channel to send formatted news to.'
)
parser.add_argument(
'--slack-timeout',
dest='slack_timeout', type=int,
default=HTTP_TIMEOUT,
help='Slack API Timeout. Defaults to {:d} seconds.'.format(
HTTP_TIMEOUT
)
)
parser.add_argument(
'--sleep',
dest='sleep', type=int, default=2,
help='Sleep between messages in order to avoid '
'possible excess flood/API call rate limit.'
)
parser.add_argument(
'-v', '--verbose',
dest='verbosity', action='store_true', default=False,
help='Increase logging verbosity.'
)
return parser.parse_args()
def parse_news(data: str, authors: List[str]) -> Dict:
"""Parse-out link and title out of XML."""
news = {}
feed = feedparser.parse(data)
for entry in feed['entries']:
link = entry.pop('link', None)
title = entry.pop('title', None)
author_detail = entry.pop('author_detail', {'name': None})
        if not link or not title:
            # Entries without a link or a title cannot be posted; skip them.
            continue
if authors and author_detail['name'] not in authors:
continue
category = entry.pop('category', None)
comments_cnt = entry.pop('slash_comments', 0)
try:
comments_cnt = int(comments_cnt)
except ValueError:
comments_cnt = 0
news[link] = {
'title': title,
'category': category,
'comments_cnt': int(comments_cnt),
}
return news
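# parse_news() returns a mapping keyed by thread URL (hypothetical values):
#     {'https://forum.example.org/viewtopic.php?t=1':
#         {'title': 'New release', 'category': 'news', 'comments_cnt': 12}}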
def scrub_cache(logger: logging.Logger, cache: rss2irc.CachedData) -> None:
"""Scrub cache and remove expired items."""
time_now = int(time.time())
for key in list(cache.items.keys()):
try:
expiration = int(cache.items[key]['expiration'])
except (KeyError, ValueError):
logger.error(traceback.format_exc())
logger.error(
"Invalid cache entry will be removed: '%s'", cache.items[key]
)
cache.items.pop(key)
continue
if expiration < time_now:
logger.debug('URL %s has expired.', key)
cache.items.pop(key)
def update_cache(
cache: rss2irc.CachedData, news: Dict, expiration: int
) -> None:
"""Update cache contents."""
for key in list(news.keys()):
cache.items[key] = {
'expiration': expiration,
'comments_cnt': int(news[key]['comments_cnt']),
}
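# Each cache entry mirrors the parsed news item (hypothetical values):
#     cache.items['https://forum.example.org/viewtopic.php?t=1'] = {
#         'expiration': 1700000000,   # unix time after which scrub_cache() drops it
#         'comments_cnt': 12,
#     }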
if __name__ == '__main__':
main()
|
the-stack_0_2039 | import math
import random
import smtplib
import re
import json
import pandas as pd
import requests
from bs4 import BeautifulSoup
class PostalUtils:
def __init__(self, pc):
self.pc = str(pc)
self.data = None
def get_details(self):
        self.data = requests.get(f'https://thezipcodes.com/api/v1/search?zipCode={self.pc}&countryCode=IN&apiKey=66a4d8e95477daca5f139eedbca5ca3d')
if self.data.status_code != 200:
self.data = None
def extract_info(self):
self.data = json.loads(self.data.text)
if self.data['success']:
country = self.data['location'][0]['country']
region = self.data['location'][0]['city']
state = self.data['location'][0]['state']
return region, state, country
return 'Data unavailable. Check PINCODE!'
class OTP:
def __init__(self):
self.otp = None
def generate_otp(self, leng = 6):
digits="0123456789"
OTP=""
for i in range(leng):
OTP+=digits[math.floor(random.random()*10)]
return OTP
def send_email(self, to_mail):
try:
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.ehlo()
s.login("[email protected]", "!1Abcderf")
self.otp = self.generate_otp()
s.sendmail("[email protected]", to_mail, self.otp)
s.quit()
return self.otp, 'Success'
except Exception as e:
return self.otp, e
def validate_details(otp, email_otp_field, dob, aadhar, pan, passport):
from datetime import date
try:
today = date.today()
birthDate = dob
age = today.year - birthDate.year - ((today.month, today.day) < (birthDate.month, birthDate.day))
if age < 18:
            return 'Should be at least 18 years old!'
elif not aadharNumVerify(aadhar):
return 'Invalid Aadhar!'
elif validate_pan(pan):
return 'Invalid PAN'
elif not passport_validator(passport):
return 'Invalid passport number'
elif str(email_otp_field) != str(otp):
return 'Incorrect OTP!'
except Exception as e:
return e
def validate_pincode(pincode):
try:
postal_details = PostalUtils(pincode)
postal_details.get_details()
r,s,c = postal_details.extract_info()
return r, s, c
except Exception as e:
return e
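# Example (hypothetical PIN code, assuming the thezipcodes.com API responds
# successfully):
#     region, state, country = validate_pincode('110001')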
def validate_pan(pan, flag = 'individual'):
pan = pan.upper()
if flag == 'individual':
regex = "[A-Z]{3}P[A-Z][0-9]{4}[A-Z]{1}"
p = re.compile(regex)
if not (re.search(p, pan) and len(pan) == 10):
return True
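# NOTE: validate_pan() returns True when the PAN *fails* the pattern check and
# None (falsy) when it looks valid, which matches how validate_details() above
# uses it. Example with made-up values: validate_pan('ABCPD1234E') -> None
# (accepted), validate_pan('ABCD12345') -> True (rejected: wrong length/shape).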
def aadharNumVerify(aadhar) :
"""
Reference : https://stackoverflow.com/questions/27686384/validating-the-aadhar-card-number-in-a-application
"""
verhoeff_table_d = (
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
(1, 2, 3, 4, 0, 6, 7, 8, 9, 5),
(2, 3, 4, 0, 1, 7, 8, 9, 5, 6),
(3, 4, 0, 1, 2, 8, 9, 5, 6, 7),
(4, 0, 1, 2, 3, 9, 5, 6, 7, 8),
(5, 9, 8, 7, 6, 0, 4, 3, 2, 1),
(6, 5, 9, 8, 7, 1, 0, 4, 3, 2),
(7, 6, 5, 9, 8, 2, 1, 0, 4, 3),
(8, 7, 6, 5, 9, 3, 2, 1, 0, 4),
(9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
verhoeff_table_p = (
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
(1, 5, 7, 6, 2, 8, 3, 0, 9, 4),
(5, 8, 0, 3, 7, 9, 6, 1, 4, 2),
(8, 9, 1, 6, 0, 4, 3, 5, 2, 7),
(9, 4, 5, 3, 1, 2, 6, 8, 7, 0),
(4, 2, 8, 6, 5, 7, 3, 9, 0, 1),
(2, 7, 9, 3, 8, 0, 6, 4, 1, 5),
(7, 0, 4, 6, 9, 1, 3, 2, 5, 8))
# verhoeff_table_inv = (0, 4, 3, 2, 1, 5, 6, 7, 8, 9)
def checksum(aadhar_inner):
"""For a given number generates a Verhoeff digit and
returns number + digit"""
c = 0
for i, item in enumerate(reversed(aadhar_inner)):
c = verhoeff_table_d[c][verhoeff_table_p[i % 8][int(item)]]
return c
# Validate Verhoeff checksum
return checksum(str(aadhar)) == 0 and len(str(aadhar)) == 12
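# A value is accepted only when it is exactly 12 digits AND the Verhoeff
# checksum over the full string is 0; anything failing either test, e.g. the
# 11-digit '12345678901', is rejected (illustrative number only).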
def passport_validator(passp):
skeleton = "^[A-PR-WYa-pr-wy][1-9]\\d\\s?\\d{4}[1-9]$"
p = re.compile(skeleton)
m = re.match(p, passp)
if m is None or len(passp) != 8:
return False
else:
return True
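# Examples (made-up numbers, checked against the pattern above):
#     passport_validator('A1234567') -> True   (allowed letter + 7 digits)
#     passport_validator('Q1234567') -> False  ('Q' is outside A-P, R-W, Y)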
class Scraper_1:
def __init__(self, c_name, cin):
self.data = {}
self.c_name = c_name
self.cin = cin
self.dins_reference = []
self.link = f'https://www.zaubacorp.com/company/{self.c_name.replace(" ", "-").upper()}/{self.cin}'
def scrape(self):
try:
table_MN = pd.read_html(self.link)
if table_MN is not None:
self.data = {table_MN[0].columns[0]:table_MN[0].columns[1]}
self.data.update({value[0]:value[1] for value in table_MN[0].values})
for value in table_MN[7].iloc[:,0].values:
if value.isnumeric():
self.dins_reference.append(value)
self.dins_reference = set(self.dins_reference)
response = requests.get(self.link)
content = BeautifulSoup(response.text, "html.parser")
add_c = content.find_all("div", class_= 'col-lg-6 col-md-6 col-sm-12 col-xs-12')[2].text.split('Address: ')[1]
self.data['address'] = add_c
else:
return 'Incorrect name'
except Exception as e:
return e
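# Usage sketch (hypothetical company name and CIN): scrape the public listing
# page and collect registration details plus director DINs.
#
#     scr = Scraper_1('EXAMPLE PRIVATE LIMITED', 'U12345MH2010PTC123456')
#     err = scr.scrape()
#     if err is None:
#         print(scr.data.get('Company Status'), sorted(scr.dins_reference))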
def check_c(corporate_name, c_city, c_reg_no, cin_no, c_status, c_doi, c_DIN, c_gstin, c_pan, c_cat, f_ly, f_ly_2, v1, v2, c_address, c_state):
score = 0
scr = Scraper_1(corporate_name, cin_no)
content = scr.scrape()
err = ''
if content is not None:
return content
else:
if corporate_name == scr.data['Company Name'].upper():
score += 1
else:
err += 'No such Corp. found with the given name;'
if c_address in scr.data['address']:
score += 1
else:
err += 'Address incorrect;'
if c_city in scr.data['address']:
score +=1
else:
err += 'Incorrect city;'
if c_state in scr.data['address']:
score +=1
else:
err += 'Incorrect State;'
if c_status == scr.data['Company Status']:
score += 1
else:
err += 'Incorrect company status;'
if c_cat == scr.data['Company Sub Category']:
score += 1
else:
err += 'Incorrect company category;'
if c_reg_no == str(scr.data['Registration Number']):
score += 1
else:
err += 'Incorrect registration number;'
if cin_no == str(scr.data['CIN']):
score += 1
else:
err += 'Incorrect CIN;'
if set(c_DIN.split(';')) == scr.dins_reference:
score += 1
else:
err += 'DINs missing or not mentioned completely;'
regex = "^[0-9]{2}[A-Z]{5}[0-9]{4}" + "[A-Z]{1}[1-9A-Z]{1}" + "Z[0-9A-Z]{1}$"
p = re.compile(regex)
if (re.search(p, str(c_gstin))):
score += 1
else:
err += 'Invalid GSTIN;'
if str(c_gstin)[2:12] == c_pan:
score += 1
else:
err += 'Invalid PAN;'
if str(v1).replace(',', '') == str(f_ly) and str(v2).replace(',', '') == str(f_ly_2):
score += 1
else:
err += 'Invalid financials'
return err
|