filename | text
---|---
the-stack_0_15350 | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GZTAN dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@misc{tzanetakis_essl_cook_2001,
author = "Tzanetakis, George and Essl, Georg and Cook, Perry",
title = "Automatic Musical Genre Classification Of Audio Signals",
url = "http://ismir2001.ismir.net/pdf/tzanetakis.pdf",
publisher = "The International Society for Music Information Retrieval",
year = "2001"
}
"""
_DESCRIPTION = """
The dataset consists of 1000 audio tracks each 30 seconds long.
It contains 10 genres, each represented by 100 tracks.
The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
The genres are:
* blues
* classical
* country
* disco
* hiphop
* jazz
* metal
* pop
* reggae
* rock
"""
_DOWNLOAD_URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
_HOMEPAGE_URL = "http://marsyas.info/index.html"
_CLASS_LABELS = [
"blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop",
"reggae", "rock"
]
class GTZAN(tfds.core.GeneratorBasedBuilder):
"""GTZAN Dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"audio": tfds.features.Audio(file_format="wav", sample_rate=22050),
"label": tfds.features.ClassLabel(names=_CLASS_LABELS),
"audio/filename": tfds.features.Text(),
}),
supervised_keys=("audio", "label"),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract({"genres": _DOWNLOAD_URL})
path = os.path.join(dl_paths["genres"], "genres")
# There is no predefined train/val/test split for this dataset.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN, gen_kwargs={"path": path}),
]
def _generate_examples(self, path):
"""Yields examples.
Args:
path: Path of the downloaded and extracted directory
Yields:
Next examples
"""
for root, _, file_name in tf.io.gfile.walk(path):
for fname in file_name:
if fname.endswith(".wav"): # select only .wav files
# Each .wav file has name in the format of <genre>.<number>.wav
label = fname.split(".")[0]
key = fname
example = {
"audio": os.path.join(root, fname),
"label": label,
"audio/filename": fname,
}
yield key, example
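
# A minimal usage sketch, not part of the original builder: once this module is
# registered with TFDS, the dataset can be loaded by its snake-cased builder
# name. The name "gtzan" and the as_supervised tuples below follow TFDS
# conventions and are assumptions rather than guarantees of this file.
if __name__ == "__main__":
    ds = tfds.load("gtzan", split="train", as_supervised=True)
    for audio, label in ds.take(1):
        print(audio.shape, label.numpy())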
|
the-stack_0_15351 | """
Forecast datasets & data generators
"""
import os.path
from typing import Union, List
import numpy as np
from numpy.random import default_rng
import pandas as pd
"""
Synthetic sequences of (non-iid) true probs/means
"""
def bernoulli(
n: int,
p: Union[float, List, np.ndarray] = 0.5,
rng: np.random.Generator = np.random.default_rng(),
) -> np.ndarray:
"""Return a sequence of Bernoulli random variables."""
return rng.binomial(1, p, size=n)
def zeros_then_ones(
n_zeros: int,
n_ones: int,
) -> np.ndarray:
"""Return a sequence of `n_zeros` 0's followed by `n_ones` 1's."""
return np.concatenate([np.zeros((n_zeros, )), np.ones((n_ones, ))])
def zeros_then_ones_repeated(
n: int,
n_spans: int,
roll: int = 0,
) -> np.ndarray:
"""Return a repeating sequence of 0's and 1's."""
assert 1 <= n_spans <= n
span = n // n_spans
ys = np.concatenate([
zeros_then_ones(span, span)
for _ in range((n_spans + 1) // 2)
])[:n]
return np.roll(ys, roll)
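# For intuition, a small worked example (added, not in the original source):
#   zeros_then_ones_repeated(8, 4)          -> [0, 0, 1, 1, 0, 0, 1, 1]
#   zeros_then_ones_repeated(8, 4, roll=1)  -> [1, 0, 0, 1, 1, 0, 0, 1]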
def randoms_zeros_then_ones(
n_randoms: int,
n_zeros: int,
n_ones: int,
p: float = 0.5,
rng: np.random.Generator = default_rng(),
) -> np.ndarray:
"""Return a sequence of `n_randoms` Bernoulli(p) random variables,
followed by `n_zeros` 0's and `n_ones` 1's."""
return np.concatenate([rng.binomial(1, p, size=n_randoms),
np.zeros((n_zeros, )),
np.ones((n_ones, ))])
def default(
n: int,
):
"""Default setting for the paper.
Random for the first 100, and then repeated zeros-then-ones in
each log-scale span ([101, 1000], [1001, 10000], ...).
"""
n_spans = int(np.log10(n))
assert n_spans >= 2, f"default setting requires n > 100 (given: {n})"
seqs = [np.repeat(0.5, 100)]
for span in range(2, n_spans):
r = 10 ** (span + 1) - 10 ** span
seqs.append(zeros_then_ones(r // 4, r // 4))
seqs.append(zeros_then_ones(r // 4, r // 4)[::-1])
return np.concatenate(seqs)
def sigmoid(
n: int,
changepoint: float = 0.25,
) -> np.ndarray:
"""Return a sequence of values between [0, 1] that follow a sigmoid fn."""
grid = 20. * (np.linspace(0, 1, num=n) - changepoint) # [-10, 10]
return 1. / (1. + np.exp(-grid))
"""
Presets:
binary: pd.DataFrame(time, data, true_probs)
continuous: pd.DataFrame(time, data, true_means, true_params)
"""
def make_preset(
true_probs: np.ndarray,
rng: np.random.Generator = default_rng(),
):
"""A helper function that makes binary data given true probabilities."""
n = len(true_probs)
data = bernoulli(n, true_probs, rng=rng)
return pd.DataFrame({
"time": np.arange(1, n + 1),
"data": data,
"true_probs": true_probs,
})
def preset_default(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Default synthetic data.
Generated from a noisy version of
100 1/2s, 1000 1s, 1000 0s, 1000 1s, 1000 0s, ..., 1000 1s, and 500 0s."""
pattern = default(n)
true_probs = 0.8 * pattern + 0.2 * (1 - pattern)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_random(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Random synthetic data: true_prob == 0.5 + noise for all rounds."""
true_probs = np.repeat(0.5, n)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_sigmoid(
n: int,
noise: float = 0.25,
rng: np.random.Generator = default_rng(),
changepoint: float = 0.25, # between [0, 1]
) -> pd.DataFrame:
"""A smoothly increasing function with a changepoint + sinusoidal noise."""
pattern = sigmoid(n, changepoint)
sine_noise = np.sin(0.1 * np.arange(n)) + rng.normal(0, 1, n)
true_probs = np.clip(pattern + noise * sine_noise, 0, 1)
return make_preset(true_probs, rng)
def make_preset_beta(
true_means: np.ndarray,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""A helper function that makes continuous data given true means, where
y_t ~ Beta(r_t, 1 - r_t)."""
n = len(true_means)
true_params = [true_means, 1. - true_means]
data = rng.beta(*true_params)
out = {
"time": np.arange(1, n + 1),
"data": data,
"true_means": true_means,
"true_dist": ["beta" for _ in range(n)],
}
out.update({
f"true_param{i}": true_param
for i, true_param in enumerate(true_params)
})
return pd.DataFrame(out)
def preset_beta(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Synthetic data with continuous outcomes taking values in [-1, 1].
z_t ~ Beta(r_t, 1 - r_t)
y_t = 2 * z_t - 1
"""
pattern = sigmoid(n, changepoint=0.25)
true_means = 0.8 * pattern + 0.2 * (1 - pattern)
true_means = np.clip(true_means + rng.normal(0, noise, n), 0.01, 0.99)
return make_preset_beta(true_means, rng)
# pd.DataFrame(time, data, true_probs)
PRESETS = {
"default": preset_default,
"random": preset_random,
"sigmoid": preset_sigmoid,
"beta": preset_beta,
}
def get_data(
data_name: str,
size: int = 0,
noise: float = 0.1,
rng: Union[int, np.random.Generator] = default_rng(),
) -> pd.DataFrame:
"""Get data from its name or filename, up to n_rounds."""
if os.path.exists(data_name):
data = pd.read_csv(data_name)
if size > 0:
data = data[:size]
else:
try:
if isinstance(rng, int):
rng = default_rng(rng)
assert size > 0, "specify data size for synthetic data generation"
data = PRESETS[data_name](size, noise, rng)
except KeyError:
raise KeyError(
f"data name {data_name} is not one of the presets, "
f"available: " + " ".join(list(PRESETS.keys()))) from None
return data
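
# A minimal usage sketch (an assumption about how this module is driven, not
# taken from the original file): build one synthetic preset with a fixed seed
# and inspect the first rows.
if __name__ == "__main__":
    df = get_data("sigmoid", size=1000, noise=0.1, rng=42)
    print(df.head())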
|
the-stack_0_15352 | import os
import numpy as np
import time
from collections import deque
import glob
import pickle
import shutil
from copy import deepcopy
import matplotlib.pyplot as plt
import torch
from agents import AgentDDPG, AgentMADDPG
from utilities import get_env_info
def run(env, params):
brain_name, n_agents, state_size, action_size = get_env_info(env)
if params["type"].lower() == "ddpg":
agent = AgentDDPG(state_size=state_size,
action_size=action_size, params=params)
scores = ddpg(agent, env, params)
elif params["type"].lower() == "2 ddpg":
agent = [AgentDDPG(state_size=state_size, action_size=action_size,
params=params) for i in range(n_agents)]
scores = ddpg(agent, env, params)
elif params["type"].lower() == "maddpg":
agent = AgentMADDPG(env, params)
scores = ddpg(agent, env, params)
else:
raise Exception("'type' can be 'ddpg', '2 ddpg', 'maddpg'")
def ddpg(agent, env, params):
# Get environment information
brain_name, n_agents, state_size, action_size = get_env_info(env)
# Initialize stuff
log = Logger(params, agent)
for _ in range(1, params["n_episodes"]+1):
env_info = env.reset(train_mode=True)[brain_name]
if isinstance(agent, list):
for i in range(n_agents):
agent[i].reset()
else:
agent.reset()
states = env_info.vector_observations
episode_scores = np.zeros(n_agents)
for t in range(params["max_steps"]):
if isinstance(agent, list):
actions = np.zeros((n_agents, action_size))
for i in range(n_agents):
actions[i] = agent[i].act(states[i])
else:
actions = agent.act(states)
if params["type"].lower() == "maddpg":
actions = actions.reshape(n_agents, action_size)
actions = actions.detach().cpu().numpy()
env_info = env.step(actions)[brain_name]
next_states = env_info.vector_observations
rewards = env_info.rewards
dones = env_info.local_done
if isinstance(agent, list):
for i in range(n_agents):
agent[i].step(states[i], actions[i], rewards[i],
next_states[i], dones[i])
else:
agent.step(states, actions, rewards, next_states, dones)
episode_scores += rewards
states = next_states
# check if we should save and show progress
log.tic()
if np.any(dones):
break
log.update(agent, episode_scores, t+1)
log.tic()
if log.solved and params["stop_on_solve"]:
break
if time.time() - log.t_start > params["max_time"] + 5:
break
return agent, log
class Logger():
'''
Logs, displays, and saves progress.
'''
def __init__(self, params, agent):
self.data = params
# If save folder exists raise an exception
if os.path.isdir(self.data["folder"]):
if self.data["overwrite"]:
shutil.rmtree(self.data["folder"])
else:
raise Exception("Folder already exists and overwrite is off.")
if not os.path.isdir(self.data["folder"]):
os.makedirs(self.data["folder"])
self.data["scores"] = []
self.data["mean_scores"] = []
self.data["steps_done"] = []
self._update_agent(agent)
# comb_score_window is the combined score - for tennis it's the max
self.comb_score_window = deque(maxlen=params["scores_window"])
# all_score_window contains the scores of all agents
self.all_score_window = deque(maxlen=params["scores_window"])
self.best_score = -np.inf
self.t_start = time.time()
self.progress_t = time.time()
self.saved_t = time.time()
self.solved = False
self.data["train_time"] = time.time() - self.t_start
def _update_agent(self, agent):
if isinstance(agent, list):
if not "actor_local_dict" in self.data:
temp = []
for i in range(len(agent)):
temp.append([])
self.data["actor_local_dict"] = deepcopy(temp)
self.data["actor_target_dict"] = deepcopy(temp)
self.data["critic_local_dict"] = deepcopy(temp)
self.data["critic_target_dict"] = deepcopy(temp)
else:
for i in range(len(agent)):
self.data["actor_local_dict"][i] = agent[i].actor_local.state_dict()
self.data["actor_target_dict"][i] = agent[i].actor_target.state_dict()
self.data["critic_local_dict"][i] = agent[i].critic_local.state_dict()
self.data["critic_target_dict"][i] = agent[i].critic_target.state_dict(
)
elif isinstance(agent, AgentDDPG):
self.data["actor_local_dict"] = agent.actor_local.state_dict()
self.data["actor_target_dict"] = agent.actor_target.state_dict()
self.data["critic_local_dict"] = agent.critic_local.state_dict()
self.data["critic_target_dict"] = agent.critic_target.state_dict()
elif isinstance(agent, AgentMADDPG):
if not "actor_local_dict" in self.data:
temp = []
for i in range(len(agent.maddpg_agent)):
temp.append([])
self.data["actor_local_dict"] = deepcopy(temp)
self.data["actor_target_dict"] = deepcopy(temp)
self.data["critic_local_dict"] = deepcopy(temp)
self.data["critic_target_dict"] = deepcopy(temp)
else:
for i in range(len(agent.maddpg_agent)):
self.data["actor_local_dict"][i] = agent.maddpg_agent[i].actor_local.state_dict(
)
self.data["actor_target_dict"][i] = agent.maddpg_agent[i].actor_target.state_dict(
)
self.data["critic_local_dict"][i] = agent.maddpg_agent[i].critic_local.state_dict(
)
self.data["critic_target_dict"][i] = agent.maddpg_agent[i].critic_target.state_dict(
)
else:
raise Exception("Unkown agent type.")
def update(self, agent, episode_scores, steps):
self.comb_score_window.append(np.max(episode_scores))
self.all_score_window.append(episode_scores)
self.data["scores"].append(episode_scores)
self.data["mean_scores"].append(np.mean(self.all_score_window, axis=0))
self.data["steps_done"].append(steps)
self._update_agent(agent)
self.tic()
def show_progress(self):
if len(self.data["mean_scores"]):
print('\rMin agent score: {:.2f}\tMax agent score: {:.2f}\tMax steps: {}\tTotal time: {}\tEpisodes: {}'.format(
min(self.data["mean_scores"][-1]),
max(self.data["mean_scores"][-1]),
self.data["steps_done"][-1],
seconds_to_time_str(time.time() - self.t_start),
len(self.data["scores"])), end="")
def tic(self):
self.data["train_time"] = time.time() - self.t_start
if self.data["verbose"] and (self.data["progress_every"] > 0 and
time.time() - self.progress_t >= self.data["progress_every"]):
self.show_progress()
self.progress_t = time.time()
if self.data["save_every"] > 0 and \
time.time() - self.saved_t >= self.data["save_every"]:
self.saved_t = time.time()
self.save()
if len(self.comb_score_window) and \
(np.mean(self.comb_score_window) >= self.data["score_solved"]):
print('\nEnvironment solved in {:d} episodes!\tAverage combined score: {:.2f}'.format(
len(self.data["scores"])-100, np.mean(self.comb_score_window)))
self.save(add="SOLVED")
self.solved = True
def save(self, add=""):
# Figure out the root of the resulting file names
if add != "":
name = "agent_" + add + "_"
else:
name = "agent_"
name = name + "train_time_" + \
seconds_to_time_str(
self.data["train_time"]).replace(" ", "_")
save_path = os.path.join(self.data["folder"], name + ".pkl")
with open(save_path, 'wb') as f:
pickle.dump(self.data, f)
def find_state_mag(env, max_steps=1000, n_episodes=1000):
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
action_size = brain.vector_action_space_size
states = []
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
num_agents = len(env_info.agents)
state = env_info.vector_observations[0]
for t in range(max_steps):
states.append(state)
actions = np.random.randn(num_agents, action_size)
actions = np.clip(actions, -1, 1)
env_info = env.step(actions)[brain_name]
state = env_info.vector_observations[0]
done = env_info.local_done[0]
if done:
break
states = np.array(states)
states = np.abs(states)
return np.mean(states, axis=0), np.std(states, axis=0)
def seconds_to_time_str(t):
if t < 0:
raise Exception("Negative time?")
if t < 60:
return "{:02d} seconds".format(int(t))
elif t >= 60 and t < 3600:
return "{:04.1f} minutes".format(t/60)
elif t >= 3600:
return "{:04.1f} hours".format(t/3600)
def load_agent(folder, add="", train_time="last"):
if add != "":
name = "agent_" + add + "_"
else:
name = "agent_"
if train_time != "last":
name = name + "train_time_" + train_time.replace(" ", "_") + ".pkl"
else:
files = glob.glob(os.path.join(folder, "*.pkl"))
files.sort(key=os.path.getmtime)
files = files[-1]
name = os.path.split(files)[1]
path = os.path.join(folder, name)
with open(path, 'rb') as f:
data = pickle.load(f)
return data
def show_plots(mean_scores, scores, labels=None, max_episodes=None, only_mean=False, legend_outside=False):
if max_episodes == None:
# Find max number of episodes
max_episodes = 0
for i in range(len(mean_scores)):
if len(mean_scores[i]) > max_episodes:
max_episodes = len(mean_scores[i])
fig, ax = plt.subplots()
cmap = plt.cm.get_cmap("jet", max([len(mean_scores), 2]))
for i in range(len(mean_scores)):
if labels is not None:
label = labels[i]
else:
label = None
mean_score = mean_scores[i]
score = scores[i]
if len(mean_score) < max_episodes:
mean_score = np.concatenate(
(mean_score, np.nan * np.ones(max_episodes-len(mean_score))))
score = np.concatenate(
(score, np.nan * np.ones(max_episodes-len(score))))
if not only_mean:
ax.plot(np.arange(1, max_episodes+1),
score, alpha=0.3, color=cmap(i))
ax.plot(np.arange(1, max_episodes+1), mean_score,
label=label, color=cmap(i), linewidth=2)
if labels is not None:
if legend_outside:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.legend()
ax.set_xlabel("# episodes")
ax.grid()
|
the-stack_0_15353 | # -*- coding: utf-8 -*-
#
'''
Numerical solution schemes for the Navier--Stokes equation
rho (u' + u.nabla(u)) = - nabla(p) + mu Delta(u) + f,
div(u) = 0.
For an overview of methods, see
An overview of projection methods for incompressible flows;
Guermond, Minev, Shen;
Comput. Methods Appl. Mech. Engrg., 195 (2006);
<http://www.math.ust.hk/~mawang/teaching/math532/guermond-shen-2006.pdf>
or
<http://mumerik.iwr.uni-heidelberg.de/Oberwolfach-Seminar/CFD-Course.pdf>.
'''
from dolfin import (
dot, inner, grad, dx, ds, div, Function, TestFunction, solve, derivative,
TrialFunction, assemble, PETScPreconditioner, FacetNormal,
PETScKrylovSolver, as_backend_type, PETScOptions, Identity
)
from ..message import Message
def _rhs_weak(u, v, f, rho, mu, p0):
'''Right-hand side of the Navier--Stokes momentum equation in weak form.
'''
# It was first proposed in (with two intermediate steps)
#
# Sur l'approximation de la solution des 'equations de Navier-Stokes
# par la m'ethode des pas fractionnaires (II);
# R. Temam;
# Arch. Ration. Mech. Anal. 33, (1969) 377-385;
# <http://link.springer.com/article/10.1007%2FBF00247696>.
#
# to replace the (weak form) convection <(u.\nabla)v, w> by something more
# appropriate. Note, e.g., that
#
# 1/2 ( <(u.\nabla)v, w> - <(u.\nabla)w, v>)
# = 1/2 (2 <(u.\nabla)v, w> - <u, \nabla(v.w)>)
# = <(u.\nabla)v, w> - 1/2 \int u.\nabla(v.w)
# = <(u.\nabla)v, w> - 1/2 (-\int div(u)*(v.w)
# +\int_\Gamma (n.u)*(v.w)
# ).
#
# Since for solutions we have div(u)=0, n.u=0, we can consistently replace
# the convection term <(u.\nabla)u, w> by the skew-symmetric
#
# 1/2 (<(u.\nabla)u, w> - <(u.\nabla)w, u>).
#
# One distinct advantage of this formulation is that the convective term
# doesn't contribute to the total energy of the system since
#
# d/dt ||u||^2 = 2<d_t u, u> = <(u.\nabla)u, u> - <(u.\nabla)u, u> = 0.
#
# More references and info on skew-symmetry can be found in
#
# Finite Element Methods for the Simulation of Incompressible Flows,
# Volker John,
# <http://www.wias-berlin.de/people/john/lectures_madrid_2012.pdf>,
#
# and
#
# <http://calcul.math.cnrs.fr/Documents/Ecoles/CEMRACS2012/Julius_Reiss.pdf>.
#
# The first lecture is quite instructive and gives info on other
# possibilities, e.g.,
#
# * Rotational form
# <http://www.igpm.rwth-aachen.de/Download/reports/DROPS/IGPM193.pdf>
# * Divergence form
# This paper
# <http://www.cimec.org.ar/ojs/index.php/mc/article/viewFile/486/464>
# mentions 'divergence form', but it seems to be understood as another
# way of expressing the stress term mu\Delta(u).
#
# The different methods are numerically compared in
#
# On the accuracy of the rotation form in simulations of the
# Navier-Stokes equations;
# Layton et al.;
# <http://www.mathcs.emory.edu/~molshan/ftp/pub/RotationForm.pdf>.
#
# In
#
# Finite element methods
# for the incompressible Navier-Stokes equations;
# Ir. A. Segal;
# <http://ta.twi.tudelft.nl/users/vuik/burgers/fem_notes.pdf>;
#
# it is advised to use (u{k}.\nabla)u^{k+1} for the treatment of the
# nonlinear term. In connection with the the div-stabilitation, this yields
# unconditional stability of the scheme. On the other hand, an advantage
# of treating the nonlinear term purely explicitly is that the resulting
# problem would be symmetric and positive definite, qualifying for robust
# AMG preconditioning.
# One can also find advice on the boundary conditions for axisymmetric flow
# here.
#
# For more information on stabilization techniques and general solution
# recipes, check out
#
# Finite Element Methods for Flow Problems;
# Jean Donea, Antonio Huerta.
#
# There are plenty of references in the book, e.g. to
#
# Finite element stabilization parameters
# computed from element matrices and vectors;
# Tezduyar, Osawa;
# Comput. Methods Appl. Mech. Engrg. 190 (2000) 411-430;
# <http://www.tafsm.org/PUB_PRE/jALL/j89-CMAME-EBTau.pdf>
#
# where more details on SUPG are given.
#
def epsilon(u):
return 0.5*(grad(u) + grad(u).T)
def sigma(u, p):
d = u.ufl_element().cell().topological_dimension()
return 2*mu*epsilon(u) - p*Identity(d)
# One could omit the boundary term
#
# mu * inner(grad(u)*n, v) * ds.
#
# This effectively means that at all boundaries where no sufficient
# Dirichlet-conditions are posed, we assume grad(u)*n to vanish.
normal = FacetNormal(v.function_space().mesh())
return (
inner(f, v) * dx
# - rho*inner(grad(u)*u, v) * dx
- rho * 0.5 * (inner(grad(u)*u, v) - inner(grad(v)*u, u)) * dx
# - mu * inner(grad(u), grad(v)) * dx
# - inner(grad(p0), v) * dx
- inner(sigma(u, p0), epsilon(v)) * dx
- inner(p0*normal, v) * ds
+ mu*inner(grad(u).T*normal, v)*ds
)
def _compute_tentative_velocity(
u, p0, f, u_bcs, time_step_method, rho, mu, dt, v,
tol=1.0e-10
):
#
# F(u) = 0,
# F(u) := rho (U0 + (u.\nabla)u) - mu \div(\nabla u) - f = 0.
#
# TODO higher-order scheme for time integration
#
# For higher-order schemes, see
#
# A comparison of time-discretization/linearization approaches
# for the incompressible Navier-Stokes equations;
# Volker John, Gunar Matthies, Joachim Rang;
# Comput. Methods Appl. Mech. Engrg. 195 (2006) 5995-6010;
# <http://www.wias-berlin.de/people/john/ELECTRONIC_PAPERS/JMR06.CMAME.pdf>.
#
ui = Function(u[0].function_space())
# F1 is scaled with `dt / rho`.
if time_step_method == 'forward euler':
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * _rhs_weak(u[0], v, f[0], rho, mu, p0)
)
elif time_step_method == 'backward euler':
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * _rhs_weak(ui, v, f[1], rho, mu, p0)
)
else:
assert time_step_method == 'crank-nicolson'
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * 0.5 * (
_rhs_weak(u[0], v, f[0], rho, mu, p0) +
_rhs_weak(ui, v, f[1], rho, mu, p0)
)
)
# else:
# assert time_step_method == 'bdf2'
# alpha = 1.5
# F1 = (
# inner(1.5 * ui - 2 * u[0] + 0.5 * u[-1], v) * dx
# - dt/rho * _rhs_weak(ui, v, f[1], rho, mu, p0)
# )
# Get linearization and solve nonlinear system.
# If the scheme is fully explicit (theta=0.0), then the system is
# actually linear and only one Newton iteration is performed.
J = derivative(F1, ui)
# What is a good initial guess for the Newton solve?
# Three choices come to mind:
#
# (1) the previous solution u0,
# (2) the intermediate solution from the previous step ui0,
# (3) the solution of the semilinear system
# (u.\nabla(u) -> u0.\nabla(u)).
#
# Numerical experiments with the Karman vortex street show that the
# order of accuracy is (1), (3), (2). Typical norms would look like
#
# ||u - u0 || = 1.726432e-02
# ||u - ui0|| = 2.720805e+00
# ||u - u_e|| = 5.921522e-02
#
# Hence, use u0 as initial guess.
ui.assign(u[0])
# problem = NonlinearVariationalProblem(F1, ui, u_bcs, J)
# solver = NonlinearVariationalSolver(problem)
solve(
F1 == 0, ui,
bcs=u_bcs,
J=J,
solver_parameters={
# 'nonlinear_solver': 'snes',
'nonlinear_solver': 'newton',
'newton_solver': {
'maximum_iterations': 10,
'report': True,
'absolute_tolerance': tol,
'relative_tolerance': 0.0,
'error_on_nonconvergence': True
# 'linear_solver': 'iterative',
# # # The nonlinear term makes the problem generally
# # # nonsymmetric.
# # 'symmetric': False,
# # If the nonsymmetry is too strong, e.g., if u_1 is
# # large, then AMG preconditioning might not work
# # very well.
# 'preconditioner': 'ilu',
# # 'preconditioner': 'hypre_amg',
# 'krylov_solver': {
# 'relative_tolerance': tol,
# 'absolute_tolerance': 0.0,
# 'maximum_iterations': 1000,
# 'monitor_convergence': verbose
# }
}
}
)
return ui, alpha
def _compute_pressure(
p0,
alpha, rho, dt, mu,
div_ui,
p_bcs=None,
p_function_space=None,
rotational_form=False,
tol=1.0e-10,
verbose=True
):
    '''Solve the pressure Poisson equation

        -\\Delta phi = -alpha * rho/dt * div(u*),
        phi = p1 - p0,

    (plus boundary conditions) for the new pressure p1.
    '''
#
# The following is based on the update formula
#
# rho/dt (u_{n+1}-u*) + \nabla phi = 0
#
# with
#
# phi = (p_{n+1} - p*) + chi*mu*div(u*)
#
# and div(u_{n+1})=0. One derives
#
# - \nabla^2 phi = rho/dt div(u_{n+1} - u*),
# - n.\nabla phi = rho/dt n.(u_{n+1} - u*),
#
# In its weak form, this is
#
# \int \grad(phi).\grad(q)
# = - rho/dt \int div(u*) q - rho/dt \int_Gamma n.(u_{n+1}-u*) q.
#
# If Dirichlet boundary conditions are applied to both u* and u_{n+1} (the
# latter in the final step), the boundary integral vanishes.
#
# Assume that on the boundary
# L2 -= inner(n, rho/k (u_bcs - ui)) * q * ds
# is zero. This requires the boundary conditions to be set for ui as well
# as u_final.
# This creates some problems if the boundary conditions are supposed to
# remain 'free' for the velocity, i.e., no Dirichlet conditions in normal
# direction. In that case, one needs to specify Dirichlet pressure
# conditions.
#
if p0:
P = p0.function_space()
else:
P = p_function_space
p1 = Function(P)
p = TrialFunction(P)
q = TestFunction(P)
a2 = dot(grad(p), grad(q)) * dx
L2 = -alpha * rho/dt * div_ui * q * dx
L2 += dot(grad(p0), grad(q)) * dx
if rotational_form:
L2 -= mu * dot(grad(div_ui), grad(q)) * dx
if p_bcs:
solve(a2 == L2, p1,
bcs=p_bcs,
solver_parameters={
'linear_solver': 'iterative',
'symmetric': True,
'preconditioner': 'hypre_amg',
'krylov_solver': {
'relative_tolerance': tol,
'absolute_tolerance': 0.0,
'maximum_iterations': 100,
'monitor_convergence': verbose,
'error_on_nonconvergence': True
}
})
else:
# If we're dealing with a pure Neumann problem here (which is the
# default case), this doesn't hurt CG if the system is consistent, cf.
#
# Iterative Krylov methods for large linear systems,
# Henk A. van der Vorst.
#
# And indeed, it is consistent: Note that
#
# <1, rhs> = \sum_i 1 * \int div(u) v_i
# = 1 * \int div(u) \sum_i v_i
# = \int div(u).
#
# With the divergence theorem, we have
#
# \int div(u) = \int_\Gamma n.u.
#
# The latter term is 0 if and only if inflow and outflow are exactly
# the same at any given point in time. This corresponds with the
# incompressibility of the liquid.
#
# Another lesson from this:
# If the mesh has penetration boundaries, you either have to specify
# the normal component of the velocity such that \int(n.u) = 0, or
# specify Dirichlet conditions for the pressure somewhere.
#
A = assemble(a2)
b = assemble(L2)
# If the right hand side is flawed (e.g., by round-off errors), then it
# may have a component b1 in the direction of the null space,
# orthogonal to the image of the operator:
#
# b = b0 + b1.
#
# When starting with initial guess x0=0, the minimal achievable
# relative tolerance is then
#
# min_rel_tol = ||b1|| / ||b||.
#
# If ||b|| is very small, which is the case when ui is almost
# divergence-free, then min_rel_to may be larger than the prescribed
# relative tolerance tol. This happens, for example, when the time
# steps is very small.
# Sanitation of right-hand side is easy with
#
# e = Function(P)
# e.interpolate(Constant(1.0))
# evec = e.vector()
# evec /= norm(evec)
# print(b.inner(evec))
# b -= b.inner(evec) * evec
#
# However it's hard to decide when the right-hand side is inconsistent
# because of round-off errors in previous steps, or because the system
# is actually inconsistent (insufficient boundary conditions or
# something like that). Hence, don't do anything and rather try to
# fight the cause for round-off.
# In principle, the ILU preconditioner isn't advised here since it
# might destroy the semidefiniteness needed for CG.
#
# The system is consistent, but the matrix has an eigenvalue 0. This
# does not harm the convergence of CG, but with preconditioning one has
# to make sure that the preconditioner preserves the kernel. ILU might
# destroy this (and the semidefiniteness). With AMG, the coarse grid
# solves cannot be LU then, so try Jacobi here.
# <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html>
#
# TODO clear everything; possible in FEniCS 2017.1
# <https://fenicsproject.org/qa/12916/clear-petscoptions>
# PETScOptions.clear()
prec = PETScPreconditioner('hypre_amg')
PETScOptions.set(
'pc_hypre_boomeramg_relax_type_coarse',
'jacobi'
)
solver = PETScKrylovSolver('cg', prec)
solver.parameters['absolute_tolerance'] = 0.0
solver.parameters['relative_tolerance'] = tol
solver.parameters['maximum_iterations'] = 1000
solver.parameters['monitor_convergence'] = verbose
solver.parameters['error_on_nonconvergence'] = True
# Create solver and solve system
A_petsc = as_backend_type(A)
b_petsc = as_backend_type(b)
p1_petsc = as_backend_type(p1.vector())
solver.set_operator(A_petsc)
solver.solve(p1_petsc, b_petsc)
return p1
def _compute_velocity_correction(
ui, u, u_bcs, p1, p0, v, mu, rho, dt, rotational_form, tol, verbose
):
# Velocity correction.
# U = U0 - dt/rho \nabla p.
u2 = TrialFunction(u[0].function_space())
a3 = inner(u2, v) * dx
phi = p1 - p0
if rotational_form:
phi += mu * div(ui)
L3 = inner(ui, v) * dx \
- dt/rho * inner(grad(phi), v) * dx
u1 = Function(u[0].function_space())
solve(a3 == L3, u1,
bcs=u_bcs,
solver_parameters={
'linear_solver': 'iterative',
'symmetric': True,
'preconditioner': 'hypre_amg',
'krylov_solver': {
'relative_tolerance': tol,
'absolute_tolerance': 0.0,
'maximum_iterations': 100,
'monitor_convergence': verbose,
'error_on_nonconvergence': True
}
})
return u1
def _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
time_step_method,
f,
rotational_form=False,
verbose=True,
tol=1.0e-10,
):
'''Incremental pressure correction scheme as described in section
3.4 of
An overview of projection methods for incompressible flows;
Guermond, Minev, Shen;
Comput. Methods Appl. Mech. Engrg. 195 (2006),
<http://www.math.tamu.edu/~guermond/PUBLICATIONS/guermond_minev_shen_CMAME_2006.pdf>.
'''
# dt is a Constant() function
assert dt.values()[0] > 0.0
assert mu.values()[0] > 0.0
# Define trial and test functions
v = TestFunction(u[0].function_space())
# Create functions
# Define coefficients
with Message('Computing tentative velocity'):
ui, alpha = _compute_tentative_velocity(
u, p0, f, u_bcs, time_step_method, rho, mu, dt, v,
tol=1.0e-10
)
with Message('Computing pressure'):
p1 = _compute_pressure(
p0,
alpha, rho, dt, mu,
div_ui=div(ui),
p_bcs=p_bcs,
rotational_form=rotational_form,
tol=tol,
verbose=verbose
)
with Message('Computing velocity correction'):
u1 = _compute_velocity_correction(
ui, u, u_bcs, p1, p0, v, mu, rho, dt, rotational_form, tol, verbose
)
return u1, p1
class Chorin(object):
order = {
'velocity': 1.0,
'pressure': 0.5,
}
def __init__(self):
return
# p0 and f0 aren't necessary here, we just keep it around to interface
# equality with IPCS.
# pylint: disable=no-self-use
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, Function(p0.function_space()),
u_bcs, p_bcs,
rho, mu,
'backward euler',
f,
verbose=verbose,
tol=tol,
)
class IPCS(object):
order = {
'velocity': 2.0,
'pressure': 1.0,
}
def __init__(self, time_step_method='backward euler'):
self.time_step_method = time_step_method
return
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
self.time_step_method,
f,
verbose=verbose,
tol=tol
)
class Rotational(object):
order = {
'velocity': 2.0,
'pressure': 1.5,
}
def __init__(self, time_step_method='backward euler'):
self.time_step_method = time_step_method
return
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
self.time_step_method,
f,
rotational_form=True,
verbose=verbose,
tol=tol
)
|
the-stack_0_15354 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv, global_add_pool, GCNConv
class NNGinConv(torch.nn.Module):
def __init__(self, node_features, classes):
super(NNGinConv, self).__init__()
nn1 = Sequential(Linear(node_features, 32), ReLU(), Linear(32, 64), ReLU() , Linear(64,128))
self.conv1 = GINConv(nn1)
self.bn1 = nn.BatchNorm1d(128)
nn2 = Sequential(Linear(128, 128), ReLU(), Linear(128, 64), ReLU() , Linear(64,32))
self.conv2 = GINConv(nn2)
self.bn2 = nn.BatchNorm1d(32)
nn3 = Sequential(Linear(32, 32), ReLU(), Linear(32, 16))
self.conv3 = GINConv(nn3)
self.bn3 = nn.BatchNorm1d(16)
self.fc1 = Linear(16, 16)
self.fc2 = Linear(16, classes)
def forward(self,data):
x, edge_index, batch = data.x, data.edge_index, data.batch
x = F.relu(self.conv1(x, edge_index))
x = self.bn1(x)
x = F.relu(self.conv2(x, edge_index))
x = self.bn2(x)
x = F.relu(self.conv3(x, edge_index))
x = self.bn3(x)
#x = global_add_pool(x, batch)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return torch.tanh(x)
class NNGcnConv(torch.nn.Module):
def __init__(self, node_features, classes):
super(NNGcnConv, self).__init__()
self.conv1 = GCNConv(node_features, 16)
self.conv2 = GCNConv(16, 32)
self.conv3 = GCNConv(32,64)
self.conv4 = GCNConv(64,128)
self.fc1 = Linear(128, 32)
self.fc2 = Linear(32, classes)
def forward(self,data):
x, edge_index = data.x, data.edge_index
x = F.relu(self.conv1(x, edge_index))
#x = F.dropout(x, training=self.training)
x = F.relu(self.conv2(x, edge_index))
x = F.relu(self.conv3(x, edge_index))
x = F.relu(self.conv4(x, edge_index))
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return torch.tanh(x)
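
# A minimal smoke test, added as an illustration: the tiny 4-node graph below
# is made up for demonstration and assumes torch_geometric's Data container;
# it is not part of the original module.
if __name__ == "__main__":
    from torch_geometric.data import Data

    x = torch.randn(4, 3)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=torch.long)
    data = Data(x=x, edge_index=edge_index)

    model = NNGcnConv(node_features=3, classes=2)
    model.eval()  # disable dropout for this quick check
    print(model(data).shape)  # expected: torch.Size([4, 2])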
|
the-stack_0_15355 | # Implement an abstraction for working with rational numbers
# that includes the following functions:
#
# Constructor make_rational: takes a numerator and a denominator,
# returns a fraction.
# Selector get_numer: returns the numerator
# Selector get_denom: returns the denominator
# Addition add: adds the given fractions
# Subtraction sub: finds the difference between two fractions
# Don't forget to implement fraction normalization in whatever way suits you.
#
# >>> rat1 = make_rational(3, 9)
# >>> get_numer(rat1)
# 1
# >>> get_denom(rat1)
# 3
#
# >>> rat2 = make_rational(10, 3)
#
# >>> rat3 = add(rat1, rat2)
# >>> rat_to_string(rat3)
# 11/3
#
# >>> rat4 = sub(rat1, rat2)
# >>> rat_to_string(rat4)
# -3/1
# Hints
# The gcd function from the math module finds the greatest common divisor of two numbers
# The rat_to_string function returns the string representation of a number
# (used for debugging)
# The int function converts a value to an integer
import math
def make_rational(numer, denom):
gcd = math.gcd(numer, denom)
return {"numer": numer // gcd, "denom": denom // gcd}
def get_numer(rat):
return rat["numer"]
def get_denom(rat):
return rat["denom"]
def add(rat1, rat2):
numer1 = get_numer(rat1) * get_denom(rat2)
numer2 = get_numer(rat2) * get_denom(rat1)
numer = numer1 + numer2
denom = get_denom(rat1) * get_denom(rat2)
return make_rational(numer, denom)
def sub(rat1, rat2):
numer1 = get_numer(rat1) * get_denom(rat2)
numer2 = get_numer(rat2) * get_denom(rat1)
numer = numer1 - numer2
denom = get_denom(rat1) * get_denom(rat2)
return make_rational(numer, denom)
def rat_to_string(rat):
return "{}/{}".format(get_numer(rat), get_denom(rat))
rat_1 = make_rational(3, 9)
rat_2 = make_rational(10, 3)
print(get_numer(rat_1))
print(get_denom(rat_1))
print(rat_1)
print(rat_2)
rat_3 = add(rat_1, rat_2)
print(rat_3)
rat_4 = sub(rat_1, rat_2)
print(rat_4)
def test_rational():
rat1 = make_rational(3, 9)
assert get_numer(rat1) == 1
assert get_denom(rat1) == 3
rat2 = make_rational(10, 3)
assert add(rat1, rat2) == make_rational(11, 3)
assert sub(rat1, rat2) == make_rational(-3, 1)
rat3 = make_rational(-4, 16)
assert get_numer(rat3) == -1
assert get_denom(rat3) == 4
rat4 = make_rational(12, 5)
assert add(rat3, rat4) == make_rational(43, 20)
assert sub(rat3, rat4) == make_rational(-53, 20)
assert rat_to_string(rat1) == "1/3"
assert rat_to_string(rat3) == "-1/4"
test_rational()
|
the-stack_0_15356 | import random
def train(jm=None, api=None, seed=2020, case=None):
pass
def test(jm=None, api=None, seed=2020, case=1):
cases = ["checkpoint_high", "checkpoint_low", "checkpoint_auto"]
if case not in cases:
print('[WARN] case not in ' + str(cases))
return
api.conf_reset()
conf = {}
if case == 'checkpoint_high':
conf = {
'candidates': ['cnn', 'lstm', 'resnet50', 'vgg16', 'inception3'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
elif case == 'checkpoint_low':
conf = {
'candidates': ['resnet50_d', 'vgg16_d', 'inception3_d'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
elif case == 'checkpoint_auto':
conf = {
'candidates': ['resnet50_d', 'vgg16_d', 'inception3_d'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
else:
print('[ERROR] case not in ' + str(cases))
return
launch(conf)
def launch(conf=None):
if conf is None:
conf = {}
candidates = conf['candidates']
jm = conf['jm']
job_num = conf['job_num']
api = conf['api']
random.seed(conf['seed'])
for i in range(job_num):
next_seed = random.randint(0, 999999)
job_name = random.choice(candidates)
job = jm.get_job(job_name, seed=next_seed)
job['tasks'] = job['tasks'].replace('--save_model_steps=0', '--save_model_steps=50')
msg = api.submit_job(job)
print(i, msg)
if __name__ == '__main__':
print("checkpoint.launcher")
pass
|
the-stack_0_15358 | import random
import yaml
def load_data_cfg(data_cfg, merge_classes=False):
with open(data_cfg) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if not data.get('colors'):
data['colors'] = [
[random.randint(0, 255) for _ in range(3)]
for _ in range(len(data['names']))
]
if merge_classes:
data['nc'] = 1
data['names'] = ['item']
assert len(data['names']) == data['nc'], f'len(`names`) != `nc` in {data_cfg}.'
return data
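
# A small usage sketch (the YAML contents below are an illustrative assumption,
# not shipped with this module): a YOLO-style data config is expected to
# provide at least `nc` and `names`.
if __name__ == "__main__":
    import tempfile

    cfg_text = "nc: 2\nnames: ['cat', 'dog']\n"
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
        tmp.write(cfg_text)
    data = load_data_cfg(tmp.name, merge_classes=False)
    print(data["nc"], data["names"], len(data["colors"]))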
|
the-stack_0_15359 | import explorerhat as eh
from time import sleep
while True:
voltage = eh.analog.one.read()
celsius = 100 * (voltage - 0.5)
fahrenheit = 32 + 9 * celsius / 5.0
print('Temperature is %4.1f degrees C or %4.1f degrees F'
% (celsius, fahrenheit))
sleep(1)
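# Worked check of the conversion above (added for reference): a TMP36-style
# reading of 0.75 V gives 100 * (0.75 - 0.5) = 25.0 degrees C, which is
# 32 + 9 * 25 / 5 = 77.0 degrees F.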
|
the-stack_0_15361 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
import pytest
from pants.backend.python.subsystems.python_tool_base import DEFAULT_TOOL_LOCKFILE
from pants.backend.python.target_types import UnrecognizedResolveNamesError
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.fs import Digest, DigestContents
from pants.engine.rules import SubsystemRule, rule
from pants.jvm.resolve import jvm_tool
from pants.jvm.resolve.coursier_fetch import ArtifactRequirements, Coordinate
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.resolve.jvm_tool import (
GatherJvmCoordinatesRequest,
JvmToolBase,
JvmToolLockfileRequest,
JvmToolLockfileSentinel,
determine_resolves_to_generate,
filter_tool_lockfile_requests,
)
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
from pants.util.ordered_set import FrozenOrderedSet
class MockJvmTool(JvmToolBase):
options_scope = "mock-tool"
default_version = "1.3"
default_artifacts = ("org.hamcrest:hamcrest-core:{version}",)
default_lockfile_resource = ("pants.backend.jvm.resolve", "mock-tool.default.lockfile.txt")
default_lockfile_url = ""
class MockJvmToolLockfileSentinel(JvmToolLockfileSentinel):
options_scope = MockJvmTool.options_scope
@rule
async def generate_test_tool_lockfile_request(
_: MockJvmToolLockfileSentinel, tool: MockJvmTool
) -> JvmToolLockfileRequest:
return JvmToolLockfileRequest.from_tool(tool)
def test_jvm_tool_base_extracts_correct_coordinates() -> None:
rule_runner = RuleRunner(
rules=[
*config_files.rules(),
*coursier_fetch_rules(),
*coursier_setup_rules(),
*external_tool_rules(),
*source_files.rules(),
*util_rules(),
*jvm_tool.rules(),
generate_test_tool_lockfile_request,
SubsystemRule(MockJvmTool),
QueryRule(JvmToolLockfileRequest, (MockJvmToolLockfileSentinel,)),
QueryRule(ArtifactRequirements, (GatherJvmCoordinatesRequest,)),
QueryRule(DigestContents, (Digest,)),
],
target_types=[JvmArtifactTarget],
)
rule_runner.set_options(
args=[
"--mock-tool-artifacts=//:junit_junit",
"--mock-tool-lockfile=/dev/null",
],
env_inherit=PYTHON_BOOTSTRAP_ENV,
)
rule_runner.write_files(
{
"BUILD": textwrap.dedent(
"""\
jvm_artifact(
name="junit_junit",
group="junit",
artifact="junit",
version="4.13.2",
)
"""
)
}
)
lockfile_request = rule_runner.request(JvmToolLockfileRequest, [MockJvmToolLockfileSentinel()])
assert sorted(lockfile_request.artifact_inputs) == [
"//:junit_junit",
"org.hamcrest:hamcrest-core:1.3",
]
requirements = rule_runner.request(
ArtifactRequirements, [GatherJvmCoordinatesRequest(lockfile_request.artifact_inputs, "")]
)
coordinates = [i.coordinate for i in requirements]
assert sorted(coordinates, key=lambda c: (c.group, c.artifact, c.version)) == [
Coordinate(group="junit", artifact="junit", version="4.13.2"),
Coordinate(group="org.hamcrest", artifact="hamcrest-core", version="1.3"),
]
def test_determine_tool_sentinels_to_generate() -> None:
class Tool1(JvmToolLockfileSentinel):
options_scope = "tool1"
class Tool2(JvmToolLockfileSentinel):
options_scope = "tool2"
class Tool3(JvmToolLockfileSentinel):
options_scope = "tool3"
def assert_chosen(
requested: list[str],
expected_tools: list[type[JvmToolLockfileSentinel]],
) -> None:
tools = determine_resolves_to_generate([Tool1, Tool2, Tool3], requested)
assert tools == expected_tools
assert_chosen([Tool2.options_scope], expected_tools=[Tool2])
assert_chosen(
[Tool1.options_scope, Tool3.options_scope],
expected_tools=[Tool1, Tool3],
)
# If none are specifically requested, return all.
assert_chosen([], expected_tools=[Tool1, Tool2, Tool3])
with pytest.raises(UnrecognizedResolveNamesError):
assert_chosen(["fake"], expected_tools=[])
def test_filter_tool_lockfile_requests() -> None:
def create_request(name: str, lockfile_dest: str | None = None) -> JvmToolLockfileRequest:
return JvmToolLockfileRequest(
FrozenOrderedSet(),
resolve_name=name,
lockfile_dest=lockfile_dest or f"{name}.txt",
)
tool1 = create_request("tool1")
tool2 = create_request("tool2")
default_tool = create_request("default", lockfile_dest=DEFAULT_TOOL_LOCKFILE)
def assert_filtered(
extra_request: JvmToolLockfileRequest | None,
*,
resolve_specified: bool,
) -> None:
requests = [tool1, tool2]
if extra_request:
requests.append(extra_request)
assert filter_tool_lockfile_requests(requests, resolve_specified=resolve_specified) == [
tool1,
tool2,
]
assert_filtered(None, resolve_specified=False)
assert_filtered(None, resolve_specified=True)
assert_filtered(default_tool, resolve_specified=False)
with pytest.raises(ValueError) as exc:
assert_filtered(default_tool, resolve_specified=True)
assert f"`[{default_tool.resolve_name}].lockfile` is set to `{DEFAULT_TOOL_LOCKFILE}`" in str(
exc.value
)
|
the-stack_0_15362 | # For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def insertNodeAtPosition(head, data, position):
    new_node = SinglyLinkedListNode(data)
    # Inserting at the head: there is no previous node to re-link.
    if position == 0:
        new_node.next = head
        return new_node
    cur = head
    prev = None
    count = 0
    while cur and count != position:
        prev = cur
        cur = cur.next
        count += 1
    prev.next = new_node
    new_node.next = cur
    return head
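
# A self-contained sketch for trying the function outside the judge. The node
# class below mirrors the signature described in the header comment; it is an
# assumption for local testing, not something this snippet provides.
if __name__ == "__main__":
    class SinglyLinkedListNode:
        def __init__(self, data):
            self.data = data
            self.next = None

    head = SinglyLinkedListNode(1)
    head.next = SinglyLinkedListNode(2)
    head.next.next = SinglyLinkedListNode(3)

    head = insertNodeAtPosition(head, 9, 2)  # expected order: 1 -> 2 -> 9 -> 3
    node = head
    while node:
        print(node.data, end=" ")
        node = node.next
    print()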
|
the-stack_0_15364 | '''1. Write a Python program to check the sum of three elements (each from an array) from three arrays is equal to a target value. Print all those three-element combinations.
Sample data:
/*
X = [10, 20, 20, 20]
Y = [10, 20, 30, 40]
Z = [10, 30, 40, 20]
target = 70
*/ '''
X = [10, 20, 20, 20]
Y = [10, 20, 30, 40]
Z = [10, 30, 40, 20]
target = 70

for x in X:
    for y in Y:
        for z in Z:
            if x + y + z == target:
                print((x, y, z))
the-stack_0_15366 | #!/usr/bin/python
# coding=utf8
import sys
import numpy as np
import pickle as pk
from struct import unpack
print(sys.argv)
fName = sys.argv[1]
with open(fName, 'rb') as f:
info = f.readline().split(bytes(' '.encode('utf8')))
wordNum = int(info[0])
embSize = int(info[1])
l = []
vocab = {}
count = 0
buf = ''
first = False
while True:
ch = f.read(1).decode('utf8')
if ch == '':
break
elif ch == ' ':
ll = [unpack('f', f.read(4))[0] for _ in range(embSize)]
l.append(ll)
vocab[buf.lower()] = count
count += 1
elif ch == '\n':
buf = ''
else:
buf += str(ch)
matrix = np.array(l, dtype=np.float32)
avgNorm = np.linalg.norm(matrix, axis = 1).reshape([len(vocab), 1])
matrix = matrix / avgNorm
# Read Vectors
# WordSim-353
# with open('wordsim353.pkl', 'rb') as f:
# testData = pk.load(f)
# w1Idx = []
# w2Idx = []
# labels = []
# totalList = []
# for p, c in testData.items():
# w1 = p[0]
# w2 = p[1]
# if w1 in vocab and w2 in vocab:
# w1Idx.append(vocab[w1])
# w2Idx.append(vocab[w2])
# labels.append(float(c))
# totalList.append((float(c), (vocab[w1], vocab[w2])))
# SemLex-999
# with open('SimLex-999.txt', 'r') as f:
# w1Idx = []
# w2Idx = []
# labels = []
# totalList = []
# l = f.readline()
# for line in f.readlines():
# line = line.split('\t')
# w1 = line[0]
# w2 = line[1]
# if w1 in vocab and w2 in vocab:
# w1Idx.append(vocab[w1])
# w2Idx.append(vocab[w2])
# labels.append(float(line[3]))
# totalList.append((float(line[3]), (vocab[w1], vocab[w2])))
# MEN
with open('MEN_dataset_lemma_form_full', 'r') as f:
w1Idx = []
w2Idx = []
labels = []
totalList = []
for line in f.readlines():
line = line.split(' ')
w1 = line[0]
w2 = line[1]
if w1 in vocab and w2 in vocab:
w1Idx.append(vocab[w1])
w2Idx.append(vocab[w2])
labels.append(float(line[2]))
totalList.append((float(line[2]), (vocab[w1], vocab[w2])))
# norm = np.absolute(np.maximum(0, np.sum(matrix[w1Idx, :] * matrix[w2Idx, :], axis = 1)) - np.array(labels, dtype = np.float32) / 10)
# print("Avg Loss:", np.sum(norm) / len(labels), "\nData Count:", len(labels))
totalList.sort(key = lambda x: x[0])
rankDict = {}
for i, v in enumerate(totalList):
rankDict[v[1]] = i
cosines = np.maximum(0, np.sum(matrix[w1Idx, :] * matrix[w2Idx, :], axis = 1))
totalList = []
for i in range(len(w1Idx)):
totalList.append((cosines[i], (w1Idx[i], w2Idx[i])))
totalList.sort(key = lambda x: x[0])
summ = 0
n = len(w1Idx)
for i, v in enumerate(totalList):
summ += (rankDict[v[1]] - i)**2
print('Spearman\'s Correlation:', 1 - (6 * summ / n / (n**2 - 1)))
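# Optional cross-check (an addition, not in the original script): SciPy's
# implementation should give a similar coefficient, up to tie handling, since
# the hand-rolled rank-difference formula above ignores ties.
# from scipy.stats import spearmanr
# print(spearmanr(cosines, labels).correlation)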
|
the-stack_0_15372 | import json
class FindVaccineCenter:
"""
Queries the Cowin API to get required data
"""
def __init__(self, raw_json_data,vaccine):
self.raw_json_data = raw_json_data
self.vaccine = vaccine
def filter_results(self, response):
"""
Filters the response object by vaccine type, availability etc
:param response:
:return:
"""
filtered_responses = []
for center in response["centers"]:
for session in center["sessions"]:
filtered_center = {"center_id": center["center_id"], "name": center["name"],
"address": center["address"], "state_name": center["state_name"],
"district_name": center["district_name"], "block_name": center["block_name"],
"pincode": center["pincode"], "lat": center["lat"], "long": center["long"],
"from": center["from"], "to": center["to"], "fee_type": center["fee_type"]}
if center["fee_type"] == "Paid":
if center.get("vaccine_fees", False):
fee = ""
for key in center["vaccine_fees"][0]:
fee += f"{key.title()} : {center['vaccine_fees'][0][key]}\n "
filtered_center["fee_type"] = fee
filtered_sessions = []
if session["available_capacity"] == 0 or session["vaccine"] != self.vaccine:
continue
filtered_sessions.append(session)
if len(filtered_sessions) != 0:
filtered_center["sessions"] = filtered_sessions
filtered_responses.append(filtered_center)
if len(filtered_responses) == 0:
filtered_responses.append({"No centers available ": "("})
filtered_responses = {"centers": filtered_responses}
return filtered_responses
def get_data(self):
"""
The main interface used by external entities, Calls the other methods in class
filters the results and returns a json object of filtered results
:return: Dict (JSON obj) of filtered responses
"""
if self.raw_json_data != -1:
filtered_response = self.filter_results(self.raw_json_data)
return json.dumps(filtered_response, indent=2)
else:
return -1
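
# A minimal usage sketch with made-up input (the structure below mirrors what
# filter_results reads; a real CoWIN response carries many more fields).
if __name__ == "__main__":
    sample = {
        "centers": [{
            "center_id": 1, "name": "Test Center", "address": "Somewhere",
            "state_name": "State", "district_name": "District",
            "block_name": "Block", "pincode": 110001, "lat": 28, "long": 77,
            "from": "09:00:00", "to": "17:00:00", "fee_type": "Free",
            "sessions": [{"available_capacity": 10, "vaccine": "COVISHIELD",
                          "date": "01-06-2021", "min_age_limit": 18}],
        }]
    }
    finder = FindVaccineCenter(sample, "COVISHIELD")
    print(finder.get_data())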
|
the-stack_0_15373 |
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import logging as log
import os
import os.path as osp
from collections import OrderedDict
from datumaro.components.converter import Converter
from datumaro.components.extractor import AnnotationType, DEFAULT_SUBSET_NAME
from .format import YoloPath
def _make_yolo_bbox(img_size, box):
# https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py
# <x> <y> <width> <height> - values relative to width and height of image
# <x> <y> - are center of rectangle
x = (box[0] + box[2]) / 2 / img_size[0]
y = (box[1] + box[3]) / 2 / img_size[1]
w = (box[2] - box[0]) / img_size[0]
h = (box[3] - box[1]) / img_size[1]
return x, y, w, h
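# Worked example (added for clarity): for a 640x480 image and the pixel box
# (x_min, y_min, x_max, y_max) = (100, 100, 300, 200), this returns
#   x = 200/640 = 0.3125, y = 150/480 = 0.3125,
#   w = 200/640 = 0.3125, h = 100/480 ~= 0.2083.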
class YoloConverter(Converter):
# https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
extractor = self._extractor
save_dir = self._save_dir
os.makedirs(save_dir, exist_ok=True)
label_categories = extractor.categories()[AnnotationType.label]
label_ids = {label.name: idx
for idx, label in enumerate(label_categories.items)}
with open(osp.join(save_dir, 'obj.names'), 'w') as f:
f.writelines('%s\n' % l[0]
for l in sorted(label_ids.items(), key=lambda x: x[1]))
subset_lists = OrderedDict()
for subset_name, subset in self._extractor.subsets().items():
if not subset_name or subset_name == DEFAULT_SUBSET_NAME:
subset_name = YoloPath.DEFAULT_SUBSET_NAME
elif subset_name not in YoloPath.SUBSET_NAMES:
log.warn("Skipping subset export '%s'. "
"If specified, the only valid names are %s" % \
(subset_name, ', '.join(
"'%s'" % s for s in YoloPath.SUBSET_NAMES)))
continue
subset_dir = osp.join(save_dir, 'obj_%s_data' % subset_name)
os.makedirs(subset_dir, exist_ok=True)
image_paths = OrderedDict()
for item in subset:
if not item.has_image:
raise Exception("Failed to export item '%s': "
"item has no image info" % item.id)
height, width = item.image.size
image_name = self._make_image_filename(item)
if self._save_images:
if item.has_image and item.image.has_data:
self._save_image(item, osp.join(subset_dir, image_name))
else:
log.warning("Item '%s' has no image" % item.id)
image_paths[item.id] = osp.join('data',
osp.basename(subset_dir), image_name)
yolo_annotation = ''
for bbox in item.annotations:
if bbox.type is not AnnotationType.bbox:
continue
if bbox.label is None:
continue
yolo_bb = _make_yolo_bbox((width, height), bbox.points)
yolo_bb = ' '.join('%.6f' % p for p in yolo_bb)
yolo_annotation += '%s %s\n' % (bbox.label, yolo_bb)
annotation_path = osp.join(subset_dir, '%s.txt' % item.id)
os.makedirs(osp.dirname(annotation_path), exist_ok=True)
with open(annotation_path, 'w') as f:
f.write(yolo_annotation)
subset_list_name = '%s.txt' % subset_name
subset_lists[subset_name] = subset_list_name
with open(osp.join(save_dir, subset_list_name), 'w') as f:
f.writelines('%s\n' % s for s in image_paths.values())
with open(osp.join(save_dir, 'obj.data'), 'w') as f:
f.write('classes = %s\n' % len(label_ids))
for subset_name, subset_list_name in subset_lists.items():
f.write('%s = %s\n' % (subset_name,
osp.join('data', subset_list_name)))
f.write('names = %s\n' % osp.join('data', 'obj.names'))
f.write('backup = backup/\n')
|
the-stack_0_15376 | import pandas as pd
from pathlib import Path
import json
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.path import Path as mplPath
import skimage.io
def load_annotated_dataset(csv_file_path, images_directory_path):
csv_path = Path(csv_file_path)
df = pd.read_csv(csv_path)
originals = []
masks = []
i = 0
for fn in df["filename"].unique():
i += 1
img_file_path = f"{images_directory_path}/{fn}"
img = skimage.io.imread(img_file_path, as_gray=True)
img_mask = np.zeros([img.shape[0], img.shape[1]])
dirty = False
for region in df[df["filename"] == fn].region_shape_attributes:
region_shape_attributes = json.loads(region)
# I found out, that CSV contains some strange areas
if "all_points_x" not in region_shape_attributes or "all_points_y" not in region_shape_attributes:
continue
plt.imshow(img, cmap="gray")
polygon_x = region_shape_attributes["all_points_x"]
polygon_y = region_shape_attributes["all_points_y"]
polygon = list(zip(polygon_y, polygon_x))
poly_path = mplPath(polygon)
x, y = np.mgrid[
: img.shape[0], : img.shape[1]
]
coors = np.hstack(
(x.reshape(-1, 1), y.reshape(-1, 1))
)
mask = poly_path.contains_points(coors)
mask = mask.reshape([img.shape[0], img.shape[1]])
dirty = True
img_mask = np.logical_xor(img_mask, mask)
if dirty:
originals.append(img)
plt.imshow(img, cmap="gray")
plt.show()
masks.append(img_mask)
plt.imshow(img_mask, cmap="gray")
plt.show()
return originals, masks
def cut_images(images, width, height, xstep, ystep):
cut_array = []
for img in images:
for x in range(0, img.shape[1]-width, xstep):
for y in range(0, img.shape[0]-height, ystep):
cut = img[y: y + height, x: x + width]
cut_array.append(cut)
return cut_array
def load_image(filepath):
img = skimage.io.imread(filepath, as_gray=True)
return img
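
# A minimal usage sketch (the VIA-style CSV path and image directory below are
# placeholders, not files shipped with this module):
if __name__ == "__main__":
    originals, masks = load_annotated_dataset("annotations.csv", "images")
    patches = cut_images(masks, width=128, height=128, xstep=64, ystep=64)
    print(len(originals), len(masks), len(patches))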
|
the-stack_0_15380 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates some swift wrapper from some ops description protobuf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import c_api_util
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'api_def_path',
None,
'path to the api_def directory, e.g. tensorflow/core/api_def/base_api')
flags.DEFINE_string(
'output_path',
None,
'path for the generated swift file')
_WARNING = """// !!! THIS CODE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND !!!
//
"""
_HEADER = """// Copyright 2018-19 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"""
_OUTPUT_FILE = 'RawOpsGenerated.swift'
_RENAMED_KEYWORDS = {
'': 'empty',
'in': 'in_',
'var': 'var_',
'where': 'where_',
'if': 'if_',
'for': 'for_',
'while': 'while_',
'switch': 'switch_',
'protocol': 'protocol_',
'init': 'init_'}
_TYPE_PROTOCOLS = [
(set(), 'TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64}, 'UnsignedInteger & TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64}, 'BinaryInteger & TensorFlowScalar'),
({types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'FloatingPoint & TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64,
types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'Numeric & TensorFlowScalar')]
_SWIFTIFIED_TYPES = {
types_pb2.DT_FLOAT: 'Float',
types_pb2.DT_DOUBLE: 'Double',
types_pb2.DT_INT32: 'Int32',
types_pb2.DT_UINT8: 'UInt8',
types_pb2.DT_INT16: 'Int16',
types_pb2.DT_INT8: 'Int8',
types_pb2.DT_INT64: 'Int64',
types_pb2.DT_BOOL: 'Bool',
types_pb2.DT_UINT16: 'UInt16',
types_pb2.DT_UINT32: 'UInt32',
types_pb2.DT_UINT64: 'UInt64'}
_SWIFTIFIED_ATTR_TYPES = {
'int': 'Int64',
'float': 'Double',
'bool': 'Bool',
'string': 'String',
'type': 'TensorDataType',
'shape': 'TensorShape?',
'list(int)': '[Int32]',
'list(float)': '[Double]',
'list(bool)': '[Bool]',
'list(string)': '[String]',
'list(type)': '[TensorDataType]',
'list(shape)': '[TensorShape?]'}
_OMITTED_PARAMETER_NAMES = {
'x', 'y', 'a', 'b', 'input', 'tensor', 'values'}
_START_COMMENT = '///'
class UnableToGenerateCodeError(Exception):
def __init__(self, details):
self.details = details
super(UnableToGenerateCodeError, self).__init__()
def __str__(self):
return self.details
class Op(object):
def __init__(self, op_def, api_def, enum_store, string_valued=False):
self.op_def = op_def
self.api_def = api_def
self.enum_store = enum_store
self.string_valued = string_valued
self.inferred_counts = dict()
# Collect all the input and output arguments.
self.input_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.input_arg]
self.output_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.output_arg]
# Collect all attributes.
self.attrs = [
Attribute(attr, op=self)
for attr in op_def.attr]
self.type_attrs = [
attr for attr in self.attrs
if attr.is_type_attr]
def swift_function(self):
return '''
{documentation}@inlinable @inline(__always)
public static func {name}{generics}({input_args}
){return_type} {{
{body}
}}'''.format(
documentation=self._swift_documentation(),
name=self._swift_name(),
generics=self._swift_generics(),
input_args=self._swift_input_args(),
return_type=self._swift_return_type(),
body=self._swift_body())
def _swift_documentation(self):
def comment_block(text, indent_level):
"""Returns a commented block of text with some specified indentation."""
def indent(line_index):
if indent_level == 0:
return ''
if line_index:
return ' ' * indent_level
return ' ' * (indent_level - 1) + '- '
return ''.join([
(_START_COMMENT + ' ' + indent(line_index) + line + '\n'
if line else _START_COMMENT + '\n')
for line_index, line in enumerate(text.split('\n'))
])
def append_list(doc, args, arg_type):
"""Returns the documentation for lists of inputs/outputs/attributes."""
args = [arg for arg in args if arg.description]
if len(args) == 1:
block = '%s %s: %s' % (arg_type, args[0].name, args[0].description)
doc += _START_COMMENT + '\n'
doc += comment_block(block, indent_level=1)
elif len(args) > 1:
doc += '%s\n%s - %ss:\n' % (_START_COMMENT, _START_COMMENT, arg_type)
for arg in args:
block = '%s: %s' % (arg.name, arg.description)
doc += comment_block(block, indent_level=2)
return doc
doc = ''
if self.api_def.summary:
doc = comment_block(self.api_def.summary, indent_level=0)
if self.api_def.description:
doc += _START_COMMENT + '\n'
doc += comment_block(self.api_def.description, indent_level=0)
doc = append_list(doc, self.api_def.in_arg, 'Parameter')
doc = append_list(doc, self.api_def.attr, 'Attr')
doc = append_list(doc, self.api_def.out_arg, 'Output')
if doc and not doc.endswith('\n'):
doc = doc + '\n'
return doc
def _swift_name(self):
return swift_compatible_identifier(
self.op_def.name[0].lower() + self.op_def.name[1:])
def _swift_generics(self):
constraints = [
attr.generic_constraints(self.string_valued)
for attr in self.attrs]
constraints = [c for c in constraints if c is not None]
if len(constraints) == 1:
return '<' + ', '.join(constraints) + '>'
if len(constraints) > 1:
return '<\n ' + ',\n '.join(constraints) + '\n>'
return ''
def _swift_input_args(self):
args = ''
for arg in self.input_args:
args += '\n %s: %s,' % (arg.swift_arg_name, str(arg.swift_type(self.string_valued)))
for attr in self.attrs:
if not attr.is_inferred_type_attr and not attr.is_inferred_number_attr:
args += '\n %s: %s%s,' % (attr.swift_arg_name, attr.swift_type, attr.swift_default)
if args != '':
args = args[:-1]
return args
def _swift_return_type(self):
return_type = ''
if len(self.output_args) == 1:
return_type = ' -> ' + str(self.output_args[0].swift_type(self.string_valued))
elif len(self.output_args) > 1:
named_types = [
arg.swift_name + ': ' + str(arg.swift_type(self.string_valued))
for arg in self.output_args]
return_type = ' -> (' + ', '.join(named_types) + ')'
return return_type
def _swift_body(self):
setters = []
for attr in self.attrs:
setters.append(attr.swift_setter(self.string_valued))
for arg in self.input_args:
setters.append(arg.swift_setter())
counts = ['Int({})'.format(arg.swift_count) for arg in self.output_args]
if len(self.output_args) == 0:
body = 'let nOutputs = 0'
else:
body = 'let nOutputs = {}'.format(' + '.join(counts))
body += '\n let op = makeOp("{}", nOutputs)\n '.format(self.op_def.name)
body += '\n '.join(setters)
if len(self.output_args) == 0:
return body + '\n op.execute()'
body += '\n return op.execute({})'.format(', '.join(counts))
return body
class Argument(object):
def __init__(self, arg_def, op):
self.arg_def = arg_def
self.op = op
    self.is_list = arg_def.number_attr != '' \
        or arg_def.type_list_attr != ''
@property
def name(self):
return self.arg_def.name
@property
def swift_name(self):
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
def swift_type(self, string_valued=False):
return self.type.swift_type(
string_valued=self.allows_string and string_valued)
def swift_setter(self):
if self.is_list:
return 'op.addInputList({})'.format(self.swift_name)
else:
return 'op.addInput({})'.format(self.swift_name)
@property
def swift_count(self):
number_attr = self.arg_def.number_attr
if number_attr and number_attr in self.op.inferred_counts:
return self.op.inferred_counts[number_attr]
if self.arg_def.type_list_attr:
return self.op.inferred_counts[self.arg_def.type_list_attr]
return '1'
@property
def type(self):
number = self.arg_def.number_attr
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return Type('Tensor', base_type=type_attr.swift_name, number=number)
if self.arg_def.type_list_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_list_attr)
# There are never any numbered type lists.
return Type(type_attr.swift_name)
if self.arg_def.type in _SWIFTIFIED_TYPES:
base_type = _SWIFTIFIED_TYPES[self.arg_def.type]
return Type('Tensor', base_type=base_type, number=number)
if self.arg_def.type == types_pb2.DT_STRING:
return Type('Tensor', base_type='String', number=number)
if self.arg_def.type == types_pb2.DT_RESOURCE:
return Type('ResourceHandle', number=number)
if self.arg_def.type == types_pb2.DT_VARIANT:
return Type('VariantHandle', number=number)
raise UnableToGenerateCodeError(
'Unsupported type for argument "%s".' % self.name)
@property
def allows_string(self):
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return types_pb2.DT_STRING in type_attr.attr_def.allowed_values.list.type
return False
class Type(object):
def __init__(self, kind, base_type=None, number=None):
self.kind = kind
self.base_type = base_type
self.number = number
@property
def count(self):
return self.number if self.number else 1
def swift_type(self, string_valued=False):
if self.kind == 'Tensor':
if self.base_type == 'String' or string_valued:
name = 'StringTensor'
else:
name = 'Tensor<' + self.base_type + '>'
elif self.kind == 'TensorHandle':
name = 'TensorHandle<' + self.base_type + '>'
elif self.kind == 'ResourceHandle':
name = 'ResourceHandle'
elif self.kind == 'VariantHandle':
name = 'VariantHandle'
else:
name = self.kind
return ('[%s]' % name) if self.number else name
class Attribute(object):
"""Represents information extracted from op `type` and `list(type)` attributes."""
def __init__(self, attr_def, op):
self.attr_def = attr_def
self.op = op
self.is_type_attr = attr_def.type in ['type', 'list(type)']
# Check whether the value of this attribute can be
# inferred automatically (this only applies to
# type-valued attributes).
input_args = list(op.op_def.input_arg)
output_args = list(op.op_def.output_arg)
input_arg_type_attrs = set(
[arg.type_attr for arg in input_args] +
[arg.type_list_attr for arg in input_args])
output_arg_type_attrs = set(
[arg.type_attr for arg in output_args] +
[arg.type_list_attr for arg in output_args])
arg_type_attrs = input_arg_type_attrs.union(output_arg_type_attrs)
self.is_inferred_type_attr = attr_def.name in arg_type_attrs
self.is_output_type_attr = attr_def.name in output_arg_type_attrs
self.is_func_attr = self.attr_def.type == 'func'
# We use this for obtaining the `_typeList` property.
self.input_arg = None
self.is_inferred_number_attr = False
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr] or \
self.attr_def.name == arg.arg_def.number_attr:
self.input_arg = arg
self.is_inferred_number_attr = True
break
# The following properties are only relevant for
# non-inferred-type-valued attributes.
self._swift_type = ''
self._use_enum = False
if not self.is_inferred_type_attr and not self.is_func_attr:
if self.attr_def.type not in _SWIFTIFIED_ATTR_TYPES:
raise UnableToGenerateCodeError(
'Unsupported type for attribute "%s".'
% self.attr_def.name)
# Get the arg type.
self._swift_type = _SWIFTIFIED_ATTR_TYPES[self.attr_def.type]
# Check if the arg is an enum type.
self._use_enum = False
if self.attr_def.type == 'string':
allowed_values = tuple(sorted(self.attr_def.allowed_values.list.s))
if allowed_values:
self._swift_type = self.op.enum_store.maybe_add(
allowed_values, self.attr_def.name)
self._use_enum = True
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
self._swift_type = '({}) -> {}'.format(input_type, output_type)
@property
def name(self):
return self.attr_def.name
@property
def swift_name(self):
if self.is_inferred_type_attr:
return swift_compatible_identifier(
self.name, capitalize=True)
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
@property
def swift_type(self):
return self._swift_type
@property
def swift_default(self):
def swift_float(f):
if f == float('inf'): return 'Double.infinity'
if f == float('-inf'): return '-Double.infinity'
return '%g' % f
if not self.is_inferred_type_attr and self.attr_def.default_value:
default_value = self.attr_def.default_value
if default_value.HasField('b'):
default_value = str(default_value.b).lower()
elif default_value.HasField('i'):
default_value = str(default_value.i)
elif default_value.HasField('f'):
default_value = swift_float(default_value.f)
elif default_value.HasField('s') and default_value.s:
s = str(default_value.s, encoding='utf-8')
default_value = '.' + swift_compatible_identifier(s.lower()) \
if self._use_enum else '"' + s + '"'
elif default_value.HasField('list'):
if default_value.list.i:
default_values = [str(s) for s in default_value.list.i]
default_value = '[' + ', '.join(default_values) + ']'
elif default_value.list.f:
default_values = [swift_float(s) for s in default_value.list.f]
default_value = '[' + ', '.join(default_values) + ']'
else:
default_value = None
else:
default_value = None
if default_value is not None:
default_value = default_value.replace("\t", "\\t")
return ' = ' + default_value
return ''
def swift_setter(self, string_valued=False):
# Inferred-type-valued attributes.
if self.is_inferred_type_attr:
name = self.swift_name
if self.input_arg is not None:
name = self.input_arg.swift_name
if self.attr_def.type == 'list(type)' or self.is_inferred_number_attr:
self.op.inferred_counts[self.name] = name + '._typeList.count'
if self.attr_def.type == 'list(type)':
return 'op.updateAttribute("{}", {}._typeList)'.format(self.name, name)
if string_valued and self.allows_string:
return 'op.updateAttribute("{}", TensorDataType(TF_STRING))'.format(self.name)
return 'op.updateAttribute("{}", {}.tensorFlowDataType)'.format(self.name, self.swift_name)
if self.is_inferred_number_attr:
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.input_arg.swift_name + '.count'
return 'op.updateAttribute("{}", {}.count)'.format(self.name, self.input_arg.swift_name)
if self.attr_def.type == 'int':
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.swift_name
# Remaining attributes.
value = self.swift_name + '.cName' if self._use_enum else self.swift_name
return 'op.updateAttribute("{}", {})'.format(self.name, value)
def generic_constraints(self, string_valued):
# We use this for obtaining the `_typeList` property.
input_arg = None
if self.attr_def.type == 'list(type)':
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr]:
input_arg = arg
break
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
return '{}: TensorGroup,\n {}: TensorGroup'.format(
input_type, output_type)
if not self.is_inferred_type_attr:
return None
protocol = None
if self.attr_def.type == 'list(type)' and input_arg is None:
protocol = 'TensorGroup'
elif self.attr_def.type == 'list(type)':
protocol = 'TensorArrayProtocol'
elif self.attr_def.type == 'type':
if string_valued and self.allows_string:
return None
protocol = 'TensorFlowScalar'
allowed_types = set(self.attr_def.allowed_values.list.type)
allowed_types &= set(_SWIFTIFIED_TYPES.keys())
for types, protocol_name in _TYPE_PROTOCOLS:
if allowed_types.issubset(types):
protocol = protocol_name
break
if protocol is not None:
return self.swift_name + ': ' + protocol
return None
@property
def allows_string(self):
return types_pb2.DT_STRING in self.attr_def.allowed_values.list.type
def swift_compatible_identifier(s, capitalize=False):
"""Transforms an identifier to be more swift idiomatic."""
if s in _RENAMED_KEYWORDS:
return _RENAMED_KEYWORDS[s]
if capitalize:
s = s.capitalize()
without_underscores = []
capitalize_next_char = False
for c in s:
if c == '-' or c == '_' or c == '(' or c == ')':
capitalize_next_char = True
elif capitalize_next_char:
capitalize_next_char = False
without_underscores.append(c.upper())
else:
without_underscores.append(c)
return ''.join(without_underscores)
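# A few illustrative conversions (example inputs assumed, not taken from the op registry):
#   swift_compatible_identifier('in')                           -> 'in_'
#   swift_compatible_identifier('num_outputs')                  -> 'numOutputs'
#   swift_compatible_identifier('data_format', capitalize=True) -> 'DataFormat'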
class EnumStore(object):
"""Stores details on string attributes represented as swift enums."""
def __init__(self):
self._entries = {}
self._type_names = set()
self._counter = 1
def enum_codes(self):
"""Generates the swift code for enums."""
codes = []
entries = list(six.iteritems(self._entries))
for allowed_values, type_name in sorted(entries, key=lambda x: x[1]):
allowed_values = [str(a, encoding='utf-8') for a in allowed_values]
codes.append(
# FIXME: Re-add `@_frozen` after SR-9739 is resolved.
# https://bugs.swift.org/browse/SR-9739
# '@_frozen\n' +
'// @_frozen // SR-9739\n' +
'public enum {} {{\n'.format(type_name) +
'\n'.join([' case {}'.format(
swift_compatible_identifier(a.lower()))
for a in allowed_values]) +
'\n\n' +
' @inlinable\n' +
' var cName: String {\n' +
' @inline(__always)\n' +
' get {\n' +
' switch self {\n' +
'\n'.join([' case .{}: return "{}"'.format(
swift_compatible_identifier(a.lower()), a)
for a in allowed_values]) +
'\n' +
' }\n' +
' }\n' +
' }\n' +
'}')
return codes
def maybe_add(self, allowed_values, attr_def_name):
if allowed_values in self._entries:
return self._entries[allowed_values]
type_name = swift_compatible_identifier(attr_def_name, capitalize=True)
while type_name in self._type_names:
type_name += str(self._counter)
self._counter += 1
self._type_names.add(type_name)
self._entries[allowed_values] = type_name
return type_name
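# Illustration only (hypothetical attribute): a string attribute named "padding"
# with allowed values {"SAME", "VALID"} would be registered as the Swift enum
# `Padding` with cases `same` and `valid`, plus a `cName` accessor that maps each
# case back to its original TensorFlow string.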
def main(argv):
del argv # Unused.
if FLAGS.output_path is None:
raise ValueError('No output_path has been set')
api_def_map = c_api_util.ApiDefMap()
op_codes = []
enum_store = EnumStore()
op_names = api_def_map.op_names()
if FLAGS.api_def_path is not None:
for op_name in op_names:
path = os.path.join(FLAGS.api_def_path, 'api_def_%s.pbtxt' % op_name)
if not tf.gfile.Exists(path):
continue
with tf.gfile.Open(path, 'r') as fobj:
data = fobj.read()
try:
api_def_map.put_api_def(data)
except Exception as e:
print('Cannot load api def for %s: %s' % (op_name, str(e)))
num_generated = 0
for op_name in sorted(op_names):
try:
if op_name[0] == '_': continue
op_def = api_def_map.get_op_def(op_name)
if any(a.is_ref for a in op_def.input_arg):
raise UnableToGenerateCodeError('has ref-valued input')
if any(a.is_ref for a in op_def.output_arg):
raise UnableToGenerateCodeError('has ref-valued output')
api_def = api_def_map.get_api_def(bytes(op_name, 'utf8'))
# It would be nicer to handle `StringTensor` in a more
# general way by having `String` conform to `TensorFlowScalar`.
default_op = Op(op_def, api_def, enum_store, string_valued=False)
string_valued_op = Op(op_def, api_def, enum_store, string_valued=True)
default_code = default_op.swift_function()
string_valued_code = string_valued_op.swift_function()
op_codes.append(default_code)
if string_valued_code != default_code:
op_codes.append(string_valued_code)
num_generated += 1
except UnableToGenerateCodeError as e:
print('Cannot generate code for %s: %s' % (op_name, e.details))
print('Generated code for %d/%d ops.' % (num_generated, len(op_names)))
version_codes = [
'static let generatedTensorFlowVersion = "%s"' % tf.__version__,
'static let generatedTensorFlowGitVersion = "%s"' % tf.__git_version__]
swift_code = (
_WARNING +
_HEADER +
'import CTensorFlow\n\n' +
'@inlinable @inline(__always)\n' +
'func makeOp(_ name: String, _ nOutputs: Int)'+
' -> TFTensorOperation {\n' +
' _ExecutionContext.makeOp(name, nOutputs)\n' +
'}\n'+
'\npublic enum Raw {\n\n' +
'\n'.join(version_codes) +
'\n\n' +
'\n\n'.join(enum_store.enum_codes()) +
'\n\n' +
'\n'.join(op_codes) +
'\n\n}\n')
with tf.gfile.Open(FLAGS.output_path, 'w') as f:
f.write(swift_code)
if __name__ == '__main__':
tf.app.run(main)
|
the-stack_0_15381 | import itertools
from json import loads
from pathlib import Path
import sys
def encode_msg_text_for_github(msg):
# even though this is probably url quoting, we match the implementation at
# https://github.com/actions/toolkit/blob/af821474235d3c5e1f49cee7c6cf636abb0874c4/packages/core/src/command.ts#L36-L94
return msg.replace('%', '%25').replace('\r', '%0D').replace('\n', '%0A')
def format_msg(msg):
# Formatted for https://github.com/actions/toolkit/blob/master/docs/commands.md#log-level
# mapping between lean severity levels and github levels.
# github does not support info levels, which are emitted by `#check` etc:
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-debug-message
severity_map = {'information': 'warning'}
severity = msg.get('severity')
severity = severity_map.get(severity, severity)
# We include the filename / line number information as both message and metadata, to ensure
# that github shows it.
msg_text = f"{msg['file_name']}:{msg.get('pos_line')}:{msg.get('pos_col')}:\n{msg.get('text')}"
msg_text = encode_msg_text_for_github(msg_text)
return f"::{severity} file={msg['file_name']},line={msg.get('pos_line')},col={msg.get('pos_col')}::{msg_text}"
def write_and_print_noisy_files(noisy_files):
with open('src/.noisy_files', 'w') as f:
for file in noisy_files:
f.write(file + '\n')
print(file)
noisy_files = set()
for line in sys.stdin:
msg = loads(line)
print(format_msg(msg))
if msg.get('severity') == 'error':
if len(noisy_files) > 0:
print("Also, the following files were noisy:")
write_and_print_noisy_files(noisy_files)
sys.exit(1)
else:
noisy_files.add(str(Path(msg['file_name']).relative_to(Path.cwd())))
if len(noisy_files) > 0:
print("Build succeeded, but the following files were noisy:")
write_and_print_noisy_files(noisy_files)
sys.exit(1)
|
the-stack_0_15382 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from logging.handlers import RotatingFileHandler
LOG_FILE = '/var/log/enodebd.log'
MAX_BYTES = 1024 * 1024 * 10 # 10MB
BACKUP_COUNT = 5 # 10MB, 5 files, 50MB total
class EnodebdLogger:
"""
EnodebdLogger backs up debug logs with a RotatingFileHandler.
Debug logs will be propagated to root level if the root logger is set to
debug level.
"""
_LOGGER = logging.getLogger(__name__) # type: logging.Logger
@staticmethod
def init() -> None:
        if logging.root.level != logging.DEBUG:
EnodebdLogger._LOGGER.propagate = False
handler = RotatingFileHandler(
LOG_FILE,
maxBytes=MAX_BYTES,
backupCount=BACKUP_COUNT,
)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
EnodebdLogger._LOGGER.addHandler(handler)
EnodebdLogger._LOGGER.setLevel(logging.DEBUG)
@staticmethod
def debug(msg, *args, **kwargs):
EnodebdLogger._LOGGER.debug(msg, *args, **kwargs)
@staticmethod
def info(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.info(msg, *args, **kwargs)
EnodebdLogger._LOGGER.info(msg, *args, **kwargs)
@staticmethod
def warning(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.warning(msg, *args, **kwargs)
EnodebdLogger._LOGGER.warning(msg, *args, **kwargs)
@staticmethod
def error(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.error(msg, *args, **kwargs)
EnodebdLogger._LOGGER.error(msg, *args, **kwargs)
@staticmethod
def exception(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.exception(msg, *args, **kwargs)
EnodebdLogger._LOGGER.exception(msg, *args, **kwargs)
@staticmethod
def critical(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.critical(msg, *args, **kwargs)
EnodebdLogger._LOGGER.critical(msg, *args, **kwargs)
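# Minimal usage sketch (the call site below is assumed, not part of this module):
#   EnodebdLogger.init()
#   EnodebdLogger.info('enodebd service started')
# After init(), debug records always reach /var/log/enodebd.log through the
# rotating handler; info/warning/error are also forwarded to the root logger
# whenever propagation is disabled.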
|
the-stack_0_15385 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from config import yoloCfg,yoloWeights,AngleModelFlag
from config import AngleModelPb,AngleModelPbtxt
import numpy as np
import cv2
from apphelper.image import letterbox_image
if AngleModelFlag=='tf':
    ## Load the frozen graph into a TF session so the model can be run on the GPU
import tensorflow as tf
from tensorflow.python.platform import gfile
tf.compat.v1.disable_eager_execution()
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess = tf.compat.v1.Session(config=config)
with gfile.FastGFile(AngleModelPb, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
inputImg = sess.graph.get_tensor_by_name('input_1:0')
predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
    keep_prob = tf.compat.v1.placeholder(tf.float32)
else:
    angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb,AngleModelPbtxt)  ## OpenCV DNN model for text-orientation detection
textNet = cv2.dnn.readNetFromDarknet(yoloCfg,yoloWeights)  ## YOLO network for text localization
def text_detect(img,scale,maxScale,prob = 0.05):
thresh = prob
img_height,img_width = img.shape[:2]
inputBlob,f = letterbox_image(img,(scale,scale))
inputBlob = cv2.dnn.blobFromImage(inputBlob, scalefactor=1.0, size=(scale,scale),swapRB=True ,crop=False);
textNet.setInput(inputBlob/255.0)
outputName = textNet.getUnconnectedOutLayersNames()
outputs = textNet.forward(outputName)
class_ids = []
confidences = []
boxes = []
for output in outputs:
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > thresh:
center_x = int(detection[0] * scale/f)
center_y = int(detection[1] * scale/f)
width = int(detection[2] * scale/f)
height = int(detection[3] * scale/f)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
if class_id==1:
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([left, top,left+width, top+height ])
boxes = np.array(boxes)
confidences = np.array(confidences)
return boxes,confidences
def angle_detect_dnn(img,adjust=True):
"""
    Text orientation detection (OpenCV DNN backend).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax,xmin:xmax]  ## crop away the image borders
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True ,
mean=[103.939,116.779,123.68],crop=False);
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred,axis=1)[0]
return ROTATE[index]
def angle_detect_tf(img,adjust=True):
"""
    Text orientation detection (TensorFlow backend).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax,xmin:xmax]  ## crop away the image borders
img = cv2.resize(img,(224,224))
img = img[..., ::-1].astype(np.float32)
img[..., 0] -= 103.939
img[..., 1] -= 116.779
img[..., 2] -= 123.68
img = np.array([img])
out = sess.run(predictions, feed_dict={inputImg: img,
keep_prob: 0
})
index = np.argmax(out,axis=1)[0]
return ROTATE[index]
def angle_detect(img,adjust=True):
"""
    Text orientation detection; dispatches to the TF or OpenCV DNN backend.
"""
if AngleModelFlag=='tf':
return angle_detect_tf(img,adjust=adjust)
else:
return angle_detect_dnn(img,adjust=adjust) |
the-stack_0_15387 | #!/usr/bin/env python
from __future__ import print_function
import sys
import math
import numpy as np
#ROS Imports
import rospy
from sensor_msgs.msg import Image, LaserScan
from ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive
class reactive_follow_gap:
def __init__(self):
#Topics & Subscriptions,Publishers
lidarscan_topic = 'scan'
drive_topic = 'drive'
#drive_topic = '/vesc/high_level/ackermann_cmd_mux/input/nav_1'
self.lidar_sub = rospy.Subscriber( lidarscan_topic, LaserScan, self.lidar_callback, queue_size=1)
self.drive_pub = rospy.Publisher( drive_topic, AckermannDriveStamped, queue_size=1)
def preprocess_lidar(self, ranges):
""" Preprocess the LiDAR scan array. Expert implementation includes:
1.Setting each value to the mean over some window
2.Rejecting high values (eg. > 3m) """
n = len(ranges)
proc_ranges = [0]*n
for i in range(n):
proc_ranges[i] = (ranges[i] + ranges[i-1] + ranges[i-2])/3
if ranges[i] < 1.2:
proc_ranges[i] = 0
if ranges[i] == "nan":
proc_ranges[i] = max(proc_ranges[i-1], 0)
return proc_ranges
def find_max_gap(self, free_space_ranges):
""" Return the start index & end index of the max gap in free_space_ranges
"""
start_i,end_i, best_start, best_end = 0,0,0,0
for i in range(len(free_space_ranges)):
if free_space_ranges[i] > 0:
end_i += 1
else:
if end_i != start_i and end_i - start_i + 1 > best_end-best_start+1:
best_start = start_i
best_end = end_i
start_i = i
end_i = i
if end_i != start_i-1 and end_i - start_i + 1 > best_end-best_start+1:
best_start = start_i
best_end = end_i
return best_start, best_end
def find_best_point(self, start_i, end_i, ranges):
return (start_i+end_i)//2
def lidar_callback(self, data):
""" Process each LiDAR scan as per the Follow Gap algorithm & publish an AckermannDriveStamped Message
"""
ranges = data.ranges
proc_ranges = self.preprocess_lidar(ranges)
n = len(proc_ranges)
#Find closest point to LiDAR
index = np.argmin(proc_ranges) # proc_ranges.index(min(proc_ranges))
min_distance = ranges[index]
#Eliminate all points inside 'bubble' (set them to zero)
r = 0.2
l = ranges[index]
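        # Geometry note: a bubble of radius r centred on the closest obstacle at
        # distance l subtends a half-angle of asin(r/l) at the sensor; this is
        # clamped to asin(1) = pi/2 once the obstacle is closer than r.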
if l == 0:
delta_a = math.asin(0)
elif l > r:
delta_a = math.asin(r/l)
else:
delta_a = math.asin(1)
angle_range = [data.angle_increment*index - delta_a, data.angle_increment*index + delta_a]
#print(angle_range)
for i in range(len(proc_ranges)):
angle_point = data.angle_increment*i
if angle_range[0] <= angle_point <= angle_range[1]:
proc_ranges[i] = 0
#Find max length gap
start_i, end_i = self.find_max_gap(proc_ranges)
#print([start_i, end_i])
#Find the best point in the gap
best_index = self.find_best_point(start_i, end_i, proc_ranges)
#Publish Drive message
drive_msg = AckermannDriveStamped()
angle = (best_index-0.5*n)*data.angle_increment
if abs(angle) <= 5*math.pi/180:
velocity = 4
elif abs(angle) <= 10*math.pi/180:
velocity = 3.7
elif abs(angle) <= 15*math.pi/180:
velocity = 3.5
elif abs(angle) <= 20*math.pi/180:
velocity = 3
else:
velocity = 2.5
angle = np.clip(angle, -0.43, 0.43)
#print(angle)
#print(angle)
drive_msg.header.stamp = rospy.Time.now()
drive_msg.header.frame_id = "drive"
drive_msg.drive.speed = velocity
drive_msg.drive.steering_angle = angle
self.drive_pub.publish(drive_msg)
return
if __name__ == '__main__':
rospy.init_node("FollowGap_node", anonymous=True)
rfgs = reactive_follow_gap()
rospy.sleep(0.1)
rospy.spin() |
the-stack_0_15388 | #!/usr/bin/env org.lxg.python3
# -*- coding: UTF-8 -*-
import time
import threading
from queue import Queue
from threading import Thread
class MyThread(threading.Thread):
def run(self):
for i in range(5):
print('thread {}, @number: {}'.format(self.name, i))
time.sleep(1)
'''
'''
class Consumer(threading.Thread):
def __init__(self, cond, name):
        # Initialize the parent Thread
super(Consumer, self).__init__()
self.cond = cond
self.name = name
def run(self):
        # Sleep briefly so the Producer thread acquires the lock and calls wait() first
time.sleep(1)
self.cond.acquire()
        print(self.name + ": I'm buying these two items together, can you make them cheaper?")
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': I have placed the order, please update the price')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': Got it, my payment went through')
        self.cond.notify()
        self.cond.release()
        print(self.name + ': Waiting for the delivery')
class Producer(threading.Thread):
def __init__(self, cond, name):
super(Producer, self).__init__()
self.cond = cond
self.name = name
def run(self):
self.cond.acquire()
        # Release the lock and block here until notified, then re-acquire the lock.
        self.cond.wait()
        print(self.name + ': Sure, go ahead and place the order')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': Done, the price has been updated')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': Payment received, I will ship it right away')
        self.cond.release()
        print(self.name + ': Shipping the goods')
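# The two threads above take strict turns through Condition.wait()/notify():
# wait() releases the lock while blocking, so the buyer and seller exchange
# messages in order and neither side can run ahead of the conversation.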
def producerTest():
cond = threading.Condition()
    consumer = Consumer(cond, 'Buyer (两点水)')
    producer = Producer(cond, 'Seller (三点水)')
consumer.start()
producer.start()
isRead = True
def write(q):
    # Thread that writes data
    for value in ['两点水', '三点水', '四点水']:
        print('Value written to the Queue: {0}'.format(value))
q.put(value)
def read(q):
    # Thread that reads data
    while isRead:
        value = q.get(True)
        print('Value read from the Queue: {0}'.format(value))
def jiaoliu():
    '''Inter-thread communication via a Queue.'''
q = Queue()
t1 = Thread(target=write, args=(q,))
t2 = Thread(target=read, args=(q,))
t1.start()
t2.start()
def main():
print("Start main threading")
    # Create three threads
    threads = [MyThread() for i in range(3)]
    # Start all three threads
    for t in threads:
        t.start()
    # Join each newly created thread in turn
for t in threads:
t.join()
print("End Main threading")
if __name__ == '__main__':
# main()
# producerTest()
jiaoliu() |
the-stack_0_15389 | # Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.2.2
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x07N\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:45-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:45-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:7bcbff55-51b7\
-1d4d-99d2-ba0c1\
99cd193\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:c6f328a5-3dfe-8\
e42-813f-76ba1ad\
36d34\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:892a84c\
7-9c85-5445-8b17\
-2dcd7e777554\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:892a84c7-9c85\
-5445-8b17-2dcd7\
e777554\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:7b\
cbff55-51b7-1d4d\
-99d2-ba0c199cd1\
93\x22 stEvt:when=\x22\
2020-05-02T17:58\
:45-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>cv\
*\xf0\x00\x00\x01\x03IDAT8\x8d\xb5\xd3\xbd.\
\xc4A\x14\xc6\xe1\xfd\x22\xd4*W \x115\x12\x95\xc6\
MP\x88\xc6W\xc3jh(\x88+\x90\x08[\x92(\
\xb0(\x15\xe2\x0a6\x94\xa2\x10\x8d\xa7W)G\xe1\xfc\
\x93\xc9f\x97%1\xc9\x9b\xf3\x9e\x99\x93_&of\
J)\xa5\x12V\xd1\xc4y\xe8\x22\xabg\xe1\xd7RJ\
\xa5\x98/\x17\xbe\xd8\xb8\xc55f\xb1\x82\xa5\xa8\xf38\
\xc0;\x9e\xb0\xdb\x0e)\x9a&\xe6\x8a\xc3\x5c\x98\xc2\x1d\
\x86\xf0\x82\xfd\xfc\xbc\x18\xba\xc6R\xf8~TP\x8b~\
\x06\xad\xf0\xc3x\xc6^'\xc0b\xf8j\xd4J\x06x\
\xc4@\xf4#\xbe\xd6r\xaf\x80I\xbc\xa2\x85\x07\xdc\xe3\
\x03\xc7\xdf\x02\xb2\x0c\x061\x86\xf1\xd0(\x1a8\xec\x09\
\xd0%\xd8M4z\x06D\xa8y\xb0;\xbf\x02\xe4\xa0\
\xa8\xdb\xff\x0a\xa8\xfd\x15P<\xa4>\x94\xbb\xa8\xda\x0d\
p\x85\x85\x9f\xd2\xcfn\xb2\xd5\xfe\x0en\xe2\xb3\x9c\xe2\
2~a7\x9d\xe0\x0dG9`\x1a\xeb\xd8@=j\
'\xd5\xb3\xb9\x89\x94R\xe9\x13\xc7\xca\x11\xdd\x9e\x8c\xc8\
\x08\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07k\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
42-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:57-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:57-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:aae01a3c-2360\
-1d46-9ade-fe6f6\
df1ebcd\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:c514dd32-c213-1\
44f-8f5d-aafa529\
464c4\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:74b30ba\
8-4d75-ac4e-8da6\
-a0f7238df7c6\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:74b30ba8-4d75\
-ac4e-8da6-a0f72\
38df7c6\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:42-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:aa\
e01a3c-2360-1d46\
-9ade-fe6f6df1eb\
cd\x22 stEvt:when=\x22\
2020-05-02T17:59\
:57-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xd7B\
%\xed\x00\x00\x01 IDAT8\x11\xa5\xc1\xa1O\
\x94a\x1c\x00\xe0\xe7}\xbf\xf7\xf4\x98\xe0\x1cN6u\
\x93\xe0F\x85\xe2F\x901\xaa\xb3;\xfe\x06\xba\xddl\
p\xd3\xcdd3R\x08v\xfe\x01\x03\x85\x11!\xdd\x15\
\x0a\x8e\x93\xcf\x83\xbbW\xb6_\xf8\xc2\x81A\x9e'\xd5\
Z\xddE\x19\x0e\x87O\xf1\x0cI\x98\xe0\x12}\xffv\
\x89\x93\x82Ox\x8bC\x14,\xe2\x05\x0e\x90\xddl\x82\
5\xec\x14\xe1=\xbe\x0ao\xf0\x01\xaf\xd0G\xd5IB\
\x8b]\xdc/\xc8\x18\xeb\xfcA\x15Z\xb7\x9b\x22\x17!\
\xeb4HB\x83*$!\xe1\x0a\x19\xb5\x98U1\x15\
&n7F*f\xb5X\xc6\x9e\x900AE\x11\xae\
\xb0\x81\xfdbV\x83s\xfc@\x0f#l\xe31\xbe`\
\x01-^\xa2W\xcc\xba\x87S|\xd3y\x82e|\xd7\
\xd9B?\xa3\x22\xe9$\x14aNx\x88y\xe1\x810\
\x87i\x16F:\xe7\x18\x0b\x17\xc2\x08\x17\xc2Hh]\
+\xe8\xe15\x8e\x91\xb1\x81\xe7\xd8D\x1f\xbf\xb0\x86%\
\xac\xe3\x11~c\x05?\x0b\x8e\xf0\x0e\xabh\xd0\xe0\x0c\
\x1f\x91P\xd1C\xc6g$L\xb1\x80\x934\x18\x0c\xdc\
EA\xf2\xff\xea_\x8f\x14EW\xcad\x1ch\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x07@\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:15-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:15-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:b12940c3-b6fd\
-3945-8016-863be\
de12a0b\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:65b3ae21-70a7-8\
149-bae7-502ce0d\
711f3\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:e2c9e49\
4-4daf-de49-b2c1\
-d1f6ffd35064\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:e2c9e494-4daf\
-de49-b2c1-d1f6f\
fd35064\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:b1\
2940c3-b6fd-3945\
-8016-863bede12a\
0b\x22 stEvt:when=\x22\
2020-05-02T17:59\
:15-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>h\xd0\
x\xef\x00\x00\x00\xf5IDAT8\x11cx\xf6\xec\
\x19\x03\x0e\xcc\x84G\x0e\x8eA\x04#\x94#\x0c\xc4\x0b\
\x818\x09I\x013\x92<N\x03`69\x02\xf1\x0b\
>\x0b\xc4{\x81\xd8\x18\xcd \x82\x06\xb8\x01\xf1Q\
(\xbb\x1e\x88\x1f\x02\xf1t \x16\xc0\xe7-dAW\
>\x83$)\x07\xc4\xab\x80\xf8\x1e\x10\xa7\xe0\xf2\x16\
\xba\x01\xa7\xa1l\x16$\x0d\xee@|\x1e\x88\x0f\x01\xb1\
\x19\x928#>\x03`\xe2\xc8N\xae|\x06\x01\x93\x80\
X\x10f\x08.\x03\x18q8\x99\x1b\x88O\x00\xf1}\
\xe6\xc1\xe7\x02F$W0\x22y+\x1b\x88\xaf\x02\
\xf1\x14\x987q\x19\xc0\x84\x16u\xfe\xd0p8\x0e\x8d\
n\x06B\x81\x08\xc3\xfa@\xbc\x05\x88\xaf\x03q2z\
\x00b3\xe0\x04\x94-\x0aM\x03\xa0\xb4\xd0\x0a\xc4\x5c\
\xb8\xd2\x02zB\x02\xf9/\x07\x1aHK\x80X\x85P\
jD6@\x07j\xe3\x11 \xb6!6?\xa0\x0b(\
\xe3I\x07D\x19@RV\x06a\x00\x03\xce\xd7l^\
\xdb>3\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07\xb2\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:36-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:36-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:3af4a655-5aa4\
-5746-9437-b3a77\
d38c26d\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:504f5e6a-e941-b\
a4a-a875-3bc6392\
4df2b\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:935523c\
b-05ec-f54c-8a3c\
-5dcaeefefa6b\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:935523cb-05ec\
-f54c-8a3c-5dcae\
efefa6b\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:3a\
f4a655-5aa4-5746\
-9437-b3a77d38c2\
6d\x22 stEvt:when=\x22\
2020-05-02T17:59\
:36-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xe4\xec\
\x9a\xc2\x00\x00\x01gIDAT8\x11\x05\xc1\xbd\x8b\
\xcf\x01\x00\xc0\xe1\xcf\xf7\x1c\x83\xcb{\x0c\xfc\x03^'\
&Y\x94\xc1\xa6\x13)\xddJ7\xd8\x0cB\x8a(%\
e\xe0n \xa5\xc4\xc0\xdd\xc9\xa2d\xa6\xe4N\xf8\x91\
\xab[0<\xd8\xfc\x09\x9e'\x840`@\xd8\x80Y\
\xbc\xc4=\xacG\x180 \x84B\x08a\x15\xc2Q\xfc\
\xc1\x19\xfc\xc5\x11\x84U\x08!\x14\xc2\x04\xb6\x22L\xe1\
7n\x22\xdc\x02\x9cF\xd8\x86\xb5\x08\x85\xf0\x08?\xb1\
\x84\x11Nc\x02a\x02S\xf8\x8a%\xfc\xc2C\x84!\
\x9c\xc2'\x1c\xc4ylE\x08\xab\x11\xc26\x9c\xc7A\
|\xc6\x09\x14.\xe1\x076!\x84}x\x83\x11^c\
\x0fB\xd8\x82\x9f\xb8\x80Bx\x86y\x84\xf5\xf8\x88\xbb\
\xd8\x8bY,b\x02\xe1\x05\x9e\x22\x0c!\xdc\xc0\x0c\xc2\
I|@\x08a\x11\x93\x08\xb3\xb8\x8a0\x16\xc25<\
F8\x8ce\xacC\xd8\x88e\x1cBx\x82+\x08c\
a\x07\x16q\x1b!<\xc6\x08\xd71\xc2}\x84p\x07\
\xef\xb1\x1d\x85\xdb\xf8\x86\x10\xc2j\x9c\xc3\x03Lc@\
\x08\xe1;n\xa1\xb0\x1b#\xcc\xe0\x1d\x8e!\x84\x10\xc2\
$\xdeb\x06_\xb0\x13\x850\x8d9\x5c\xc6\x0a\xe6\xb1\
\x1f\xe1\x00\x16\xb0\x82\xcb\x98\xc3Y\x84\xb1\x10B\x08\x9b\
\xf1\x1a\xcf\x11\x16\xf0\x0a\x1b\x11B\x08\x850`\xc0\x1a\
\x84=\xf8\x87y\xfc\xc3.\x845\x180 \x14B\x08\
!\x8c\xe38.b\x12\xe3\x08!\x84\xd0\x7f\x0e\x0a\xf7\
(Z\x0b\xf5\xbb\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x07v\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:59-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:59-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:d22633e2-f550\
-4544-b265-0f893\
0d4e793\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:1a11682d-8df9-0\
647-8a98-49ec6ec\
1729a\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:f9be694\
4-daa9-1f48-a13b\
-6018e8ecb298\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:f9be6944-daa9\
-1f48-a13b-6018e\
8ecb298\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:d2\
2633e2-f550-4544\
-b265-0f8930d4e7\
93\x22 stEvt:when=\x22\
2020-05-02T17:58\
:59-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\x8d&\
\xae\x84\x00\x00\x01+IDAT8\xcb\x95\xd2M+\
\xc4Q\x14\xc7\xf1kF3\x94\xb5\x85\xad\xb2\x12\xe5a\
\x8f(e\xa3\xbc\x01\xe3!be\xc8\xbb\xf0.\xc4b\
\xd4x\xdc\xb1\x94\x91\xb0D6V\xff,<\xbc\x05\xdf\
\x93su:\xeeU\xa6>\xdd\xee\xff\xde\xf3\xbb\x0fs\
CQ\x14\xc1)i\xdb\x81%\xf4h\xbf=17\xe4\
\x8ae\xf21^p\x8f\xbe\x5cH\xaaX\xda\x0b\x9cj\
\x7f\x1b\xaf\x18J\x85\xa4\x8a\xcfq\xe2VZ\xc0\x1b&\
\xb5_\xb6\x01\x7f\x15\x97\xcd\xf8,>0g\x17\x0df\
[\xbe\xb8\xe4\x82\xa4\x1d\xc7\xbb^\xee\xcf\x0e\xaaZ\x98\
+\x0e\xee\xec\xc3\xc5\xf7o=\x06L\xe0\xd9\x15\xb7\xa5\
\xfe2\x132\x8d\x07T\xa43\x85\xab\xc4\xca\x15t)\
\xbf\xb3A\xdc\xca[\x89\x017n\xc22\x9ep\xad\xef\
\xa0\x85~\x134\xaa\xdf\xab\xb9\x803\xec\xe8\x03\x1a\xd0\
\x80U\x130\x92\x0b\x88\xb7\xdd\xc4\xbc)h`\xe5?\
\x01\x87n\xc5#\x17\xf0\xeb\x08-w\xdb\x07\xae\xa0\xa9\
\xaf1\xf6\xe5Xw1`L\x1fGC\xed\xe1\x13\x8b\
n\x07\x8f\xd8\xc5>.5\xa0S\x06\xbbQ\xc3\x16\xea\
\xd8\xc4\x1azM\x80\xbc\xc0\x0d\x1d\xab\xeb\xdc\x199\xf2\
\x17\xdeZ\xed\xfe\x19H9N\x00\x00\x00\x00IEN\
D\xaeB`\x82\
\x00\x00\x07r\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:27-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:27-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:41a72668-2697\
-c34b-b659-1d4d1\
9bb8739\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:0ba35df4-ed1f-9\
e45-afaf-26118e1\
dca6a\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:7aa692d\
a-b29d-b342-add3\
-1bcf9ea2dbb8\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:7aa692da-b29d\
-b342-add3-1bcf9\
ea2dbb8\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:41\
a72668-2697-c34b\
-b659-1d4d19bb87\
39\x22 stEvt:when=\x22\
2020-05-02T17:58\
:27-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>/`\
\x9cx\x00\x00\x01'IDAT8\xcb\x85\xd3\xbd+\
\xc5a\x14\x07\xf0\x9f\xeb\xe5..F\x85\x85R&E\
6\x91U\x16\xa3\x7f\x81\x85n\xd15\x9a\x94A\xfe\x02\
\x9b\x01\x03&\x8b\x92Q\x06/\x93RJ\xfa\x94\xd5_\
`\xf0<uz\xba\xdc_==\xe7\xe5w\xbe\xe7|\
\xcf9O\x85\xaa\xc3\xe9\xc7\x1e\xc6\x92\xde\x15\xfd\xe5\xcf\
\xd1YK\xf7\x12^Q\xef\x04P+\x80z\x92\xbc\x8d\
\xd3\xbf\x12\x95\xc1\xb3\x18-lg\xd8L\xf2\x1cF\xa2\
\xbfBw2L\xf9\xfd\x16B\xa6!\xbc`&\xe9\x07\
\xb8\x8b\x95d\xa1\x8eg\xb4\x92\xde\x13*z\xc4@\x08\
\xba\xc5Q\x09p\x82\xcb6\xfd\xd8I\xbe\x08:\x8cO\
\xace\x0a\xeb\xf8\x08T\xbaB\x93\xae\xb0\x15\x002\xc8\
\x22\xbe1\x9e\x95\xf7Tn\xecI\x1d\x0f\x98\x0fUe\
\xdf.\xee\xd1\xc8%o\xe0\x0d\x8d@c!\x01\xf4\x16\
\xc0+\xf8\xc2D\xa6\x90\xf9\x1e\xe3:\x044qQ\x04\
O&\xfe\xab\xd9\x1e\x97\xa2\x177X\x0e\xf3o\x16\x00\
\x87\xd8\x8f\xb6r\xb3\xfa\xd2=\x80\xa70\xffZ\xf0\xff\
\xfb\x16\xf2\x99N\x00\x83\xed\xf6\xff?\x80\x9c\xa9\x85\xf3\
6o\xa4#@\x15\xe6<\xdb);\xaa\x1fED\xd8\
P\x22Am\x98\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x07\x8f\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:34-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:34-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:e6b34f6d-3d90\
-5a46-b4d8-abf36\
82ac6ff\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:fcd8b74c-779b-0\
244-adcb-5d7fa6a\
bee87\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:7c6de3b\
6-f9f3-af4a-961e\
-8f7d5a7750c1\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:7c6de3b6-f9f3\
-af4a-961e-8f7d5\
a7750c1\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:e6\
b34f6d-3d90-5a46\
-b4d8-abf3682ac6\
ff\x22 stEvt:when=\x22\
2020-05-02T17:59\
:34-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\x92\xea\
G\xa3\x00\x00\x01DIDAT8\xcb\x8d\xd3\xcd+\
DQ\x1c\xc6\xf1;\x98\xc6\xc6F\x94-K\x12M^\
66^\xd76\xb2\xb0\xf0R6\xec\x06\xd9\x88By\
\xd9X\xcb\x96\x90\x11Y\xb2\x95\x97\xbc'\x0b\xd1D\xe9\
\x16;\xff\x81\xef\xaf\x9e[\xa7\xd3\x9d1\x8b\xcf=\xf7\
\xdcs\xees\xde:A\x18\x86\x95\xd8\xc01\x0e\xb0\x8d\
]dU\xb7r\x0f\xfb\x98D\x80\x84\xca\xc0\x1e]\xf8\
\xc68\xc6\xf0\xaa\x1f\x87\xf5\xc3\x08\xd6\xf1\x8b/,\xba\
!\xf6\xd2\x8b\x8b(\x11;\x18r\xea\xa6\x1e\xe7\xa8\xc3\
\x1bV\xdc\x19X\xc0\x8d\xd39\xabQ\xdd\x80&<\xea\
\xbd\x069\xcc\x14\x0a\xc8i\x1fN\xb4\xf6+\x05T\xa8\
O'^\x90\x8a\x0bh\xc3\x84\xf6\xc4\x962\xaaz7\
\x92\xea\x93\xc6}\x5c@\xc2\x9bzZ\xa7\x14x\xed\xad\
x\x88\x0b(U\xd9\x81g\x8db\xcbY\xf6\xda[\xf2\
\x05\x98\x14\x9e0\xa7z\x03>0\xe0\xf4)\x18`\x1b\
t\xeb\x8d\xb8\x8a\xcdb\x03j\xf1\xae\x91\xa3o\xa7\x98\
/& \x1aq\x09\x9fX\xc3\x19\xeeP\x95/\xa0\xc7\
\x09(q:\x0db\x0b\x0b\xa8\xd6\xb7\xb2\xb8S\xe8\xc3\
\xb5w|\xffiT@\xb9U\xda\xf1\xe3\xdd>s\xa4\
\x90\x0d\xdb\x09\xf5\xb8\x9avz\x82X\x81\xef\xc4\x1fH\
\xda\x1c\xee0\x89\x09\xf0\xfd90r|d\x09\x09\xc0\
\x8dY\x5c\xc2=\xef\x0f]-2\x09\xd3\x1bX\xe9\xbe\
\xbe\x1ep`\x9b\xaaxi`\xda\x9fh\xf2C\xee\xc6\
9\xe9\xe7\xd9\xbb\x08\xee\xa1/\x83\x19\xcf\x0b\xf9\xbe\xeb\
\xfb\xc4\x98\xa29\x8e\xc9\x11l'\xcc\xaf\xcc\x87eZ\
\xcf\xe4\x5cX\xff\x08&\xcc\xbb\x5c\x8b#\x0e\xb0\x9b\x83\
\x13m.a\x22=\x9ap\xfc\xc19\xac\x80\xeb\x19\x90\
\xa4l\xa6\xbadx{\xeffG3\x11\x17\x04&j\
7\x93|[\xdfX\x14\x1bY\xe5np\x0a\x09@\x9d\
\x80\x0b\xbd9\xef\xf8qX\x88\xa76\xdb\xd3\xf9\xba\x06\
\x1ckT\x86S\xf9\xfa\x9a=\xb5\x0b\xcc\x0e5\x8b\x86\
\xc4\x9c\xfcb8\x84\xa0\x9e\xd8\xf2\xe97+\xef\xcdq\
\x11\x9b!\x84\xc9\x88\xb5=\xd3\xcc\xec\x90ST\x086\
\x01U\x8b\xa4\xea\xf5\x0e\xb2\xa9\xbf']n5r\xfe\
u\x13\x1c\x8b\x99>\xa1X\x08\xf3\x0e\x0a`\xa5\xdf\x92\
-\xfe={C\xd7D\xae\xe1\x91\x93\xfd^B\xc6.\
%\xdco\x7f\x9fl\xcav{\xde`&\xc3\x8b\xc7\x83\
\x8c\xf8h\x1c,\x8c\xc59\x1e\xd7\x18\xee\xd3\xcd\x94g\
a\xb3\xb5Z$\x1c\x09\xb52 v\xef\xb4\xba\xc8\x10\
\xdc\xe7\xa6\xae<\xf4\x06\xbf\xaf\xf7\xab\x84\xf1\xdc\xb1I\
:$\x8aJ\x01\x07\xa8e\xcez\xb6\x9d\xf4\x1f+!\
\xa0v&+\xaf\xad9\xb29\x09\xe1 \x85a\xf1\xc5\
p\x89\xf7L\xf6Wu\xde\x0b\xaf\x1a\x91\xec\xb0Z(\
Y\x0dL<e\x02,\xdb\xc2\x0evE.\xa7_x\
x\x07[\xe8 +\xaf\xc5e\xdb\x97-\x0a\x12B\xf9\
[\x89\x12{N\xe7\xdbt\x17\x8b\xce;\xe0\xfc\x82)\
\xa1\xbfC\xe1\x88\xb1\xb1\xac\xe9\xdb\x16\x12\xc5\x86\x88\xc3\
*\x5c\x89\xd6K\xf65\xd9Y=\x8dB\xf5\x06\x1bu\
\x920\xd5p\x92\xe5\x91\xa8\xa9\xcf\xf18\x0c\xb9]\x9d\
\x0e\xd5\x88\x5c\xfdQ\xaa\xacR\xf8\x01\xb2\xdd\x22\xcc&\
o\x188vn\x83\x18m\xf2\xd0\x95\x10U\xb8A\xa8\
\xfeN\xc4/~0&c\x12\xc2\x8c\xaa\xa4\x0dJ\xd6\
K<\x17\x91\xe0\x09xF\xc0I*@([\x10'\
]\x13\xc6\xd3YH;\x88\xa2\x02\xab\xbb2\xec3\x8c\
W\xd88|5:\xe6W\xa7\xe7\x18\x17\x04)}I\
gG)\x88\x05lKH+\xdb\x07\xfd\xc4\xeb6s\
\x11'\xe1\xb5\x89>\x14b\x9e=\x85\xf5\xd3=\xc8c\
\xe8Mx\xd9\x13B\xff@9\xf3\xff\xcf\x88S\xb4\xfc\
$\xecM]\xb3\x02\x91\xda\x06\xc2k`\xc6\xad2\x0a\
\xdbo3\x9c8\xb3=%\xaf\xc4\xd7\x13\x1eLb\x09\
0\x1cW\x9fS\xf2(\x0a\xa5W\x84;\x05\xe7\x84\xe5\
!\x92{`-\xceN\x18\xcf\x1d\x06D>lu5\
\xd7\x9aH\xec\xa0\x0f\xabf\x849*\xf9C\xd3\x11\x8f\
\x9b\x13:\x1ab\x989b\xa0q\xc21\x8c\x15\x18\x00\
\x7fTs\x09\xab\xdd^e$\xc5\xacf\xb2\x9b0\xa9\
\xdej\x19\x17\x9c\xa1u\xb6\xd9\xa5nC\xf1\x1f\xa0o\
\xbc\x04\x11\x1ec\xee\xf9\x8d\xc9C\x03\x0e\xda\x06Eo\
\xb5\xb9^\xe5\xb9\xf0\x04=k\x19U\x89(\x99\xad!\
<_Wx\x9a\xde5\xc45\xc6E\x8e\x86\xf0\x1b\xa0\
.\x0e\x8em\x98\x19d\xc9\xed\xc1B\xf3z\xd4\xd2\x84\
\xf1t\xbb\xe6\x7f\xdc*h\xa9\xf3\x0b(f/*Z\
F\xf8\xb7*lv\x13\x83\xdd:\xc8\x16\x9a\xc2\x80\xdc\
\xc6j\xdb/!\xa3k9+\xbf&\xa2anp\x7f\
\x0b\xb2\xbd;\xed\xf2G:\xd7c\xd5|M\xfb\x9e\x82\
X\xb63a\xab\xe9X#s\xdee{\xf2.\xda@\
\xa0\x97\xb2\xc2\x17\xf1\xde\x1c\xcf2\x0d\xf2\x91\xcd=\xb1\
<\xae&~2\x19E\xed\x0a\x06\xbc.\xd7\x12\xc6\xe9\
3bF\xe1an\x9f1\x99U\xe1&\xb3\x82\xfb+\
\xe8ScX\xf8Zci5\xa5\x9d\xfb\xa1t\x86\xa2\
|-\xe6\xe9\xf1\xac\xf4f\x94\x07\x06\xcf\xbd\xcd\xbd\xbe\
X\x84\xdb\xe9gc\xeaXO\x9fw\x9c^\xe4L\xd2\
B\xc3\x96\xd6a\xa3\xef\x015\xbd\x8d\x12Y\xc0@\xbf\
\x0e\x0b\x9ab\xe4W%V\xfb\x22&\xe7p\xa8\xd6'\
\xce\x1cj\xfek'\x7f\xa0W5\x06\xeb}:~\x00\
\x036\x1ay\x7f\x1c\x03>\x12K\xa0\x1dQ\xd3y\xe8\
\x1a\xfbC<c\xf9V'\xda\xfcgL\xd2:|\xf3\
y\xda\xe6O\xe0]J\xfb\x7f\x80\x80\x1e\xa1\xee&\xdc\
w\xab\xbd\x0a\x04Y\x05\xc2\xbb\x98I[\x8b\xe2{\x8a\
\xf9\x9f\x8c&CTo\xb2@\xd6\x12o\xa8\xc1\x04\xce\
\xc0YW\x8b\xfb\xaf\xd3\xce#\x19\x9b\x19\x10\xcb\xfe,\
\xb67y\xf6\x18\x16\x8d\x0f\xd9\x9f\x0c\xe1\x8d\xa2\xed'\
\xa0Ky\x7fE[,\xb5\xd1,\xc8\xe3h\xc3X\xda\
z\x12c>\x06\xe2\xea\xc0\x22y\x04\x93\xb7\x1dc3\
Y\x117\xf7\xe5\xe6\x80\xd3\xa1\x88\x04\xaf\xc4]\x0bk\
+3'\xbc\xcd\x83B\xf7\xe24\xdb{!\xac\xea(\
mk\xccyZO\xb1j}\xc2\xcc9p\x84\xcf!\
\x80\x97\xa1\xe4O\xe8\xc8\x18\xec\xed\xd9LZ\x0f\x06q\
>\x03r\x0f!\xf6\xf5\x10\xe6\x8d\x98\xa8k\xe8\xb0\xdf\
9?\xc7\xe45\x0cc\xa5\xee\xc9\x04\xf73\x0e\xae\x12\
\xc4Ok\xfc\x14\xe3\xe1\x90~\xbb\xe2_\xcd\xb9\x14B\
G9\x97\xf1X\x0a\x87\xaab\x1c{c1\x11\xfd\xf8\
\xf9}\xaboA\xc4\xabp4y\xb1t/c\xe87\
(\x7f\x88\xe9?\xcd\xfc\xdfc]hok\xac\xb3\xa3\
\xa9\xaf\x0e\xe3t*\x0a\xa8\xd7\x97Z1\xde\x0d0\x10\
\xea\xa1\x1f\xb5@d\xec\xc7w\x9a\xe0\xb7p\xd1\xd5\xfe\
V\x03ojd\xe8\x06d\xacw\x02\xad\x85\x10|\x07\
\x8e\xa2\xc2\x87Qb\x8e\xa3\xd2\x09\xf8\x06\x9a\xe1\xe4\xf1\
\xf1\xfde(\xa3~\xff\xc6\x83\xc8\xc8\x05&\xc6\xd1\x1f\
\xaf`\x1d&q(\x03\xd1\x04\xaeP\x0c{\xabmN\
\xa3\xb9\x15\x82\xf0\x19\xcd\xf7B\x10\xc5\xf8N\xfc\xa6\x9d\
U\x10\xc4-\x10\x84\xb7\x98\xfc\x81fK\x98 \xc7\xd5\
6\xb0*+\x07G\x02\xdd\x1e\xf8=F\x98D\x9c=\
\xe0Lw\x9b\xfb#\xcd\x02x\x85\xf7\xa7\x19\xb9\xde\xdb\
(\xd2\xc2Q7\xc1$G\x0fd\x11u\xa6<\x88\x8d\
\xe0>_\xd5\x8b1\xff|\x17\x16\xda\x81\x88\xdc\xb6x\
a\x1b\xd0\xa7\x8e\xe8T-XH\x1d \xd2&p\xc2\
\xa3\xf1\xf0\x96Z\x93\xb4\x0b+|\x1d\x14^\x1fV\xbd\
\x0e]\xa1\x19\xab\x7f\x1d\xec\xab\x15\x134\x18\x96\xb6\x09\
\xd6\x7f\x03\xb2\xf7(X\xbb\xaf\xefh\xdeY\xc7\x0a\xed\
\x06A|\xcb\x8a8\x80\xfbk\xd0\x05\xda\xf3\xcdU\xfc\
\xdd\x82\xfa\xd7\xc1\xf6J\xe9\xc4:c\xa9\xfc\x9a\xf2=\
\x10j\x1f\xca7\xc1\x91\xce\xa3|\x0b\xdf9\x9d\xf2\xf5\
\xc8v7h\xce\x07\xe0\x12w\x1dq\xba\x81s\xf7\xdd\
\xe0\xba\xb1rl\xdf\x95\x1d\xd7t\x84\xeb\xb8\x97#T\
\xa7{\xb8\xf68\x8e\xe9\x06\xdc\xb9\xd4\x1d\xf1\xb9\x95\xe7\
\xcan\xfc\x9cXp\x13\xec\xdew\xdfq+\xd4-\x14\
?>\xae\x0e\xff=\x17\xbfp\x84\xee\xc4\x98+\xbb\xef\
8\xb1\xe0\xease'\xd2\x1c\xc7s\x13\xeeML\xc7\
\xe5\x1cwreG@\xeey\xc7\xc9\xfcx9\x02r\
\x9c\xcf\x95\x9d\xc7\xd8\xbd\xeb$\x81+;\xdf\x85\xe3\x8e\
\x17I\x9a\xf8\xbfur?\x06n\xd4\x8a\x8b\x00\x00\x00\
\x00IEND\xaeB`\x82\
\x00\x00\x04u\
\x00\
\x00\x10\xbex\x9c\xedU],\xd6\x7f\x14\x7f\x98\x96\xcc\
K$\xe4e2\xf32\xab5\xcd\xbb2\xd2\x96\x94\xf7\
a\x930c\xc2P\x11K-\x14&\x9a\xa1F%\xef\
\xef\xb6\x90%/7V,+.\xb8\xe8&#/\x17\
\xd2\xc6\xba\xc8f.|\xeas\xfe\xfbY\x17\x7f\x17\xff\
+\x17\xff\xe7\xfb\xec<\xcf\xf7w\xbe\xe7w\xce\xf9~\
>\xe7\x9cG\xa5\xd2\xf8\xf39yR\xc5oU\xf7Q\
\x95\xcaD\xa5R9\xfe\x91?*\x95\xaf\xea\x1f\xbd\xac\
\xa3\xaa\x7f[P\x8bZ\xd4\xa2\x96\x83\x92C\x87\x0e\xc1\
\xd4\xd4\x14fff8~\xfc8\xf4\xf4\xf4p\xec\xd8\
1\x9c8qBt\xe6\xe6\xe6rN\xa1\x8e\xcf\xd4s\
ohh(\xef\xf0\xcc\xd8\xd8X\xf4\xdc+6\x8a\xd0\
\xc6\xc4\xc4d\xcf'\xf7\xda\xda\xda\x12\xdf\xd1\xd1\x11/\
_\xbe\xc4\xabW\xafPZZ\x8a\xc4\xc4D\xdc\xbbw\
\x0f\xf7\xef\xdfGyy9\x1e=z\x84\xe6\xe6f\xd4\
\xd5\xd5\xe1\xc9\x93'\xa2\xa7\xed\xd3\xa7O\x91\x90\x90\x80\
\x87\x0f\x1f\xa2\xb2\xb2Rlo\xdd\xba\xb5g\xc3\xf3\xf6\
\xf6v\xd1\xe7\xe7\xe7\xe3\xf1\xe3\xc7\xe2\x9f:\xda\xdb\xda\
\xdaJ|;;;\x8c\x8c\x8c\x88Mcc\xa3\x9c7\
55\xe1\xca\x95+b\x1f\x14\x14\x847o\xdeH\x1c\
\xe6\xd9\xd2\xd2\x22\xbe;;;\xd1\xd7\xd7'\xb6\xdc_\
\xbdz\x15\x96\x96\x96\xa8\xad\xad\xc5\xf9\xf3\xe7\x11\x11\x11\
\x81\x89\x89\x09\x9c;wN\xee000\x80\xbbw\xef\
\xe2\xed\xdb\xb7\x12\xcb\xc8\xc8H\xe2\x1f>|\x18/^\
\xbc@NN\x0e\xba\xbb\xbbQSS\x83\x8a\x8a\x0a\xdc\
\xb9s\x07mmm\xc8\xcc\xccDOO\x0f\x8a\x8a\x8a\
$\xce\xd8\xd8\x18\xb2\xb2\xb2\x90\x96\x96\x86\xe1\xe1aD\
FF\xca\xfb\xe1\xe1\xe1\x08\x0b\x0b\xc3\xbbw\xef\xc4?\
\xf7\xfd\xfd\xfd\x88\x8b\x8b\x93{<x\xf0@\xde\x1f\x1a\
\x1aBAA\x81\xf0\xc4\xf8\x0e\x0e\x0e\xb8q\xe3\x06\xce\
\x9c9\x03kkk\xc9\xcb\xcd\xcd\x0d7o\xdeDa\
a!._\xbe,wKJJ\xc2\xa9S\xa7\x90\x92\
\x92\x02\x7f\x7f\x7f\xe8\xeb\xeb\xc3\xc7\xc7\x07\xbe\xbe\xbe\xb0\
\xb1\xb1\x81\x93\x93\x93`J\xdf\xe4\x85XPG\xb1\xb0\
\xb0@nn\xae\xe4P\x5c\x5c\x0cggg\xb97\xe3\
3\xf6\xd7\xaf_\x11\x1b\x1b+z\xd6\x1e\xf9\xee\xe8\xe8\
\x10\x1cZ[[\x91\x97\x97\x87\xf7\xef\xdf#::\x1a\
?~\xfc\x90;hiiIN\x1f>|\x80\x81\x81\
\x81\xf8\xd2\xd5\xd5\x15Nxw\x1d\x1d\x1d\xb1\xa1\x9e\xf9\
\x8d\x8f\x8f\xa3\xaa\xaa\x0aeeer\xa6\xd4\x7fFF\
\x06\xbe}\xfb&\xfc~\xfe\xfcY\xf8'\xc6\x01\x01\x01\
\x9271\xee\xed\xed\xc5\xf2\xf22\xea\xeb\xeb199\
\x89\xc1\xc1AXYYatt\x14\xdf\xbf\x7f\xc7\xb5\
k\xd7\xc4W`` ~\xfd\xfa\x85\x95\x95\x15LO\
OK.\xac\xc9\xae\xae.lll`ff\x06\xd5\
\xd5\xd58r\xe4\xc8^\xfc\xdb\xb7o\xe3\xcb\x97/R\
\xa7\xb3\xb3\xb3\x92#\xb1'\xe7\xac7\xf2\x99\x9d\x9d\x8d\
\xcd\xcdM\xa9\x01\xee\x1b\x1a\x1a\xc4\xe6\xf5\xeb\xd7HM\
M\x95\x9c\x88\x1d}\xef\xb7vww\xb1\xb3\xb3#\xfd\
\xa1`O\xe1{\xc9\xc9\xc9\xf0\xf0\xf0\x80\x8b\x8b\x0b.\
^\xbc(\x9c3NHH\x88\xd4\x84\x9f\x9f\x9f\xf4$\
\xf1g=\x11w\xda*}\x1e\x1c\x1c\x8c\x0b\x17.`\
nnn\xdf\xf8\x5c\xeb\xeb\xeb\xc2\x8d\x86\x86\xc6^|\
\xfa#\xc7g\xcf\x9e\x95\xba\xfb\xf8\xf1\xa3\xe0EL\xd8\
g%%%\xf8\xf4\xe9\x13\x9e?\x7f\x8e\xb5\xb55\xa9\
\x0d\x9e\xb3gO\x9f>-|>{\xf6Lj~k\
kK\xe2,--\x096\xc4\x9f\xbc\xb3\xee\x89/{\
\xe4o\xee\x15\xfc\xc9-k\x90\x9c-..\x22>>\
^\xfa\x95q\x99\x0fk\xe2\xfa\xf5\xeb\x92?\xf3dM\
\xcf\xcf\xcf#**\x0a\xee\xee\xee\x12\x8f\xeb\xe7\xcf\x9f\
\xc2\x01g\x1a\xe3p\x96rFjjj\xee;\x7fy\
\xff\xed\xedm\xa9\xa9\xa9\xa9)\xe1\x998\xb0\xe7VW\
W\xb1\xb0\xb0\x80K\x97.\xc9\x1cb|\xdeW\xe9\x0f\
e\x86\xa5\xa7\xa7\xcb\xecb_r\x9e\xff\x97\xf9O\xbf\
\x9ca\xacS\xd6\x17\xb9`\x8f\x93k\xea9\xefxw\
\x9e\x13\x07\x0a\xe7\x12q\xe3\x8c#\x06111\xf2\xcb\
: \xbf\xdc\xf3\x8cxs>Q\xa8\xa3p\x1f\x1a\x1a\
*\xff\x09\x8cooo/\xb5\xe7\xe9\xe9\x09///\
\xf9\xf5\xf6\xf6\x16\x9d\xa2\xe73\xc5\xd5\xd5U\xb0\xa1^\
\xb1\xe5\xac\xe5\x99\xf2L\xe1\xcc\xfd\xfbYy_\x11r\
Fn\x0e\xfa\xbfW-jQ\xcb\xffZ\x0et\xfd\x06\
\xe0\xd1!\xf2\
\x00\x00\x12\x0b\
\x00\
\x00\x01\x00\x01\x00@@\x00\x00\x00\x00 \x00\xf5\x11\x00\
\x00\x16\x00\x00\x00\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\
\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\
\x00\x00\xaaiq\xde\x00\x00\x11\xbcIDATx\x9c\
\xed\x9a{pT\xe5\xf9\xc7?\xe7\xecfw\x93M\xba\
\x89\x92\x84\x5c\xcc\x854\xc5\x90\x12\x04\xa4B\x13*\x15\
D\x14;(b\xad\x0eQ0\xa3\x16\xd1\xb62\xd3\xca\
\x94\x96\x0e\xbdL\xa7Z\x1b\x19ia\xaa\xe3\x05\x1c\x14\
\xb0-\x1d\x0b\x8aE\x03IJ\xac`n\x5c\x86\xd0l\
\xc8&!\x84\x84\x5c\xf6\x92\xbd\x9es\x9e\xdf\x1f\xb8\xa7\
\xa4\xd5\xf9u:\xbf\x99\xfd\xcd\x98\xef\xcc\x99=\xe7}\
\xdf\xf3\x9e\xe7\xfd\x9e\xe7\xf6>g\x15@\xf8\x1cCM\
\xb4\x00\x89\xc6$\x01\x89\x16 \xd1\x98$ \xd1\x02$\
\x1a\x93\x04$Z\x80Dc\x92\x80D\x0b\x90hL\x12\
\x90h\x01\x12\x8dI\x02\x12-@\xa21I@\xa2\x05\
H4>\xf7\x04X\xe3'\x8a\xa2`\xb1X\x10\xf9\xf4\
\xfa\x88\xa2(\x88\x88\xf9\xfb\xffi\x8ca\x18(\x8a\xf2\
\x1f\xcf\xa3(\x0a\x9a\xa6]9\xe7s^\x11\xb2\xaa\xaa\
\x8aa\x18\x14\x17\x17\xf3\xe8\xa3\x8f\xd2\xd1\xd1AAA\
\x01\xdd\xdd\xdd8\x9dN\xd2\xd2\xd2\x18\x1d\x1d\xa5\xa0\xa0\
\x80\xce\xceN\xa6M\x9b\xc6\xf9\xf3\xe7III!\x18\
\x0c\x92\x9e\x9e\x8e\xcdf\xc3\xeb\xf5\x92\x93\x93c\x8eq\
\xbb\xdd\x94\x94\x94000\x80\xa6i\x5cs\xcd5\x0c\
\x0f\x0fSPP@oo/EEEtvvb\
\xb5ZQ\x14\x05\x97\xcb\x05\xc0\xd8\xd8\x18\xb9\xb9\xb9\x9c\
?\x7f\x9e\x92\x92\x12<\x1e\x0f\xa9\xa9\xa9\x8c\x8d\x8d\x91\
\x99\x99\x89\xae\xeb\x04\x83A\xae\xbd\xf6Zzzz\xf8\
\xd2\x97\xbeDkk+\xc5\xc5\xc5\xf8|>RSS\
\x11\x11B\xa1\x10S\xa6L\xa1\xaf\xaf\x8f\xa2\xa2\x22\xba\
\xba\xba(((\xe0\xc2\x85\x0b\x14\x16\x16\xb2u\xebV\
\x02\x81\x00\x8a\xc5b\x11]\xd7Y\xbcx1\x8f<\xf2\
\x08\xba\xae344DEE\x05===\xcc\x9a5\
\x8b\xfa\xfaz\xca\xca\xca\xe8\xee\xee\xa6\xa0\xa0\x00\x9b\xcd\
FWW\x17\xd7]w\x1d\x1d\x1d\x1d\xcc\x993\x87\xd6\
\xd6V\xca\xca\xca\x18\x19\x19\xc1\xe9tb\xb1X\x18\x1a\
\x1ab\xea\xd4\xa9\xd4\xd5\xd5QYYI{{;3\
g\xce\xa4\xb7\xb7\x97\xdc\xdc\x5c\x92\x93\x93\x19\x19\x19\xa1\
\xb8\xb8\x98\x8e\x8e\x0e***hjj2\xe7s\xb9\
\x5cdff\xe2\xf1x\xc8\xce\xcefpp\x90\xb9s\
\xe7\xd2\xdc\xdcLyy9mmm\xe4\xe5\xe5\x11\x8b\
\xc5\x88\xc5b\xe6\x0b\x989s&\xcd\xcd\xcd\xcc\x9d;\
\x973g\xce\x90\x9f\x9f\x8f\xd7\xeb%\x18\x0c2u\xea\
T|>\x1f\xbf\xfe\xf5\xaf9r\xe4\xc8\x15\x13PU\
\x15\x11\xe1\xe6\x9bo\xe6\xee\xbb\xef&\x10\x08p\xfa\xf4\
iv\xef\xde\x8d\xcb\xe5b\xe3\xc6\x8d\xb8\x5c.\x02\x81\
\x00{\xf7\xee\xa5\xbd\xbd\x1dM\xd30\x0c\x83\x1f\xfd\xe8\
G\x5c\x7f\xfd\xf5\xe8\xbaN\x7f\x7f??\xf9\xc9O\x88\
F\xa3\xd8l6\xee\xb9\xe7\x1e\xee\xbe\xfbn\xdcn7\
\x19\x19\x19\x18\x86a\x12|\xf8\xf0a>\xfe\xf8c\x22\
\x91\x08III\xc4b1\xd3\x07\xa5\xa5\xa5\xa1\xaa*\
\xa3\xa3\xa38\x1c\x0e\xc2\xe10V\xab\x15M\xd3P\xd5\
+~\xdb0\x0c\xe2\xdak\xb3\xd9\x88F\xa3\x13\xc6\x88\
\x08\x22b\xb6\xc5\x9f\xf1\xaf\xbf\x00b\xb1X\x04\x90C\
\x87\x0eI\x1c>\x9fO\xf2\xf2\xf2d\xdd\xbaur5\
V\xae\x5c)\x5c\xf1\x1b\xb2t\xe9R\xb3=\x1c\x0e\x8b\
\x88\xc8\xed\xb7\xdfn\xf6\xef\xda\xb5\xcb\xec\xd7u]\x86\
\x86\x86\xcc\xeb\xa1\xa1!Y\xbdz\xb5\xb8\x5c.s\xbc\
\xa2(\xa2(\x8a|\xff\xfb\xdf\x97-[\xb6\xc8\xacY\
\xb3&\xf4\xc5\xcf\xff\x8f\x0fDUU\x01\xe4\xe8\xd1\xa3\
\xa2i\x9aD\xa3Q\xd1u]\x1a\x1a\x1a\xe4\x1f\xff\xf8\
\x87\x88\x88\xc4b1\xd14M\xee\xbf\xff~Q\x14E\
l6\x9b\x1c?~\x5c\x0c\xc3\x90\xbe\xbe>\xf1x<\
\x12\x8b\xc5\xa4\xae\xaeN\xecv\xbb(\x8a\x22\xbf\xfa\xd5\
\xafD\xd34\x09\x06\x832\x7f\xfe|IJJ\x92\xe2\
\xe2b\xf9\xfd\xef\x7f/\xb1XLv\xee\xdc)O>\
\xf9\xa4X\xadV\xb1Z\xad\xa2(\x8a\xdcy\xe7\x9d\x13\
\x08\xdf\xbbw\xaf\xcc\x993\xc7$!>.~\xc4\xe5\
\xb7X,\xff\xd6~\xf5\xf5\xd5m\xffF@|\x92S\
\xa7N\x89\x88\x88\xc7\xe3\x91\xf1\xf1qS\x88\xee\xeen\
\xe9\xeb\xeb\x13\x11\x91\xe7\x9e{N\x00SP\xc30\xe4\
\x9b\xdf\xfc\xa6<\xfa\xe8\xa3\x12\x8b\xc5dppP\x0a\
\x0b\x0bEQ\x14\xd9\xb6m\x9b\x88\x88\x84B!)/\
/7\x1fz\xed\xb5\xd7J \x10\x90K\x97.\xc9\xf3\
\xcf?/\xaa\xaa\x9a/\xe1/\x7f\xf9\x8bh\x9a&\xe1\
pXt]\x17\x11\x11M\xd3d\xcb\x96-\xa6\xa6^\
}\xc4\xef\x8bkr\x9c\xcc\xf8\x9c\xff\x9b\xe6\xa8\xf1\xf8\
i\xb3\xd90\x0c\x03\x80w\xdf}\x97\xda\xdaZ3T\
<\xf0\xc0\x03\xb4\xb4\xb4\x00\x90\x93\x93\x83\xa2(TW\
W#\x22D\xa3Q\xb2\xb2\xb2\xc8\xca\xca2\xed\xf7\xa5\
\x97^\x9a\x10w\x1d\x0e\x07\xb3g\xcf6\xaf\xa3\xd1(\
^\xaf\x97\xac\xac,\xae\xbb\xee:\x0c\xc3\xc00\x0c\xb6\
m\xdb\xc6\xf2\xe5\xcb\xb1X,\xd8\xedvTUE\xd7\
u,\x16\x0b\x9b7o\xa6\xb1\xb1\x91;\xef\xbc\x13U\
U\xcd~\xc30p\xb9\x5c\x94\x95\x95\xa1\xeb:\x9a\xa6\
\x99\xfe\xc90\x8c\xcf\xcc#\xcc0\x18\x1f\x10\x8dF\x09\
\x04\x02h\x9aFSS\x13\xef\xbc\xf3\x0ev\xbb\x9d\x93\
'Or\xec\xd81\xb2\xb3\xb3\xd14\x8d`0He\
e%\xf7\xde{/\x86a`\xb7\xdby\xe1\x85\x17\xb8\
x\xf1\x22~\xbf\x9fp8\xcc\x92%K\xa8\xaa\xaa\xa2\
\xab\xab\xcbL8\x8a\x8a\x8a())a\xc6\x8c\x19|\
\xe7;\xdf!77\x17\x80\x93'O\x02\xf0\xd0C\x0f\
\xb1~\xfdz4M\xa3\xb7\xb7\x97\xc3\x87\x0fS^^\
\xce\x82\x05\x0b\xd0u\x1d\x80\xf9\xf3\xe7\xf3\xf6\xdbos\
\xe0\xc0\x01\x82\xc1 \xaf\xbe\xfa\xaa\xe9\xac\xf3\xf3\xf3Y\
\xb7n\x1d\xc5\xc5\xc5\x84B!\x8e\x1e=\x8a\xae\xeb\x8c\
\x8d\x8d\x11\x0a\x85\xd04\xcd\x9c\xe7_a\xaa\xd1\xcb/\
\xbf,\x22\x22?\xf8\xc1\x0f&\xa8\x89\xd5j\x95\x9e\x9e\
\x1e\x11\x119t\xe8\x90\x1c9rDDD\xc6\xc6\xc6\
d\xd9\xb2e2\x7f\xfe|\xa9\xaa\xaa\x92/~\xf1\x8b\
\xf2\xd7\xbf\xfeUDDN\x9f>-;v\xec\x90\xcf\
Bgg\xa7\xfc\xf4\xa7?\x15\x9b\xcd&\x80\xd4\xd4\xd4\
\x88\xd7\xeb\x95\xc7\x1f\x7f\xdc\x94\xe7\xd6[o5M \
\xeeH\xe3f!\x22\x12\x8dF%\x14\x0a}\xea\xfc\xc1\
`P|>\x9f466\xca\xb2e\xcb\xc4\xe5rM\
0\x97\xf8\xa1|\xe2\x18\x10\x11\x92\x93\x93Y\xbe|9\
\x1f}\xf4\x11===\x13X\xba\xff\xfe\xfb\x99>}\
:\xc3\xc3\xc3\xf8\xfd~\xae\xb9\xe6\x1a\xea\xea\xeaL\xd3\
\x88\xa3\xb4\xb4\x94\x993g\x92\x94\x94D4\x1a\x9d\x10\
\xefc\xb1\x18\x81@\x80\xfe\xfe~\xda\xda\xda\x08\x87\xc3\
\x00\xa6*\xe7\xe6\xe6\xe2p8\x00\xe8\xef\xef\xc7n\xb7\
s\xea\xd4)\xf2\xf3\xf3\xcd\xb0\x07\x98\xaam\xb1X\xcc\
\xebxJ\x1c\xd7h\xab\xd5\xcc\xf2\xe9\xe9\xe9a\xfd\xfa\
\xf5\x1c>|\x98X,6A\x13\xccT8))\x89\
o}\xeb[$%%166\xc6\x85\x0b\x17\x983\
g\x0e===\xb8\xddn\xce\x9e=;a\xa1_\xfe\
\xf2\x97\x99?\x7f>\xb3g\xcf\xc6\xedv\xd3\xdc\xdc\xcc\
\x91#G\x00\x987o\x1e\xd3\xa7O\xc7\xe1pp\xf0\
\xe0A\xfa\xfb\xfb\x01\xb8\xe5\x96[HOO\xa7\xa1\xa1\
\x01]\xd7Y\xb4h\x11^\xaf\x97\xf7\xdf\x7f\xdf\x8c\xd7\
K\x97.e\xf1\xe2\xc5X\xadV\xdex\xe3\x0d6n\
\xdc\xc8\xca\x95+1\x0c\xc3\x5c\xb0\xa9\xba\x9f,\xf6\xd3\
\xf6\x01\xf1>]\xd7\xb1Z\xad\xec\xdf\xbf\x9f'\x9ex\
\x82\x8b\x17/\x9a9B\xfcf\x01$%%E|>\
\x9f\x88\x88\x0c\x0f\x0f\xcb\xe5\xcb\x97Mu\x8a\xc5bR\
__/%%%\x92\x91\x911!\xbe_\x8d]\xbb\
vIZZ\x9a\xd4\xd6\xd6\x9am}}}\x92\x9e\x9e\
.V\xabU<\x1e\x8f\x88\x88\xacZ\xb5J\xbe\xf2\x95\
\xaf\x88\xdf\xef\x97\xf1\xf1qY\xb6l\x99\x00\xb2h\xd1\
\x22S\xdd\xb7n\xdd*\x80\xdcw\xdf}\x13\xcc\xe0\xb3\
\xa0\xeb\xbah\x9a&\x86aLh\x8f\xb7\x9d?\x7f^\
\x16,X \xc9\xc9\xc9\x13\xcd N\x80\xc3\xe1\x90\xce\
\xceN\x89\xc5b\xd2\xd4\xd4$\xb3f\xcd\x92\xd9\xb3g\
\xcb\x8e\x1d;\xccI\x9fy\xe6\x19\xa9\xaa\xaa\x92\xe1\xe1\
ainn\x96\xe5\xcb\x97KII\x89\xac]\xbb\xd6\
\x5c\xdc\x92%Kd\xf3\xe6\xcd\x12\x8b\xc5$\x12\x89\x88\
\x88HMM\x8d\xa8\xaa*\xdd\xdd\xdd\x12\x8b\xc5d\xd5\
\xaaU\xa2\xaa\xaa|\xf0\xc1\x07\x22\x22\xe2v\xbb\xa5\xb4\
\xb4TN\x9f>-\x22\x22\x7f\xfc\xe3\x1f\xcd\x10\x97\x95\
\x95%\x83\x83\x83\x22r%\xe4\x1a\x86a.V\xd34\
3?\xf9W2\xe22\x8f\x8e\x8eJ$\x12\x91\xcb\x97\
/\xcb\xc2\x85\x0b%55U\xacV\xeb?\xc3\xa0|\
\xb2]\x8cD\x22\x04\x83A,\x16\x0b\x03\x03\x03tw\
w\xd3\xd2\xd2B__\x9f\xa9b\x16\x8b\x85-[\xb6\
\xd0\xda\xda\xca\x8b/\xbe\xc8\x81\x03\x07p\xbb\xdd\xbc\xf2\
\xca+\xc4b1\x82\xc1 \xb7\xddv\x1bs\xe7\xce\xc5\
j\xb5\xa2\xaa*\xfb\xf7\xef\xe7\xc6\x1bo\xe4\xa1\x87\x1e\
\xe2\xfc\xf9\xf3X\xadVJKK1\x0c\x83\xea\xeaj\
.]\xba\xc4\xb4i\xd3hjjb\xc6\x8c\x19\xf4\xf7\
\xf7\xb3n\xdd:\x14EAUU\x06\x07\x07M?\x13\
\xb7uUU\xb1X,X,\x16\xacV+\x16\x8b\x85\
\x93'O\xd2\xd0\xd0\xc0\xd0\xd0\x90\xe9+D\x84\x86\x86\
\x06\x82\xc1 \x8a\xa2\x98\x1b\xaf\xabaz\x0aUU\x09\
\x85B\x88\x08\xb7\xddv\x1b\xfd\xfd\xfd\x8c\x8e\x8e\x92\x97\
\x97\x87a\x18\xec\xd9\xb3\x87\xed\xdb\xb7s\xea\xd4)\xdc\
n\xb7i\x8fq\x07:22BGG\x07ccc\
$''\x03p\xe0\xc0\x01~\xfb\xdb\xdf\xf2\xc3\x1f\xfe\
\x90[o\xbd\x95@ \x00@JJ\x0a\x00\x17.\x5c\
\xa0\xa6\xa6\x86?\xfc\xe1\x0fddd\xd0\xdb\xdb\xcb\xea\
\xd5\xab\xb9t\xe9\x92\xb9/P\x14\x85\xdf\xfc\xe67,\
^\xbc\x18EQ\xf0\xf9|tvv\xe2\xf7\xfb\xf1\xf9\
|\xf4\xf6\xf6R__\xcf\x9f\xff\xfcg\xc2\xe10\x99\
\x99\x99\xfc\xeew\xbfc\xd5\xaaU\x04\x02\x01\xea\xeb\xeb\
\xb9\xe1\x86\x1b\xb0\xd9lf~\x10\x0f\xcd\xa6\x0fP\x14\
E,\x16\x8b466J$\x12\x91\x93'OJm\
m\xad\xfc\xf2\x97\xbf\x94u\xeb\xd6IEE\x85\xa92\
\x1f~\xf8\xa1\x04\x83A9~\xfc\xb8TTT\x88\xdd\
n\x97{\xee\xb9G\xa2\xd1\xa8l\xdb\xb6M\xb2\xb3\xb3\
\xe5\xe0\xc1\x83\x12\x8b\xc5d\xdf\xbe}\x92\x92\x92\x22\x1f\
~\xf8\xa1i\x8f\x9a\xa6Imm\xad\x00f\x08|\xef\
\xbd\xf7$\x16\x8b\xc9\x9e={\xcc\xb0k\xaa\xe8'\xa1\
\xebo\x7f\xfb\x9b\x88\x88l\xdc\xb8\xf13\xb3\xba\xf8}\
EEEb\x18\x86\x0c\x0e\x0e\xca\xc2\x85\x0b\xe5\xc8\x91\
#r\xf9\xf2e\xb9\xe3\x8e;L\x1f\x107}\xab\x5c\
UUq:\x9d\xd8l6233y\xea\xa9\xa7&\
\xa8JRR\x12\x9a\xa6\xf1\xd4SO\xf1\xce;\xefp\
\xe3\x8d7\xb2{\xf7nRSS),,\x04\xc0\xe9\
tr\xe9\xd2%\x5c.\x17V\xab\x95i\xd3\xa6\x11\x0c\
\x06y\xec\xb1\xc7hmm5\xb5&\x1e\xea\xe2j\x19\
\x08\x04\xb0Z\xad\xe6.\xf0\xea\xea\x94\xc5bAQ\x14\
jkk\xf9\xeaW\xbf\xca\xe9\xd3\xa7M\xd5\xd7u\xdd\
T\xf7xhKKK\xc3\xe3\xf1\xd0\xd0\xd0@ee\
%\x16\x8b\x85\xba\xba:3*\xa5\xa6\xa6\x12\x8dF\xcd\
\xac\xd74\x01\x11\xe1\xc0\x81\x03\x1c<x\x10\x9f\xcf\xf7\
oe\xa4X,\x86\xaa\xaa455q\xc3\x0d7\xb0\
v\xedZ:::\xb8\xe3\x8e;\xd8\xb7o\x1f\x07\x0f\
\x1e\xa4\xb1\xb1\x11\x80W^y\x85\x96\x96\x16^\x7f\xfd\
u\x00\xda\xda\xdaX\xb9r%\x8b\x16-\xa2\xbf\xbf\x9f\
\xa6\xa6&\xe0J\xf6)\x22\xec\xdb\xb7\x0f\xaf\xd7\xcb\xd1\
\xa3G\x11\x11s\x9b\x1a\xb7{\x80\xb7\xdez\x8b\x81\x81\
\x01\x5c.\x17\xba\xae\x7ffV\xe7\xf7\xfb\x01\xf8\xc5/\
~\xc1\xa1C\x87P\x14\x85\xba\xba:n\xbf\xfdvz\
{{\x09\x06\x83\x9f\x9e\x07\xc4\x17\x9c\x9f\x9f\x8f\xddn\
'//\x0f]\xd7\xf1x<deeq\xee\xdc9\
\x02\x81\x00\xe5\xe5\xe5\x8c\x8c\x8c\x98L\xa6\xa6\xa6\x02\x10\
\x89D\x08\x85B\xe4\xe7\xe7\xa3\xeb:\x22\x82\xa6i\xf8\
\xfd~\x9cN'\x86a0>>Nww\xb7\xf9\xf6\
\xd3\xd2\xd2\x98>}:\x1d\x1d\x1d\xf8|>S\xa8\x99\
3g\x92\x99\x99\xc9\xc8\xc8\x08~\xbf\x9f\xcc\xccL\xbc\
^/o\xbd\xf5\x16n\xb7\x9b\x9f\xfd\xecg\xf8\xfd~\
TU%%%\x05\x9b\xcd\x86\xddngdd\x04U\
U9y\xf2$\x9a\xa6q\xe2\xc4\x09\x9ey\xe6\x19\x14\
E\xe1\xdb\xdf\xfe6;w\xee\xc4\xef\xf7300@\
KK\x0b\xe3\xe3\xe3\xff$ \xaez;w\xee\xe4\x81\
\x07\x1e\xa0\xa3\xa3\x83S\xa7Nq\xf6\xecY6m\xda\
\xc4\xbe}\xfbX\xbbv-\xc3\xc3\xc3\xd4\xd6\xd6RY\
Y\xc9\x94)S(++\xa3\xa7\xa7\x87\x8f>\xfa\x88\
\xe4\xe4d\x96,Y\xc2\x89\x13'\x08\x06\x83\x84B!\
\xdcn7\x8f?\xfe8'N\x9c ##\x03\xa7\xd3\
\xc9\x8a\x15+hkkc\xd7\xae]\xac^\xbd\x9a\x0f\
>\xf8\x80\xa5K\x97\x92\x93\x93\xc3\xcb/\xbf\xccM7\
\xdd\xc4\x993g\xf0\xfb\xfdX,\x16\x0c\xc3 ;;\
\x9b\xb2\xb22.\x5c\xb8@kk+\xed\xed\xedL\x99\
2\x85\xea\xeaj|>\x1fn\xb7\x9b\xe3\xc7\x8f\xb3a\
\xc3\x06\xd6\xacY\xc3k\xaf\xbdFgg'\xbd\xbd\xbd\
\xd8\xedvf\xcc\x98Akk\xeb\x95E+\x0a\xeb\xd6\
\xad\xc3\xe3\xf1\x5c\xd1\xfe\xb8\x03\xb1X,R]]-\
\x86a\xc8}\xf7\xdd'\x80\xfc\xfc\xe7?\x17\x11\x91\xc6\
\xc6FY\xbdz\xb5\xc4b1y\xf0\xc1\x07\x05\x90\xa7\
\x9f~ZDDn\xba\xe9&\x01d\xd3\xa6Mr\xec\
\xd81\xd9\xbau\xabl\xda\xb4I\xbe\xf7\xbd\xef\xc9\x13\
O<!\xc1`Pjkk\xe5\xa5\x97^\x92P(\
$_\xfb\xda\xd7$++K\x22\x91\x88\xec\xde\xbd[\
B\xa1\x90L\x992E\x9e{\xee9\x11\x11\xa9\xaa\xaa\
\x12@\xecv\xbb\xa4\xa7\xa7\x0b \x0f?\xfc\xb0\x88\x88\
\xacY\xb3\xc6tz\x85\x85\x85\x12\x89Dd\xfb\xf6\xed\
f\x9b\xc7\xe3\x91\xd7_\x7f]\x8a\x8a\x8aDD\xe4\xde\
{\xef\x95\xd7^{M>\xfe\xf8cy\xe1\x85\x17\xe4\
\xc7?\xfe\xb1l\xd8\xb0A\xecv\xfb\x15'{\xb5\x0f\
\xd0u\x9dH$\x82\xa2(\x94\x96\x96\x020u\xeaT\
\x86\x87\x87y\xe3\x8d7\xa8\xad\xad\xc50\x0crrr\
\xb0Z\xad\x84\xc3a\xd3f-\x16\x0b\x0e\x87\xc3T}\
UUINN\xc6\xe1p0::JUU\x155\
55466R__\xcf\x9a5k\xb0\xd9lX\
\xadV\x1c\x0e\x07\xeb\xd7\xaf\x9f\x90\xbf\xc7\xcd\xea\xear\
\x98\x88\xe0\xf5zMg\x1a\x08\x04\xb0\xd9lTTT\
\x98\xf7\xec\xd8\xb1\x83\xdc\xdc\x5c\x9e\x7f\xfey\x02\x81\x00\
\x87\x0e\x1d\x22==\x1d\xaf\xd7k\xa6\xc5iii\xe6\
\x1cf= --\x8d\xed\xdb\xb7\xf3\xdd\xef~\x17]\
\xd7Y\xb6l\x19\x1b6l\xc0f\xb3\x91\x94\x94\xc4\x8e\
\x1d;p\xbb\xdd\xd8l6\xc6\xc6\xc6\xd04\x8dp8\
l&Q\xba\xae\x93\x93\x93Cii)999\xa6\
\x0f\x09\x85B\xe8\xba\xce\xbcy\xf3x\xec\xb1\xc7\xb8\xf9\
\xe6\x9b\xd9\xb8q#k\xd6\xac\xe1\xd0\xa1C477\
\xf3\xe2\x8b/\xf2\xe4\x93O\xf2\xa7?\xfd\x89\x96\x96\x16\
\xde|\xf3M\xde|\xf3Mv\xed\xda\xc5\x8a\x15+\xcc\
\xf8\xad(\x0a\xdd\xdd\xdd\xe8\xbaNMM\x0d\xaf\xbe\xfa\
*\xd1h\x94\xbc\xbc<\x9e}\xf6YJKKy\xf6\
\xd9g)..f\xc5\x8a\x15\xec\xdc\xb9\x13\x9f\xcfG\
JJ\x0a3f\xcc ==\x9d\xac\xac,\x82\xc1 \
\x91Hd\xa2\x13\xb4\xd9l\xdcr\xcb-<\xf2\xc8#\
f\xb9\xbb\xb7\xb7\x17EQ\xb8x\xf1\x22\xe7\xce\x9d\x03\
\xaelh\xda\xda\xda\x18\x1d\x1d\xa5\xac\xac\x8c\xc2\xc2B\
\xce\x9e=KKK\x0b\x0f>\xf8 \xe3\xe3\xe3\xe4\xe6\
\xe6\x12\x0e\x87\x89F\xa3x<\x1es\x8f\x7f\xe1\xc2\x05\
\x1e~\xf8a\x82\xc1 \x99\x99\x99\xec\xd9\xb3\x07\x8f\xc7\
C(\x14b\xf5\xea\xd5\xf4\xf6\xf6\xd2\xd9\xd9\xc9\xca\x95\
+\x01\xe8\xea\xea\xe2\xc4\x89\x13\xa4\xa5\xa5\x91\x9f\x9fO\
yy9g\xce\x9c\xa1\xb5\xb5\x95\xbb\xee\xba\x0b\xa7\xd3\
\x89\xd3\xe9\xe4\xf2\xe5\xcb\xe4\xe6\xe6\xf2\xde{\xef\xd1\xd5\
\xd5\xc5\xd7\xbf\xfeu\xd2\xd2\xd2hmm\xe5\xdc\xb9s\
\xac_\xbf\x9e@ \x80\xcb\xe52\x8b\xadO?\xfd4\
===\xff\xac\x0a\xc7++\xc7\x8e\x1d\xa3\xa1\xa1\x81\
y\xf3\xe6\xd1\xde\xde\xce\x8c\x193x\xf7\xddw\xb1\xdb\
\xedtuu\xe1\xf5z\xd9\xbcy3\xe7\xce\x9d#+\
+\x8bc\xc7\x8e\xf1\x8do|\x83\xae\xae.\x92\x92\x92\
\x10\x11\x06\x06\x06Lb*++ijjb\xc1\x82\
\x05x<\x1e,\x16\x0b\x9a\xa6\xe1\xf3\xf9\x98={6\
\xf5\xf5\xf5\xd4\xd5\xd5q\xfd\xf5\xd7S]]\xcd\xdf\xff\
\xfew\xa2\xd1(\x19\x19\x19\xbc\xff\xfe\xfb\xacZ\xb5\x0a\
\xaf\xd7Kvv6G\x8e\x1c\xe1\xae\xbb\xee\xa2\xa3\xa3\
\x83\xe4\xe4d|>\x1f\x22Bvv6\x1d\x1d\x1d,\
\x5c\xb8\x90\xfd\xfb\xf7SZZj~\x0f\xb0\xdb\xed\x84\
B!\xb2\xb2\xb2hoo\xa7\xbc\xbc\x9c\xf3\xe7\xcf\xf3\
\xf6\xdbo\xb3w\xef\xde\x89_\x86\xd2\xd2\xd2\xb0Z\xad\
D\x22\x91\x09{mUUM\x8f\xac(\x0aN\xa7\x93\
H$b\xe6\xe4\xb1X\x0c\x9b\xcdF$\x12!55\
\x95@ @rr\xb2i\xbf\xaa\xaa\x9a\xa5\xf2h4\
\x8a\xd3\xe9d||\x1c\x87\xc3A \x10@Dp:\
\x9d\xe6\x98p8Ljj*~\xbf\x1f\x87\xc3A4\
\x1a%))\x09\xf8\xe7\xf66\x16\x8b\x99\xf3\xa4\xa4\xa4\
\x10\x0a\x85\xcc\xa4\xca\xe1p`\xb3\xd9\xcc\xbeH$\xc2\
\x17\xbe\xf0\x05\xc6\xc7\xc7\xcd\x84\xce0\x0cB\xa1\xd0\xe4\
\xa71\xeb\xbf6|Zq\x01\x98\x90\x15~\xd6\x98\xff\
\x16\xf1t\xfc?m\xffo\xe6\xfa\xb4q0\xf9qt\
\xf2\xff\x01\x93\x04$Z\x80Dc\x92\x80D\x0b\x90h\
L\x12\x90h\x01\x12\x8dI\x02\x12-@\xa21I@\
\xa2\x05H4&\x09H\xb4\x00\x89\xc6$\x01\x89\x16 \
\xd1\xf8\xdc\x13\xf0?E\x0a}G\xcd\xb2\x85|\x00\x00\
\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x03\
\x00\x00p7\
\x00i\
\x00m\x00g\
\x00\x0c\
\x05\x1b\xb0\xc7\
\x00c\
\x00i\x00l\x00-\x00f\x00i\x00l\x00e\x00.\x00p\x00n\x00g\
\x00\x12\
\x0d\xc4\x15'\
\x00c\
\x00i\x00l\x00-\x00v\x00i\x00e\x00w\x00-\x00q\x00u\x00i\x00l\x00t\x00.\x00p\x00n\
\x00g\
\x00\x12\
\x0f\xad\x8fg\
\x00c\
\x00i\x00l\x00-\x00m\x00e\x00d\x00i\x00a\x00-\x00p\x00l\x00a\x00y\x00.\x00p\x00n\
\x00g\
\x00\x10\
\x0d\xc9]\x07\
\x00c\
\x00i\x00l\x00-\x00s\x00e\x00t\x00t\x00i\x00n\x00g\x00s\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0b\x0b\xb0\xa7\
\x00c\
\x00i\x00l\x00-\x00h\x00o\x00m\x00e\x00.\x00p\x00n\x00g\
\x00\x0c\
\x08\x9b\xb0\x07\
\x00c\
\x00i\x00l\x00-\x00c\x00o\x00d\x00e\x00.\x00p\x00n\x00g\
\x00\x0c\
\x05\xfb\xbeg\
\x00c\
\x00i\x00l\x00-\x00s\x00a\x00v\x00e\x00.\x00p\x00n\x00g\
\x00\x11\
\x06G\x9f\xc7\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00c\x00k\x00-\x00a\x00l\x00t\x00.\x00p\x00n\x00g\
\
\x00\x0e\
\x06\x17\x85\xa7\
\x00c\
\x00i\x00l\x00-\x00p\x00e\x00n\x00c\x00i\x00l\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0e\xfd\xbf\xa7\
\x00c\
\x00i\x00l\x00-\x00p\x00l\x00u\x00s\x00.\x00p\x00n\x00g\
\x00\x09\
\x0fK\x84\xa7\
\x00c\
\x00i\x00l\x00-\x00x\x00.\x00p\x00n\x00g\
\x00\x15\
\x03Q:'\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00v\x00r\x00o\x00n\x00-\x00r\x00i\x00g\x00h\x00t\
\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0f\x14D'\
\x00t\
\x00r\x00a\x00s\x00h\x00-\x003\x002\x00.\x00p\x00n\x00g\
\x00\x0d\
\x0bz\xc5'\
\x00c\
\x00i\x00l\x00-\x00s\x00h\x00a\x00r\x00e\x00.\x00p\x00n\x00g\
\x00\x11\
\x0c\x84-\xa7\
\x00c\
\x00i\x00l\x00-\x00s\x00i\x00z\x00e\x00-\x00g\x00r\x00i\x00p\x00.\x00p\x00n\x00g\
\
\x00\x14\
\x0f=8\xc7\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00v\x00r\x00o\x00n\x00-\x00l\x00e\x00f\x00t\x00.\
\x00p\x00n\x00g\
\x00\x13\
\x0a\x0a\x0a\xa7\
\x00s\
\x00u\x00b\x00l\x00i\x00m\x00e\x00-\x00t\x00e\x00x\x00t\x00-\x004\x008\x00.\x00p\
\x00n\x00g\
\x00\x15\
\x025\x13\xc7\
\x00c\
\x00i\x00l\x00-\x00e\x00x\x00t\x00e\x00r\x00n\x00a\x00l\x00-\x00l\x00i\x00n\x00k\
\x00.\x00p\x00n\x00g\
\x00\x11\
\x0d@y\x07\
\x00c\
\x00i\x00l\x00-\x00c\x00l\x00i\x00p\x00b\x00o\x00a\x00r\x00d\x00.\x00p\x00n\x00g\
\
\x00\x15\
\x08\xca\x95\xe7\
\x00c\
\x00i\x00l\x00-\x00l\x00o\x00o\x00p\x00-\x00c\x00i\x00r\x00c\x00u\x00l\x00a\x00r\
\x00.\x00p\x00n\x00g\
\x00\x13\
\x04%\x01G\
\x00c\
\x00i\x00l\x00-\x00f\x00o\x00l\x00d\x00e\x00r\x00-\x00o\x00p\x00e\x00n\x00.\x00p\
\x00n\x00g\
\x00\x10\
\x09\x8fx\xe7\
\x00c\
\x00i\x00l\x00-\x00s\x00a\x00t\x00e\x00l\x00i\x00t\x00e\x00.\x00p\x00n\x00g\
\x00\x0e\
\x0f\xcc\xddg\
\x00w\
\x00i\x00d\x00g\x00e\x00t\x00s\x00-\x006\x004\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0bo\xbb'\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00.\x00p\x00n\x00g\
\x00\x12\
\x02\xfe[\xc7\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00_\x00s\x00m\x00a\x00l\x00l\x00.\x00p\x00n\
\x00g\
\x00\x11\
\x01LY\x1f\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00_\x00i\x00c\x00o\x00n\x00.\x00i\x00c\x00o\
\
\x00\x0c\
\x0bo\xa3\xff\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00.\x00i\x00c\x00o\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x08\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x02\x00\x00\x00\x04\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x03\xd4\x00\x01\x00\x00\x00\x01\x00\x00\xba\xf3\
\x00\x00\x01}\x91@\x8d\xb7\
\x00\x00\x03\xaa\x00\x00\x00\x00\x00\x01\x00\x00\xa3%\
\x00\x00\x01}\x9fv1\x97\
\x00\x00\x03\xfc\x00\x00\x00\x00\x00\x01\x00\x00\xbfl\
\x00\x00\x01}\xdbT\xbb\x1e\
\x00\x00\x03\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x9az\
\x00\x00\x01}\x97W\x0f\xd1\
\x00\x00\x00\x22\x00\x02\x00\x00\x00\x17\x00\x00\x00\x09\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x02\x90\x00\x00\x00\x00\x00\x01\x00\x00s\x03\
\x00\x00\x01y w\x85\x07\
\x00\x00\x01\xa0\x00\x00\x00\x00\x00\x01\x00\x00P\xc1\
\x00\x00\x01y w\x82\xa5\
\x00\x00\x03\x18\x00\x00\x00\x00\x00\x01\x00\x00\x891\
\x00\x00\x01y w\x85\x81\
\x00\x00\x00.\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01y w\x851\
\x00\x00\x01\x02\x00\x00\x00\x00\x00\x01\x00\x00,\xab\
\x00\x00\x01y w\x8a\x8c\
\x00\x00\x01H\x00\x00\x00\x00\x00\x01\x00\x00;/\
\x00\x00\x01y w\x89\xe2\
\x00\x00\x01 \x00\x00\x00\x00\x00\x01\x00\x004>\
\x00\x00\x01y w\x81\x7f\
\x00\x00\x00\xe4\x00\x00\x00\x00\x00\x01\x00\x00%5\
\x00\x00\x01y w\x83D\
\x00\x00\x02\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x81\xc2\
\x00\x00\x01y w\x87\xba\
\x00\x00\x03D\x00\x00\x00\x00\x00\x01\x00\x00\x90\xb6\
\x00\x00\x01y w\x8a~\
\x00\x00\x02d\x00\x00\x00\x00\x00\x01\x00\x00ox\
\x00\x00\x01}\x94\xbdm}\
\x00\x00\x00\xc6\x00\x00\x00\x00\x00\x01\x00\x00\x1d\xbb\
\x00\x00\x01y w\x86V\
\x00\x00\x01\xee\x00\x00\x00\x00\x00\x01\x00\x00X\xd6\
\x00\x00\x01y w\x8a\xd1\
\x00\x00\x02\x0e\x00\x00\x00\x00\x00\x01\x00\x00`[\
\x00\x00\x01y w\x8a\xfb\
\x00\x00\x02\xc0\x00\x00\x00\x00\x00\x01\x00\x00zu\
\x00\x00\x01y w\x82\xd4\
\x00\x00\x00L\x00\x00\x00\x00\x00\x01\x00\x00\x07R\
\x00\x00\x01y w\x8cd\
\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x16\x05\
\x00\x00\x01y w\x8a\xb5\
\x00\x00\x01j\x00\x00\x00\x00\x00\x01\x00\x00B\x86\
\x00\x00\x01y w\x8a\x19\
\x00\x00\x01\xd0\x00\x00\x00\x00\x00\x01\x00\x00X\x1a\
\x00\x00\x01}\x95E\xa3\xdc\
\x00\x00\x026\x00\x00\x00\x00\x00\x01\x00\x00h$\
\x00\x00\x01y w\x82\x96\
\x00\x00\x01\x88\x00\x00\x00\x00\x00\x01\x00\x00I\x89\
\x00\x00\x01y w\x8ds\
\x00\x00\x00v\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xc1\
\x00\x00\x01y w\x88$\
\x00\x00\x03j\x00\x00\x00\x00\x00\x01\x00\x00\x98R\
\x00\x00\x01}\x95D\xe9\x11\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
the-stack_0_15390 | # -*- coding: utf-8 -*-
"""Project myip
Will show your IP address.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'myip'
project = "Otype myip"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'Shows your IP address'
authors = ['Hans-Gunther Schmidt']
authors_string = ', '.join(authors)
emails = ['[email protected]']
license = 'MIT'
copyright = '2014 ' + authors_string
url = 'http://otype.de/'
|
the-stack_0_15391 | import os
import re
from subprocess import PIPE, Popen
def git_file_deltas(git_dir, commit, compare=None):
    """Placeholder (not yet implemented): return per-file deltas for a commit."""
    # source: http://stackoverflow.com/a/2713363
    pass
def sub_git_remote_url(git_dir):
args = ['config', '--get', "remote.origin.url"]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
return gitout
def sub_git_cmd(git_dir, args):
"""
run git command
args are the full command with args
git_dir, the actual full path of the .git/ repo directory to run commands against
returns popen object for access to stdout+stderr
"""
git_dir_to_use = None
    if os.path.isdir(os.path.join(git_dir, '.git')):
        # a regular checkout: .git is a directory
        git_dir_to_use = os.path.join(git_dir, '.git')
    elif os.path.isfile(os.path.join(git_dir, '.git')):
        # a submodule checkout: .git is a gitfile pointing at the real git dir,
        # which git dereferences when passed via --git-dir
        git_dir_to_use = os.path.join(git_dir, '.git')
# else:
# raise Exception("Error, the .git location for %s doesn't exists" % git_dir)
try:
p = Popen(
[
'git',
'--git-dir',
git_dir_to_use,
] + args,
stdout=PIPE, stderr=PIPE
)
except OSError as e:
# Is git missing ?
if e.errno == 2:
e.strerror += ": git"
raise(e)
return p
def sub_get_current_branch(git_dir):
#HT: http://stackoverflow.com/a/12142066
args = [
'rev-parse',
'--abbrev-ref',
'HEAD'
]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
return gitout
def get_project_snapshot(git_dir, submodules=False, log_count=1, submodule_count=1):
root_info = sub_git_info(git_dir, log_count=log_count)
root_info['current_branch'] = sub_get_current_branch(git_dir)
if submodules:
root_info['submodules'] = list(sub_git_submodules(git_dir, log_count=submodule_count))
return root_info
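# Illustrative only: assuming log_count=1, the snapshot assembled above is a plain
# dict shaped roughly like
#
#   {
#       "current_branch": "master",
#       "commits": [
#           {"sha": "...", "author": "Jane Doe <jane@example.com>", "date": "...",
#            "subject": "...", "message": "...",
#            "commit_url": "https://github.com/<account>/<repo>/commit/<sha>",
#            "compare_master": "https://github.com/<account>/<repo>/compare/<sha>...master"},
#       ],
#       # "submodules": [ ... ]   # present only when submodules=True
#   }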
def sub_git_info(git_dir, log_count=1):
"""
Given a git dir and log count, return a json formatted representation
"""
return_dict = {}
kv_line_format = {
'sha': '%H',
'author': '%an <%ae>',
'date': '%ai',
'subject': '%s',
'message': '%b'
}
KV_DELIMITER = ':~@#$~:'
LINE_DELIMITER = '@#\n#@'
# construct an output of git log that is essentially a:
# key=value
# key=value, etc
# but a sing a custom Key=Value delimiter, and a custom Line delimiter since
# there might be newlines in messages and subjects
# the git log -z format delimits the entire log by null, but we need separation of each property
line_by_line_format = LINE_DELIMITER.join(['%s%s%s' % (k, KV_DELIMITER, v) for k, v in kv_line_format.items()])
args = ['log',
'-%s' % log_count,
'-z',
'--pretty=format:%s' % line_by_line_format
]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
url = sub_git_remote_url(git_dir)
all_raw_revs = gitout.split('\0')
def parse_rev_block(block_text):
ret = {}
for prop in block_text.split(LINE_DELIMITER):
if len(prop) == 0:
continue
try:
k, v = prop.split(KV_DELIMITER)
except ValueError:
k = "GitParseError"
v = prop
ret[k] = v
return ret
commit_list = [parse_rev_block(s) for s in all_raw_revs]
for commit in commit_list:
commit['commit_url'] = get_commit_url(url, commit['sha'])
commit['compare_master'] = get_compare_url(url, commit['sha'], 'master')
return_dict['commits'] = commit_list
return return_dict
def get_git_sub_info(git_dir, sub_path, log_count=1):
full_sub_path = os.path.join(git_dir, sub_path)
sub_info = sub_git_info(full_sub_path, log_count=log_count)
return sub_info
def sub_git_submodules(git_dir, log_count=1):
"""
Using shell, get the active submodule info
"""
    args = ['submodule', 'status']
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
        # iterate over the lines of `git submodule status` output, not its characters
        for x in gitout.splitlines():
splits = x.strip().split(' ')
if len(splits) == 3:
sub_sha = splits[0].strip()
sub_path = splits[1]
sub_log = get_git_sub_info(git_dir, sub_path, log_count=log_count)
sub_log['path'] = sub_path
sub_log['branch'] = splits[2]
sub_log['sha_sha'] = sub_sha
yield sub_log
def split_repo_url(repo_url):
"""
Repo url splits to [git_account, git_repo]
even if it's git://, or git@
"""
if re.search(r'^\w+://', repo_url):
chunks = repo_url.split("/")[-2:]
elif repo_url.startswith("git@"):
chunks = repo_url.split(':')[-1].split('/')
    else:
        # fall back to the last two path components so unrecognized URL formats
        # do not raise UnboundLocalError
        chunks = repo_url.split('/')[-2:]
    return chunks
def get_commit_url(repo_url, hexsha, compare=False):
chunks = split_repo_url(repo_url)
url = "https://github.com/%s/%s/commit/%s" % (chunks[0], chunks[1].replace('.git', ''), hexsha)
return url
def get_compare_url(repo_url, start_cmp, end_cmp):
chunks = split_repo_url(repo_url)
url = "https://github.com/%(account)s/%(repo)s/compare/%(start_cmp)s...%(end_cmp)s" % {
"account": chunks[0],
"repo": chunks[1].replace('.git', ''),
"start_cmp": start_cmp,
"end_cmp": end_cmp
}
return url
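# Rough usage sketch for the URL helpers above (repository names are made up):
#
#   split_repo_url("git@github.com:acct/repo.git")      # -> ["acct", "repo.git"]
#   split_repo_url("https://github.com/acct/repo.git")  # -> ["acct", "repo.git"]
#   get_commit_url("git@github.com:acct/repo.git", "abc123")
#   # -> "https://github.com/acct/repo/commit/abc123"
#   get_compare_url("git@github.com:acct/repo.git", "abc123", "master")
#   # -> "https://github.com/acct/repo/compare/abc123...master"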
|
the-stack_0_15394 | # Ensures that:
# 1. all worker containers in the database are still responsive; workers that have stopped
# responding are shutdown and removed from the database.
# 2. Enforce ttl for idle workers.
#
# In the future, this module will also implement:
# 3. all actors with stateless=true have a number of workers proportional to the messages in the queue.
# Execute from a container on a schedule as follows:
# docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock abaco/core python3 -u /actors/health.py
import os
import shutil
import time
import datetime
from agaveflask.auth import get_api_server
import channelpy
from aga import Agave
from auth import get_tenants, get_tenant_verify
import codes
from config import Config
from docker_utils import rm_container, DockerError, container_running, run_container_with_docker
from models import Actor, Worker, is_hashid
from channels import ClientsChannel, CommandChannel, WorkerChannel
from stores import actors_store, clients_store, executions_store, workers_store
from worker import shutdown_worker
TAG = os.environ.get('TAG') or Config.get('general', 'TAG') or ''
if TAG and not TAG[0] == ':':
    TAG = ':{}'.format(TAG)
AE_IMAGE = '{}{}'.format(os.environ.get('AE_IMAGE', 'abaco/core'), TAG)
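# For example (assuming the TAG config/env value is "v1.2.3" and AE_IMAGE is not
# overridden), the normalization above yields TAG == ":v1.2.3" and
# AE_IMAGE == "abaco/core:v1.2.3".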
from agaveflask.logs import get_logger, get_log_file_strategy
logger = get_logger(__name__)
# max executions allowed in a mongo document; if the total executions for a given actor exceeds this number,
# the health process will place
MAX_EXECUTIONS_PER_MONGO_DOC = 25000
def get_actor_ids():
"""Returns the list of actor ids currently registered."""
return [aid for aid in actors_store]
def check_workers_store(ttl):
logger.debug("Top of check_workers_store.")
"""Run through all workers in workers_store and ensure there is no data integrity issue."""
for worker in workers_store.items():
aid = worker['actor_id']
check_worker_health(aid, worker, ttl)
def get_worker(wid):
"""
Check to see if a string `wid` is the id of a worker in the worker store.
If so, return it; if not, return None.
"""
worker = workers_store.items({'id': wid})
if worker:
return worker
return None
def clean_up_socket_dirs():
logger.debug("top of clean_up_socket_dirs")
socket_dir = os.path.join('/host/', Config.get('workers', 'socket_host_path_dir').strip('/'))
logger.debug("processing socket_dir: {}".format(socket_dir))
for p in os.listdir(socket_dir):
# check to see if p is a worker
worker = get_worker(p)
if not worker:
path = os.path.join(socket_dir, p)
logger.debug("Determined that {} was not a worker; deleting directory: {}.".format(p, path))
shutil.rmtree(path)
def clean_up_fifo_dirs():
logger.debug("top of clean_up_fifo_dirs")
fifo_dir = os.path.join('/host/', Config.get('workers', 'fifo_host_path_dir').strip('/'))
logger.debug("processing fifo_dir: {}".format(fifo_dir))
for p in os.listdir(fifo_dir):
# check to see if p is a worker
worker = get_worker(p)
if not worker:
path = os.path.join(fifo_dir, p)
logger.debug("Determined that {} was not a worker; deleting directory: {}.".format(p, path))
shutil.rmtree(path)
def clean_up_ipc_dirs():
"""Remove all directories created for worker sockets and fifos"""
clean_up_socket_dirs()
clean_up_fifo_dirs()
def delete_client(ag, client_name):
"""Remove a client from the APIM."""
try:
ag.clients.delete(clientName=client_name)
except Exception as e:
m = 'Not able to delete client from APIM. Got an exception: {}'.format(e)
logger.error(m)
return None
def clean_up_apim_clients(tenant):
"""Check the list of clients registered in APIM and remove any that are associated with retired workers."""
username = os.environ.get('_abaco_{}_username'.format(tenant), '')
password = os.environ.get('_abaco_{}_password'.format(tenant), '')
if not username:
msg = "Health process did not get a username for tenant {}; " \
"returning from clean_up_apim_clients".format(tenant)
if tenant in ['SD2E', 'TACC-PROD']:
logger.error(msg)
else:
logger.info(msg)
return None
if not password:
msg = "Health process did not get a password for tenant {}; " \
"returning from clean_up_apim_clients".format(tenant)
if tenant in ['SD2E', 'TACC-PROD']:
logger.error(msg)
else:
logger.info(msg)
return None
api_server = get_api_server(tenant)
verify = get_tenant_verify(tenant)
ag = Agave(api_server=api_server,
username=username,
password=password,
verify=verify)
logger.debug("health process created an ag for tenant: {}".format(tenant))
try:
cs = ag.clients.list()
clients = cs.json()['result']
except Exception as e:
msg = "Health process got an exception trying to retrieve clients; exception: {}".format(e)
logger.error(msg)
return None
for client in clients:
# check if the name of the client is an abaco hash (i.e., a worker id). if not, we ignore it from the beginning
name = client.get('name')
if not is_hashid(name):
logger.debug("client {} is not an abaco hash id; skipping.".format(name))
continue
# we know this client came from a worker, so we need to check to see if the worker is still active;
# first check if the worker even exists; if it does, the id will be the client name:
worker = get_worker(name)
if not worker:
logger.info("no worker associated with id: {}; deleting client.".format(name))
delete_client(ag, name)
logger.info("client {} deleted by health process.".format(name))
continue
# if the worker exists, we should check the status:
status = worker.get('status')
if status == codes.ERROR:
logger.info("worker {} was in ERROR status so deleting client; worker: {}.".format(name, worker))
delete_client(ag, name)
logger.info("client {} deleted by health process.".format(name))
else:
logger.debug("worker {} still active; not deleting client.".format(worker))
def clean_up_clients_store():
logger.debug("top of clean_up_clients_store")
secret = os.environ.get('_abaco_secret')
if not secret:
logger.error("health.py not configured with _abaco_secret. exiting clean_up_clients_store.")
return None
for client in clients_store.items():
wid = client.get('worker_id')
if not wid:
logger.error("client object in clients_store without worker_id. client: {}".format(client))
continue
tenant = client.get('tenant')
if not tenant:
logger.error("client object in clients_store without tenant. client: {}".format(client))
continue
actor_id = client.get('actor_id')
if not actor_id:
logger.error("client object in clients_store without actor_id. client: {}".format(client))
continue
client_key = client.get('client_key')
if not client_key:
logger.error("client object in clients_store without client_key. client: {}".format(client))
continue
# check to see if the wid is the id of an actual worker:
worker = get_worker(wid)
if not worker:
logger.info(f"worker {wid} is gone. deleting client {client}.")
clients_ch = ClientsChannel()
msg = clients_ch.request_delete_client(tenant=tenant,
actor_id=actor_id,
worker_id=wid,
client_id=client_key,
secret=secret)
if msg['status'] == 'ok':
logger.info(f"Client delete request completed successfully for "
"worker_id: {wid}, client_id: {client_key}.".format(wid, client_key))
else:
logger.error(f"Error deleting client for "
"worker_id: {wid}, client_id: {client_key}. Message: {msg}")
else:
logger.info(f"worker {wid} still here. ignoring client {client}.")
def check_worker_health(actor_id, worker, ttl):
"""Check the specific health of a worker object."""
logger.debug("top of check_worker_health")
worker_id = worker.get('id')
logger.info("Checking status of worker from db with worker_id: {}".format(worker_id))
if not worker_id:
logger.error("Corrupt data in the workers_store. Worker object without an id attribute. {}".format(worker))
try:
workers_store.pop_field([actor_id])
except KeyError:
# it's possible another health agent already removed the worker record.
pass
return None
# make sure the actor id still exists:
try:
actors_store[actor_id]
except KeyError:
logger.error("Corrupt data in the workers_store. Worker object found but no corresponding actor. {}".format(worker))
try:
# todo - removing worker objects from db can be problematic if other aspects of the worker are not cleaned
# up properly. this code should be reviewed.
workers_store.pop_field([actor_id])
except KeyError:
# it's possible another health agent already removed the worker record.
pass
return None
def zero_out_workers_db():
"""
Set all workers collections in the db to empty. Run this as part of a maintenance; steps:
1) remove all docker containers
2) run this function
3) run clean_up_apim_clients().
4) run zero_out_clients_db()
:return:
"""
for worker in workers_store.items(proj_inp=None):
del workers_store[worker['_id']]
def zero_out_clients_db():
"""
Set all clients collections in the db to empty. Run this as part of a maintenance; steps:
1) remove all docker containers
2) run zero_out_workers_db()
3) run clean_up_apim_clients().
4) run this function
:return:
"""
for client in clients_store.items():
clients_store[client['_id']] = {}
def check_workers(actor_id, ttl):
"""Check health of all workers for an actor."""
logger.info("Checking health for actor: {}".format(actor_id))
try:
workers = Worker.get_workers(actor_id)
except Exception as e:
logger.error("Got exception trying to retrieve workers: {}".format(e))
return None
logger.debug("workers: {}".format(workers))
host_id = os.environ.get('SPAWNER_HOST_ID', Config.get('spawner', 'host_id'))
logger.debug("host_id: {}".format(host_id))
for worker in workers:
# if the worker has only been requested, it will not have a host_id.
if 'host_id' not in worker:
# @todo- we will skip for now, but we need something more robust in case the worker is never claimed.
continue
# ignore workers on different hosts
if not host_id == worker['host_id']:
continue
# first check if worker is responsive; if not, will need to manually kill
logger.info("Checking health for worker: {}".format(worker))
ch = WorkerChannel(worker_id=worker['id'])
worker_id = worker.get('id')
result = None
try:
logger.debug("Issuing status check to channel: {}".format(worker['ch_name']))
result = ch.put_sync('status', timeout=5)
except channelpy.exceptions.ChannelTimeoutException:
logger.info("Worker did not respond, removing container and deleting worker.")
try:
rm_container(worker['cid'])
except DockerError:
pass
try:
Worker.delete_worker(actor_id, worker_id)
logger.info("worker {} deleted from store".format(worker_id))
except Exception as e:
logger.error("Got exception trying to delete worker: {}".format(e))
# if the put_sync timed out and we removed the worker, we also need to delete the channel
# otherwise the un-acked message will remain.
try:
ch.delete()
except Exception as e:
logger.error("Got exception: {} while trying to delete worker channel for worker: {}".format(e, worker_id))
finally:
try:
ch.close()
except Exception as e:
logger.error("Got an error trying to close the worker channel for dead worker. Exception: {}".format(e))
if result and not result == 'ok':
logger.error("Worker responded unexpectedly: {}, deleting worker.".format(result))
try:
rm_container(worker['cid'])
Worker.delete_worker(actor_id, worker_id)
except Exception as e:
logger.error("Got error removing/deleting worker: {}".format(e))
else:
# worker is healthy so update last health check:
Worker.update_worker_health_time(actor_id, worker_id)
logger.info("Worker ok.")
# now check if the worker has been idle beyond the ttl:
if ttl < 0:
# ttl < 0 means infinite life
logger.info("Infinite ttl configured; leaving worker")
return
# we don't shut down workers that are currently running:
if not worker['status'] == codes.BUSY:
last_execution = worker.get('last_execution_time', 0)
# if worker has made zero executions, use the create_time
if last_execution == 0:
last_execution = worker.get('create_time', datetime.datetime.min)
logger.debug("using last_execution: {}".format(last_execution))
try:
assert type(last_execution) == datetime.datetime
except:
logger.error("Time received for TTL measurements is not of type datetime.")
last_execution = datetime.datetime.min
if last_execution + datetime.timedelta(seconds=ttl) < datetime.datetime.utcnow():
# shutdown worker
logger.info("Shutting down worker beyond ttl.")
shutdown_worker(actor_id, worker['id'])
else:
logger.info("Still time left for this worker.")
if worker['status'] == codes.ERROR:
# shutdown worker
logger.info("Shutting down worker in error status.")
shutdown_worker(actor_id, worker['id'])
# else:
# logger.debug("Worker not in READY status, will postpone.")
def get_host_queues():
"""
Read host_queues string from config and parse to return a Python list.
:return: list[str]
"""
try:
host_queues_str = Config.get('spawner', 'host_queues')
return [ s.strip() for s in host_queues_str.split(',')]
except Exception as e:
msg = "Got unexpected exception attempting to parse the host_queues config. Exception: {}".format(e)
        logger.error(msg)
raise e
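# Example (illustrative config, not from a real deployment): with
#   [spawner]
#   host_queues = default, special
# get_host_queues() returns ['default', 'special'].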
def start_spawner(queue, idx='0'):
"""
Start a spawner on this host listening to a queue, `queue`.
:param queue: (str) - the queue the spawner should listen to.
:param idx: (str) - the index to use as a suffix to the spawner container name.
:return:
"""
command = 'python3 -u /actors/spawner.py'
name = 'healthg_{}_spawner_{}'.format(queue, idx)
try:
environment = dict(os.environ)
except Exception as e:
environment = {}
logger.error("Unable to convert environment to dict; exception: {}".format(e))
environment.update({'AE_IMAGE': AE_IMAGE.split(':')[0],
'queue': queue,
})
if not '_abaco_secret' in environment:
msg = 'Error in health process trying to start spawner. Did not find an _abaco_secret. Aborting'
logger.critical(msg)
        raise Exception(msg)
# check logging strategy to determine log file name:
log_file = 'abaco.log'
if get_log_file_strategy() == 'split':
log_file = 'spawner.log'
try:
run_container_with_docker(AE_IMAGE,
command,
name=name,
environment=environment,
mounts=[],
log_file=log_file)
except Exception as e:
logger.critical("Could not restart spawner for queue {}. Exception: {}".format(queue, e))
def check_spawner(queue):
"""
Check the health and existence of a spawner on this host for a particular queue.
:param queue: (str) - the queue to check on.
:return:
"""
logger.debug("top of check_spawner for queue: {}".format(queue))
# spawner container names by convention should have the format <project>_<queue>_spawner_<count>; for example
# abaco_default_spawner_2.
# so, we look for container names containing a string with that format:
spawner_name_segment = '{}_spawner'.format(queue)
if not container_running(name=spawner_name_segment):
logger.critical("No spawners running for queue {}! Launching new spawner..".format(queue))
start_spawner(queue)
else:
logger.debug("spawner for queue {} already running.".format(queue))
def check_spawners():
"""
Check health of spawners running on a given host.
:return:
"""
logger.debug("top of check_spawners")
host_queues = get_host_queues()
logger.debug("checking spawners for queues: {}".format(host_queues))
for queue in host_queues:
check_spawner(queue)
def manage_workers(actor_id):
"""Scale workers for an actor if based on message queue size and policy."""
logger.info("Entering manage_workers for {}".format(actor_id))
try:
actor = Actor.from_db(actors_store[actor_id])
except KeyError:
logger.info("Did not find actor; returning.")
return
workers = Worker.get_workers(actor_id)
for worker in workers:
time_difference = time.time() - worker['create_time']
if worker['status'] == 'PROCESSING' and time_difference > 1:
logger.info("LOOK HERE - worker creation time {}".format(worker['create_time']))
#TODO - implement policy
def shutdown_all_workers():
"""
Utility function for properly shutting down all existing workers.
This function is useful when deploying a new version of the worker code.
"""
# iterate over the workers_store directly, not the actors_store, since there could be data integrity issue.
logger.debug("Top of shutdown_all_workers.")
actors_with_workers = set()
for worker in workers_store.items():
actors_with_workers.add(worker['actor_id'])
for actor_id in actors_with_workers:
check_workers(actor_id, 0)
def main():
logger.info("Running abaco health checks. Now: {}".format(time.time()))
# TODO - turning off the check_spawners call in the health process for now as there seem to be some issues.
# the way the check works currently is to look for a spawner with a specific name. However, that check does not
# appear to be working currently.
# check_spawners()
try:
clean_up_ipc_dirs()
except Exception as e:
logger.error("Got exception from clean_up_ipc_dirs: {}".format(e))
try:
ttl = Config.get('workers', 'worker_ttl')
except Exception as e:
logger.error("Could not get worker_ttl config. Exception: {}".format(e))
try:
ttl = int(ttl)
except Exception as e:
logger.error("Invalid ttl config: {}. Setting to -1.".format(e))
ttl = -1
ids = get_actor_ids()
logger.info("Found {} actor(s). Now checking status.".format(len(ids)))
for id in ids:
# manage_workers(id)
check_workers(id, ttl)
tenants = get_tenants()
for t in tenants:
logger.debug("health process cleaning up apim_clients for tenant: {}".format(t))
clean_up_apim_clients(t)
# TODO - turning off the check_workers_store for now. unclear that removing worker objects
# check_workers_store(ttl)
if __name__ == '__main__':
main() |
the-stack_0_15395 | """Home Assistant Cast integration for Cast."""
from typing import Optional
from pychromecast.controllers.homeassistant import HomeAssistantController
import voluptuous as vol
from homeassistant import auth, config_entries, core
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv, dispatcher
from homeassistant.helpers.network import get_url
from .const import DOMAIN, SIGNAL_HASS_CAST_SHOW_VIEW
SERVICE_SHOW_VIEW = "show_lovelace_view"
ATTR_VIEW_PATH = "view_path"
ATTR_URL_PATH = "dashboard_path"
async def async_setup_ha_cast(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up Home Assistant Cast."""
user_id: Optional[str] = entry.data.get("user_id")
user: Optional[auth.models.User] = None
if user_id is not None:
user = await hass.auth.async_get_user(user_id)
if user is None:
user = await hass.auth.async_create_system_user(
"Home Assistant Cast", [auth.GROUP_ID_ADMIN]
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, "user_id": user.id}
)
if user.refresh_tokens:
refresh_token: auth.models.RefreshToken = list(user.refresh_tokens.values())[0]
else:
refresh_token = await hass.auth.async_create_refresh_token(user)
async def handle_show_view(call: core.ServiceCall):
"""Handle a Show View service call."""
hass_url = get_url(hass, require_ssl=True)
controller = HomeAssistantController(
# If you are developing Home Assistant Cast, uncomment and set to your dev app id.
# app_id="5FE44367",
hass_url=hass_url,
client_id=None,
refresh_token=refresh_token.token,
)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_HASS_CAST_SHOW_VIEW,
controller,
call.data[ATTR_ENTITY_ID],
call.data[ATTR_VIEW_PATH],
call.data.get(ATTR_URL_PATH),
)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_SHOW_VIEW,
handle_show_view,
vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_id,
ATTR_VIEW_PATH: str,
vol.Optional(ATTR_URL_PATH): str,
}
),
)
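    # Illustrative service call (entity and view names are made up): an automation
    # could invoke
    #   service: cast.show_lovelace_view
    #   data:
    #     entity_id: media_player.living_room_speaker
    #     view_path: downstairs
    #     dashboard_path: lovelace-cast
    # which dispatches SIGNAL_HASS_CAST_SHOW_VIEW so the matching cast entity
    # renders that Lovelace view.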
|
the-stack_0_15398 | # -*- coding: utf-8 -*-
from setuptools import setup
project = "fbone"
setup(
name = project,
version = '0.1',
url = '',
description = '',
author = '',
author_email = '',
packages = ["fbone"],
include_package_data = True,
zip_safe = False,
install_requires=[
'Flask>=0.10.1',
'Flask-SQLAlchemy',
'Flask-WTF',
'Flask-Script',
'Flask-Babel',
'Flask-Testing',
'Flask-Mail',
'Flask-Cache',
'Flask-Login',
'Flask-OpenID',
'nose',
'fabric',
],
test_suite ='tests',
classifiers = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries'
]
)
|
the-stack_0_15399 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from builtins import object
class Body(object):
def inspect(self, inspector):
        raise NotImplementedError(
            "class '%s' should override method 'inspect'"
            % self.__class__.__name__
        )
# version
__id__ = "$Id$"
#
# End of file
|
the-stack_0_15400 | """Test sobel vs gradient."""
import os
from typing import Tuple
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import src.constants as cst
import src.plot_utils.latex_style as lsty
import src.plot_utils.xarray_panels as xp
import src.time_wrapper as twr
from scipy import signal
def sobel_np(values: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Sobel operator on np array.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html
Args:
values (np.ndarray): values to differentiate.
Returns:
Tuple[np.ndarray, np.ndarray]: gx, gy
"""
sobel = np.array(
[
[1 + 1j, 0 + 2j, -1 + 1j],
[2 + 0j, 0 + 0j, -2 + 0j],
[1 - 1j, 0 - 2j, -1 - 1j],
]
) # Gx + j*Gy
grad = signal.convolve2d(values, sobel, boundary="symm", mode="same")
return np.real(grad), np.imag(grad)
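# Minimal usage sketch (synthetic input; names and sizes are illustrative only):
#   field = np.random.default_rng(0).normal(size=(8, 8))
#   gx, gy = sobel_np(field)
#   magnitude = np.hypot(gx, gy)      # per-cell gradient magnitude
#   orientation = np.arctan2(gy, gx)  # gradient direction in radians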
@twr.timeit
def sobel_vs_grad() -> None:
"""
    Sobel versus gradient.
"""
lsty.mpl_params()
ds = xr.open_dataset(cst.DEFAULT_NC)
da_temp = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX)
pc1_y: xr.DataArray = da_temp.isel(pca=0)
pc1_y.values = sobel_np(pc1_y.values)[1]
pc2_y: xr.DataArray = da_temp.isel(pca=1)
pc2_y.values = sobel_np(pc2_y.values)[1]
pc3_y: xr.DataArray = da_temp.isel(pca=2)
pc3_y.values = sobel_np(pc3_y.values)[1]
xp.sep_plots(
[pc1_y, pc2_y, pc3_y],
["$G_y$ * PC1", "$G_y$ * PC2", "$G_y$ * PC3"],
[[-40, 40], [-40, 40], [-40, 40]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pcy.png")
)
plt.clf()
pc1_x: xr.DataArray = da_temp.isel(pca=0)
pc1_x.values = sobel_np(pc1_x.values)[0]
pc2_x: xr.DataArray = da_temp.isel(pca=1)
pc2_x.values = sobel_np(pc2_x.values)[0]
pc3_x: xr.DataArray = da_temp.isel(pca=2)
pc3_x.values = sobel_np(pc3_x.values)[0]
xp.sep_plots(
[pc1_x, pc2_x, pc3_x],
["$G_x$ * PC1", "$G_x$ * PC2", "$G_x$ * PC3"],
[[-40, 40], [-40, 40], [-40, 40]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pcx.png")
)
plt.clf()
da_y = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.Y_COORD)
xp.sep_plots(
[da_y.isel(pca=0), da_y.isel(pca=1), da_y.isel(pca=2)],
["PC1 y-grad", "PC2 y-grad", "PC3 y-grad"],
[[-20, 20], [-20, 20], [-20, 20]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pc_y.png")
)
plt.clf()
da_x = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.X_COORD)
xp.sep_plots(
[da_x.isel(pca=0), da_x.isel(pca=1), da_x.isel(pca=2)],
["PC1 x-grad", "PC2 x-grad", "PC3 x-grad"],
[[-20, 20], [-20, 20], [-20, 20]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pc_x.png")
)
plt.clf()
def sobel_scharr_test() -> None:
"""Test scharr / sobel."""
da = xr.DataArray(np.random.randn(15, 30), dims=[cst.X_COORD, cst.Y_COORD])
# kernel = xr.DataArray(filter, dims=["kx", "ky"])
# da_new = da.rolling(XC=3, YC=3).construct(XC="kx", YC="ky").dot(kernel)
val = da.values
print("val", val)
# print(da_new)
scharr = np.array(
[
[-3 - 3j, 0 - 10j, +3 - 3j],
[-10 + 0j, 0 + 0j, +10 + 0j],
[-3 + 3j, 0 + 10j, +3 + 3j],
]
) # Gx + j*Gy
sobel = np.array(
[
[1 + 1j, 0 + 2j, -1 + 1j],
[2 + 0j, 0 + 0j, -2 + 0j],
[1 - 1j, 0 - 2j, -1 - 1j],
]
) # Gx + j*Gy
for filt in [sobel, scharr]:
grad = signal.convolve2d(val, filt, boundary="symm", mode="same")
gx = np.real(grad)
gy = np.imag(grad)
print(gx)
print(gy)
# print(grad)
_, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
ax_orig.imshow(val, cmap="gray")
ax_orig.set_title("Original")
ax_orig.set_axis_off()
ax_mag.imshow(np.absolute(grad), cmap="gray")
ax_mag.set_title("Gradient magnitude")
ax_mag.set_axis_off()
ax_ang.imshow(np.angle(grad), cmap="hsv") # hsv is cyclic, like angles
ax_ang.set_title("Gradient orientation")
ax_ang.set_axis_off()
# fig.show()
plt.savefig("example.png")
def grad_v() -> None:
"""Gradient in v direction."""
ds = xr.open_dataset(cst.DEFAULT_NC)
da_y = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.Y_COORD)
xp.sep_plots(
[da_y.isel(pca=0), da_y.isel(pca=1), da_y.isel(pca=2)],
["PC1 y-grad", "PC2 y-grad", "PC3 y-grad"],
)
pc_y_grad_name = os.path.join(
cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_y_grad.png"
)
plt.savefig(pc_y_grad_name)
plt.clf()
if __name__ == "__main__":
sobel_vs_grad()
# python3 src/sobel.py
|
the-stack_0_15402 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from oslo.config import cfg
from oslo import messaging
from sahara import conductor as c
from sahara import context
from sahara.openstack.common import log as logging
from sahara.plugins import base as plugin_base
from sahara.service.edp import job_manager
from sahara.service import trusts
from sahara.utils import general as g
from sahara.utils import rpc as rpc_utils
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
INFRA = None
def setup_ops(engine):
global INFRA
INFRA = engine
class LocalOps(object):
def provision_cluster(self, cluster_id):
context.spawn("cluster-creating-%s" % cluster_id,
_provision_cluster, cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
context.spawn("cluster-scaling-%s" % cluster_id,
_provision_scaled_cluster, cluster_id, node_group_id_map)
def terminate_cluster(self, cluster_id):
context.spawn("cluster-terminating-%s" % cluster_id,
_terminate_cluster, cluster_id)
def run_edp_job(self, job_execution_id):
context.spawn("Starting Job Execution %s" % job_execution_id,
_run_edp_job, job_execution_id)
class RemoteOps(rpc_utils.RPCClient):
def __init__(self):
target = messaging.Target(topic='sahara-ops', version='1.0')
super(RemoteOps, self).__init__(target)
def provision_cluster(self, cluster_id):
self.cast('provision_cluster', cluster_id=cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
self.cast('provision_scaled_cluster', cluster_id=cluster_id,
node_group_id_map=node_group_id_map)
def terminate_cluster(self, cluster_id):
self.cast('terminate_cluster', cluster_id=cluster_id)
def run_edp_job(self, job_execution_id):
self.cast('run_edp_job', job_execution_id=job_execution_id)
class OpsServer(rpc_utils.RPCServer):
def __init__(self):
target = messaging.Target(topic='sahara-ops', server=uuid.uuid4(),
version='1.0')
super(OpsServer, self).__init__(target)
def provision_cluster(self, cluster_id):
_provision_cluster(cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
_provision_scaled_cluster(cluster_id, node_group_id_map)
def terminate_cluster(self, cluster_id):
_terminate_cluster(cluster_id)
def run_edp_job(self, job_execution_id):
_run_edp_job(job_execution_id)
def _prepare_provisioning(cluster_id):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster_id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
for nodegroup in cluster.node_groups:
conductor.node_group_update(
ctx, nodegroup,
{"image_username": INFRA.get_node_group_image_username(nodegroup)})
cluster = conductor.cluster_get(ctx, cluster_id)
return ctx, cluster, plugin
def _provision_cluster(cluster_id):
ctx, cluster, plugin = _prepare_provisioning(cluster_id)
if CONF.use_identity_api_v3 and cluster.is_transient:
trusts.create_trust(cluster)
# updating cluster infra
cluster = conductor.cluster_update(ctx, cluster,
{"status": "InfraUpdating"})
LOG.info(g.format_cluster_status(cluster))
plugin.update_infra(cluster)
# creating instances and configuring them
cluster = conductor.cluster_get(ctx, cluster_id)
INFRA.create_cluster(cluster)
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# configure cluster
cluster = conductor.cluster_update(ctx, cluster, {"status": "Configuring"})
LOG.info(g.format_cluster_status(cluster))
try:
plugin.configure_cluster(cluster)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't configure cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster, {"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# starting prepared and configured cluster
cluster = conductor.cluster_update(ctx, cluster, {"status": "Starting"})
LOG.info(g.format_cluster_status(cluster))
try:
plugin.start_cluster(cluster)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't start services for cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster, {"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# cluster is now up and ready
cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
LOG.info(g.format_cluster_status(cluster))
# schedule execution pending job for cluster
for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
job_manager.run_job(je.id)
def _provision_scaled_cluster(cluster_id, node_group_id_map):
ctx, cluster, plugin = _prepare_provisioning(cluster_id)
# Decommissioning surplus nodes with the plugin
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Decommissioning"})
LOG.info(g.format_cluster_status(cluster))
instances_to_delete = []
for node_group in cluster.node_groups:
new_count = node_group_id_map[node_group.id]
if new_count < node_group.count:
instances_to_delete += node_group.instances[new_count:
node_group.count]
if instances_to_delete:
plugin.decommission_nodes(cluster, instances_to_delete)
# Scaling infrastructure
cluster = conductor.cluster_update(ctx, cluster, {"status": "Scaling"})
LOG.info(g.format_cluster_status(cluster))
instances = INFRA.scale_cluster(cluster, node_group_id_map)
# Setting up new nodes with the plugin
if instances:
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Configuring"})
LOG.info(g.format_cluster_status(cluster))
try:
instances = g.get_instances(cluster, instances)
plugin.scale_cluster(cluster, instances)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't scale cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
LOG.info(g.format_cluster_status(cluster))
def _terminate_cluster(cluster_id):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster_id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
plugin.on_terminate_cluster(cluster)
INFRA.shutdown_cluster(cluster)
if CONF.use_identity_api_v3:
trusts.delete_trust(cluster)
conductor.cluster_destroy(ctx, cluster)
def _run_edp_job(job_execution_id):
job_manager.run_job(job_execution_id)
|
the-stack_0_15409 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.logical_switch_groups import LogicalSwitchGroups
from hpOneView.resources.resource import Resource, ResourceHelper, ResourcePatchMixin
class LogicalSwitchGroupsTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._lsg = LogicalSwitchGroups(self.connection)
self.uri = "/rest/logical-switch-groups/dce3fc90-873e-48f7-8340-cc927d625b16"
self._lsg.data = {"uri": self.uri}
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._lsg.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(count=500, filter='name=TestName',
sort='name:ascending', start=2)
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once_with_default(self, mock_get_all):
self._lsg.get_all()
mock_get_all.assert_called_once_with(count=-1, filter=u'', sort=u'', start=0)
@mock.patch.object(ResourceHelper, 'create')
def test_create_called_once(self, mock_create):
lsg = {
"name": "OneView Test Logical Switch Group",
"switchMapTemplate": {
"switchMapEntryTemplates": [{
"logicalLocation": {
"locationEntries": [{
"relativeValue": 1,
"type": "StackingMemberId"
}]
},
"permittedSwitchTypeUri": "/rest/switch-types/46d7ffad-4424-4e36-acf3-b379c3116206"
}]
}
}
self._lsg.create(lsg, timeout=70)
mock_create.assert_called_once_with(lsg, None, 70, None, False)
@mock.patch.object(Resource, 'ensure_resource_data')
@mock.patch.object(ResourceHelper, 'update')
def test_update_called_once(self, mock_update, mock_ensure_client):
lsg = {
"name": "Updated name",
"switchMapTemplate": {
"switchMapEntryTemplates": [{
"logicalLocation": {
"locationEntries": [{
"relativeValue": 1,
"type": "StackingMemberId"
}]
},
"permittedSwitchTypeUri": "/rest/switch-types/46d7ffad-4424-4e36-acf3-b379c3116206"
}]
},
"uri": self.uri
}
self._lsg.update(lsg, timeout=70)
mock_update.assert_called_once_with(lsg, self.uri, False, 70, None)
@mock.patch.object(ResourceHelper, 'delete')
def test_delete_called_once(self, mock_delete):
self._lsg.delete(force=True, timeout=50)
mock_delete.assert_called_once_with(self.uri, custom_headers=None, force=True, timeout=50)
@mock.patch.object(ResourceHelper, 'delete')
def test_delete_called_once_with_defaults(self, mock_delete):
self._lsg.delete()
mock_delete.assert_called_once_with(self.uri, custom_headers=None, force=False, timeout=-1)
@mock.patch.object(ResourcePatchMixin, 'patch_request')
def test_patch_should_use_user_defined_values(self, mock_patch):
mock_patch.return_value = {}
self._lsg.patch('replace',
'/scopeUris', ['rest/fake/scope123'], 1)
mock_patch.assert_called_once_with('/rest/logical-switch-groups/dce3fc90-873e-48f7-8340-cc927d625b16',
body=[{'path': '/scopeUris',
'value': ['rest/fake/scope123'],
'op': 'replace'}],
custom_headers=1, timeout=-1)
|
the-stack_0_15410 | """Pipeline implementation.
This module provides methods to run pipelines of functions with dependencies
and handle their results.
"""
from copy import deepcopy
from importlib import import_module
import builtins
import networkx
__all__ = [
'Pipeline',
]
def _yaml_tag(loader, tag, node):
'''handler for generic YAML tags
tags are stored as a tuple `(tag, value)`
'''
import yaml
if isinstance(node, yaml.ScalarNode):
value = loader.construct_scalar(node)
elif isinstance(node, yaml.SequenceNode):
value = loader.construct_sequence(node)
elif isinstance(node, yaml.MappingNode):
value = loader.construct_mapping(node)
# tags without arguments have empty string value
if value == '':
return tag,
return tag, value
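# Illustrative example (hypothetical YAML, not taken from any shipped config):
# a node such as
#   redshift: !numpy.random.uniform [0, 2]
# is loaded by this handler as the tuple ('numpy.random.uniform', [0, 2]),
# i.e. (function name, function args) in the form consumed by Pipeline below.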
class Pipeline:
r'''Class for running pipelines.
This is the main class for running pipelines of functions with dependencies
and using their results to generate variables and tables.
'''
@classmethod
def read(cls, filename):
'''Read a pipeline from a configuration file.
Parameters
----------
filename : str
The name of the configuration file.
'''
import yaml
# register custom tags
yaml.SafeLoader.add_multi_constructor('!', _yaml_tag)
# read the file
with open(filename, 'r') as stream:
config = yaml.safe_load(stream) or {}
# construct the pipeline
return cls(config)
def __init__(self, configuration):
'''Construct the pipeline.
Parameters
----------
configuration : dict-like
Configuration for the pipeline.
Notes
-----
Each step in the pipeline is configured by a dictionary specifying
a variable name and the associated value.
A value that is a tuple `(function_name, function_args)` specifies that
the value will be the result of a function call. The first item is the
fully qualified function name, and the second value specifies the
function arguments.
        If a function argument is a string beginning with `$`, it refers to the
        value of a previous step in the pipeline; the rest of the string is the
        name of the reference variable.
'configuration' should contain the name and configuration of each
variable and/or an entry named 'tables'. 'tables' should contain a set
of nested dictionaries, first containing the name of each table, then
the name and configuration of each column and optionally an entry named
'init' with a configuration that initialises the table. If 'init' is
not specificed the table will be initialised as an empty astropy Table
by default.
See [1]_ for examples of pipeline configurations in YAML format.
References
----------
.. [1] https://github.com/skypyproject/skypy/tree/master/examples
'''
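        # Illustrative configuration (hypothetical variable/table names, shaped
        # as described in the docstring above):
        #
        #   config = {
        #       'size': 100,
        #       'tables': {
        #           'galaxies': {
        #               'redshift': ('numpy.random.uniform', [0, 2, '$size']),
        #           },
        #       },
        #   }
        #   Pipeline(config).execute()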
# config contains settings for all variables and table initialisation
# table_config contains settings for all table columns
self.config = deepcopy(configuration)
self.table_config = self.config.pop('tables', {})
default_table = ('astropy.table.Table',)
self.config.update({k: v.pop('.init', default_table)
for k, v in self.table_config.items()})
# Create a Directed Acyclic Graph of all jobs and dependencies
self.dag = networkx.DiGraph()
# - add nodes for each variable, table and column
# - add edges for the table dependencies
# - keep track where functions need to be called
# functions are tuples (function name, [function args])
functions = {}
for job, settings in self.config.items():
self.dag.add_node(job)
if isinstance(settings, tuple):
functions[job] = settings
for table, columns in self.table_config.items():
table_complete = '.'.join((table, 'complete'))
self.dag.add_node(table_complete)
self.dag.add_edge(table, table_complete)
for column, settings in columns.items():
job = '.'.join((table, column))
self.dag.add_node(job)
self.dag.add_edge(table, job)
self.dag.add_edge(job, table_complete)
if isinstance(settings, tuple):
functions[job] = settings
# go through functions and add edges for all references
for job, settings in functions.items():
# settings are tuple (function, [args])
args = settings[1] if len(settings) > 1 else None
# get dependencies from arguments
deps = self.get_deps(args)
# add edges for dependencies
for d in deps:
if self.dag.has_node(d):
self.dag.add_edge(d, job)
else:
raise KeyError(d)
def execute(self):
r'''Run a pipeline.
This function runs a pipeline of functions to generate variables and
the columns of a set of tables. It uses a Directed Acyclic Graph to
determine a non-blocking order of execution that resolves any
dependencies, see [1]_.
References
----------
.. [1] https://networkx.github.io/documentation/stable/
'''
for job in networkx.topological_sort(self.dag):
if job.endswith('.complete'):
continue
elif job in self.config:
settings = self.config.get(job)
setattr(self, job, self.get_value(settings))
else:
table, column = job.split('.')
settings = self.table_config[table][column]
getattr(self, table)[column] = self.get_value(settings)
def write(self, file_format=None, overwrite=False):
r'''Write pipeline results to disk.
Parameters
----------
file_format : str
File format used to write tables. Files are written using the
Astropy unified file read/write interface; see [1]_ for supported
file formats. If None (default) tables are not written to file.
overwrite : bool
Whether to overwrite any existing files without warning.
References
----------
.. [1] https://docs.astropy.org/en/stable/io/unified.html
'''
if file_format:
for table in self.table_config.keys():
filename = '.'.join((table, file_format))
getattr(self, table).write(filename, overwrite=overwrite)
def get_value(self, value):
'''return the value of a field
tuples specify function calls `(function name, function args)`
'''
# check if not function
if not isinstance(value, tuple):
# check for reference
if isinstance(value, str) and value[0] == '$':
return self[value[1:]]
else:
# plain value
return value
# value is tuple (function name, [function args])
name = value[0]
args = value[1] if len(value) > 1 else []
# Import function
function_path = name.split('.')
module = builtins
for i, key in enumerate(function_path[:-1]):
if not hasattr(module, key):
module_name = '.'.join(function_path[:i+1])
try:
module = import_module(module_name)
except ModuleNotFoundError:
raise ModuleNotFoundError(module_name)
else:
module = getattr(module, key)
function = getattr(module, function_path[-1])
# Parse arguments
parsed_args = self.get_args(args)
# Call function
if isinstance(args, dict):
result = function(**parsed_args)
elif isinstance(args, list):
result = function(*parsed_args)
else:
result = function(parsed_args)
return result
def get_args(self, args):
'''parse function arguments
strings beginning with `$` are references to other fields
'''
if isinstance(args, dict):
# recurse kwargs
return {k: self.get_args(v) for k, v in args.items()}
elif isinstance(args, list):
# recurse args
return [self.get_args(a) for a in args]
else:
# return value
return self.get_value(args)
def get_deps(self, args):
'''get dependencies from function args
returns a list of all references found
'''
if isinstance(args, str) and args[0] == '$':
# reference
return [args[1:]]
elif isinstance(args, tuple):
# recurse on function arguments
return self.get_deps(args[1]) if len(args) > 1 else []
elif isinstance(args, dict):
# get explicit dependencies
deps = args.pop('.depends', [])
# turn a single value into a list
if isinstance(deps, str) or not isinstance(deps, list):
deps = [deps]
# recurse remaining kwargs
return deps + sum([self.get_deps(a) for a in args.values()], [])
elif isinstance(args, list):
# recurse args
return sum([self.get_deps(a) for a in args], [])
else:
# no reference
return []
def __getitem__(self, label):
name, _, key = label.partition('.')
item = getattr(self, name)
return item[key] if key else item
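    # Item access resolves dotted labels: e.g. pipeline['galaxies.redshift']
    # returns the 'redshift' column of the 'galaxies' table, while
    # pipeline['size'] returns the plain variable (names are illustrative).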
|
the-stack_0_15411 | import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xav
import numpy as np
class LSTM_net():
def __init__(self, obs_size, nb_hidden=128, action_size=16):
self.obs_size = obs_size
self.nb_hidden = nb_hidden
self.action_size = action_size
def __graph__():
tf.reset_default_graph()
# entry points
features_ = tf.placeholder(tf.float32, [1, obs_size], name='input_features')
init_state_c_, init_state_h_ = ( tf.placeholder(tf.float32, [1, nb_hidden]) for _ in range(2) )
action_ = tf.placeholder(tf.int32, name='ground_truth_action')
action_mask_ = tf.placeholder(tf.float32, [action_size], name='action_mask')
# input projection
Wi = tf.get_variable('Wi', [obs_size, nb_hidden],
initializer=xav())
bi = tf.get_variable('bi', [nb_hidden],
initializer=tf.constant_initializer(0.))
# add relu/tanh here if necessary
projected_features = tf.matmul(features_, Wi) + bi
lstm_f = tf.contrib.rnn.LSTMCell(nb_hidden, state_is_tuple=True)
lstm_op, state = lstm_f(inputs=projected_features, state=(init_state_c_, init_state_h_))
# reshape LSTM's state tuple (2,128) -> (1,256)
state_reshaped = tf.concat(axis=1, values=(state.c, state.h))
# output projection
Wo = tf.get_variable('Wo', [2*nb_hidden, action_size],
initializer=xav())
bo = tf.get_variable('bo', [action_size],
initializer=tf.constant_initializer(0.))
# get logits
logits = tf.matmul(state_reshaped, Wo) + bo
# probabilities
# normalization : elemwise multiply with action mask
probs = tf.multiply(tf.squeeze(tf.nn.softmax(logits)), action_mask_)
# prediction
prediction = tf.arg_max(probs, dimension=0)
# loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=action_)
# train op
train_op = tf.train.AdadeltaOptimizer(0.1).minimize(loss)
# attach symbols to self
self.loss = loss
self.prediction = prediction
self.probs = probs
self.logits = logits
self.state = state
self.train_op = train_op
# attach placeholders
self.features_ = features_
self.init_state_c_ = init_state_c_
self.init_state_h_ = init_state_h_
self.action_ = action_
self.action_mask_ = action_mask_
# build graph
__graph__()
# start a session; attach to self
sess = tf.Session()
sess.run(tf.global_variables_initializer())
self.sess = sess
# set init state to zeros
self.init_state_c = np.zeros([1,self.nb_hidden], dtype=np.float32)
self.init_state_h = np.zeros([1,self.nb_hidden], dtype=np.float32)
# forward propagation
def forward(self, features, action_mask):
# forward
probs, prediction, state_c, state_h = self.sess.run( [self.probs, self.prediction, self.state.c, self.state.h],
feed_dict = {
self.features_ : features.reshape([1,self.obs_size]),
self.init_state_c_ : self.init_state_c,
self.init_state_h_ : self.init_state_h,
self.action_mask_ : action_mask
})
# maintain state
self.init_state_c = state_c
self.init_state_h = state_h
# return argmax
return prediction
# training
def train_step(self, features, action, action_mask):
_, loss_value, state_c, state_h = self.sess.run( [self.train_op, self.loss, self.state.c, self.state.h],
feed_dict = {
self.features_ : features.reshape([1, self.obs_size]),
self.action_ : [action],
self.init_state_c_ : self.init_state_c,
self.init_state_h_ : self.init_state_h,
self.action_mask_ : action_mask
})
# maintain state
self.init_state_c = state_c
self.init_state_h = state_h
return loss_value
def reset_state(self):
# set init state to zeros
self.init_state_c = np.zeros([1,self.nb_hidden], dtype=np.float32)
self.init_state_h = np.zeros([1,self.nb_hidden], dtype=np.float32)
# save session to checkpoint
def save(self):
saver = tf.train.Saver()
saver.save(self.sess, 'ckpt/hcn.ckpt', global_step=0)
print('\n:: saved to ckpt/hcn.ckpt \n')
# restore session from checkpoint
def restore(self):
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state('ckpt/')
if ckpt and ckpt.model_checkpoint_path:
print('\n:: restoring checkpoint from', ckpt.model_checkpoint_path, '\n')
saver.restore(self.sess, ckpt.model_checkpoint_path)
else:
print('\n:: <ERR> checkpoint not found! \n')
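# Minimal usage sketch (sizes and values are illustrative only):
#   net = LSTM_net(obs_size=300, nb_hidden=128, action_size=16)
#   features = np.zeros(300, dtype=np.float32)
#   mask = np.ones(16, dtype=np.float32)
#   action = net.forward(features, mask)      # predicted action index
#   loss = net.train_step(features, 3, mask)  # one supervised update
#   net.reset_state()                         # clear LSTM state between dialogs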
|
the-stack_0_15412 | # -*- coding: utf-8 -*-
"""
Django settings for psppi project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (psppi/config/settings/common.py - 3 = psppi/)
APPS_DIR = ROOT_DIR.path('psppi')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'corsheaders',
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework',
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'psppi.users.apps.UsersConfig',
'psppi.responses',
'psppi.questions'
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""T""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres:///psppi'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
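# env.db() reads a 12-factor style URL from the environment; an illustrative
# value (credentials are hypothetical):
#   DATABASE_URL=postgres://psppi_user:secret@localhost:5432/psppi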
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(ROOT_DIR.path('dist')),
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('dist'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config-django.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config-django.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'psppi.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'psppi.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# django-compressor
# ------------------------------------------------------------------------------
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'127.0.0.1:3000',
'attacusatlas.io'
)
|
the-stack_0_15413 | #
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
"""DC Gateway Feature Implementation."""
from builtins import str
from collections import OrderedDict
import copy
from abstract_device_api.abstract_device_xsd import Feature, Firewall, \
FirewallFilter, From, NatRule, NatRules, RoutingInstance, Term, Then
import gevent
from .db import GlobalVRouterConfigDM, LogicalRouterDM, PhysicalInterfaceDM, \
RoutingInstanceDM, VirtualMachineInterfaceDM, VirtualNetworkDM, \
VirtualPortGroupDM
from .dm_utils import DMUtils
from .feature_base import FeatureBase
class JunosInterface(object):
def __init__(self, if_name, if_type, if_vlan_tag=0, if_ip=None,
li_uuid=None, port_vlan_tag=4094, vpg_obj=None):
"""Initialize JunosInterface init params."""
self.li_uuid = li_uuid
self.name = if_name
self.if_type = if_type
self.vlan_tag = if_vlan_tag
ifparts = if_name.split('.')
self.ifd_name = ifparts[0]
self.unit = ifparts[1]
self.ip = if_ip
# end __init__
def is_untagged(self):
return not self.vlan_tag
# end is_untagged
# end class JunosInterface
class DcGatewayFeature(FeatureBase):
@classmethod
def feature_name(cls):
return 'dc-gateway'
# end feature_name
def __init__(self, logger, physical_router, configs):
"""Create dc-gateway feature abstract config for public VN and LR.
        It prepares the dc-gateway feature abstract config:
        - retrieve the list of vn (internal vn and tenant vn) which are marked
        as public vn, or all vn of a public lr.
        - walk through these vn and create the respective ri marked with
        public_network True. For an mx PR, it also creates the firewall and
        physical_interface abstract config for mx fip and snat.
: Args:
: self: current instance of class
: logger: logger to be use to log messages
: physical_router: current PR of feature config
: configs: feature configs
: return: None
:
"""
self.ri_map = {}
self.firewall_config = None
self.pi_map = OrderedDict()
self.inet4_forwarding_filter = None
self.inet6_forwarding_filter = None
super(DcGatewayFeature, self).__init__(
logger, physical_router, configs)
# end __init__
def _get_export_import_set(self, vn_obj, ri_obj):
export_set = None
import_set = None
if vn_obj.route_targets:
export_set = vn_obj.route_targets & ri_obj.export_targets
import_set = vn_obj.route_targets & ri_obj.import_targets
else:
export_set = copy.copy(ri_obj.export_targets)
import_set = copy.copy(ri_obj.import_targets)
for ri2_id in ri_obj.routing_instances:
ri2 = RoutingInstanceDM.get(ri2_id)
if ri2 is None:
continue
import_set |= ri2.export_targets
return export_set, import_set
# end _get_export_import_set
def _add_ri_prefixes(self, vn, router_external, interfaces, prefixes, ri):
for interface in interfaces:
self._add_ref_to_list(
ri.get_interfaces(), interface.name)
if len(prefixes) < 1:
return
# for DC-gateway, skip routed vn prefix for public LR
routed_vn_prefix = set()
if vn:
routed_vn_prefix = vn.get_prefixes(
pr_uuid=self._physical_router.uuid,
only_routedvn_prefix=True)
for prefix in prefixes:
ri.add_static_routes(
self._get_route_for_cidr(prefix))
if router_external and prefix in routed_vn_prefix:
continue
ri.add_prefixes(self._get_subnet_for_cidr(prefix))
# if vn internal then also add rib interfaces since in
# overlay_networking we use this to filter out irb interfaces to set.
if router_external and '_contrail_lr_internal_vn_' in vn.name:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(vn.name)
lr = LogicalRouterDM.get(lr_uuid)
if lr:
vn_list = lr.get_connected_networks(
include_internal=False,
pr_uuid=self._physical_router.uuid)
for vn in vn_list:
vn_obj = VirtualNetworkDM.get(vn)
if vn_obj and vn_obj.vn_network_id is not None:
irb_name = "irb." + str(vn_obj.vn_network_id)
self._add_ref_to_list(
ri.get_routing_interfaces(), irb_name)
# end _add_ri_prefixes
def _add_inet_public_vrf_filter(cls, firewall_config, inet_type):
firewall_config.set_family(inet_type)
f = FirewallFilter(name=DMUtils.make_public_vrf_filter_name(inet_type))
f.set_comment(DMUtils.public_vrf_filter_comment())
firewall_config.add_firewall_filters(f)
term = Term(name="default-term", then=Then(accept_or_reject=True))
f.add_terms(term)
return f
# end _add_inet_public_vrf_filter
def _add_inet_filter_term(self, ri_name, prefixes, inet_type):
if inet_type == 'inet6':
prefixes = DMUtils.get_ipv6_prefixes(prefixes)
else:
prefixes = DMUtils.get_ipv4_prefixes(prefixes)
from_ = From()
for prefix in prefixes:
from_.add_destination_address(self._get_subnet_for_cidr(prefix))
then_ = Then()
then_.add_routing_instance(ri_name)
return Term(name=DMUtils.make_vrf_term_name(ri_name),
fromxx=from_, then=then_)
# end _add_inet_filter_term
def _check_term_exist(self, new_term_name):
for t in self.inet4_forwarding_filter.get_terms() or []:
if t.name == new_term_name:
return True
return False
# end _check_term_exist
def _add_ri_vrf_firewall_config(self, prefixes, ri):
has_ipv6_prefixes = DMUtils.has_ipv6_prefixes(prefixes)
has_ipv4_prefixes = DMUtils.has_ipv4_prefixes(prefixes)
term_ri_name = ri.get_name()
if ri.get_virtual_network_is_internal():
term_ri_name = ri.get_description()
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
if has_ipv4_prefixes and not self.inet4_forwarding_filter:
# create single instance inet4 filter
self.inet4_forwarding_filter = self. \
_add_inet_public_vrf_filter(self.firewall_config, "inet")
if has_ipv6_prefixes and not self.inet6_forwarding_filter:
# create single instance inet6 filter
self.inet6_forwarding_filter = self. \
_add_inet_public_vrf_filter(self.firewall_config, "inet6")
if self._check_term_exist(DMUtils.make_vrf_term_name(term_ri_name)):
return
if has_ipv4_prefixes:
# add terms to inet4 filter
term = self._add_inet_filter_term(
term_ri_name, prefixes, "inet4")
# insert before the last term
terms = self.inet4_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet4_forwarding_filter.set_terms(terms)
if has_ipv6_prefixes:
# add terms to inet6 filter
term = self._add_inet_filter_term(
term_ri_name, prefixes, "inet6")
# insert before the last term
terms = self.inet6_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet6_forwarding_filter.set_terms(terms)
# end _add_ri_firewall_config
def _add_routing_instance(self, ri_conf):
gevent.idle()
ri_name = ri_conf.get("ri_name")
vn = ri_conf.get("vn")
is_l2 = ri_conf.get("is_l2", False)
is_l2_l3 = ri_conf.get("is_l2_l3", False)
import_targets = ri_conf.get("import_targets", set())
export_targets = ri_conf.get("export_targets", set())
prefixes = ri_conf.get("prefixes") or []
gateways = ri_conf.get("gateways") or []
router_external = ri_conf.get("router_external", False)
interfaces = ri_conf.get("interfaces", [])
vni = ri_conf.get("vni", None)
fip_map = ri_conf.get("fip_map", None)
network_id = ri_conf.get("network_id", None)
is_internal_vn = True if '_contrail_lr_internal_vn_' in vn.name \
else False
encapsulation_priorities = ri_conf.get(
"encapsulation_priorities") or ["MPLSoGRE"]
highest_encapsulation = encapsulation_priorities[0]
ri = RoutingInstance(name=ri_name)
is_master_int_vn = False
if vn:
is_nat = True if fip_map else False
ri.set_comment(DMUtils.vn_ri_comment(vn, is_l2, is_l2_l3, is_nat,
router_external))
if is_internal_vn:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(
ri_name)
else:
if vn.logical_router is None:
                    # try updating logical router to handle DM restart;
                    # vn.logical_router could be None due to the sequencing of
                    # locate object calls in device_manager.py
vn.set_logical_router(vn.fq_name[-1])
lr_uuid = vn.logical_router
if lr_uuid:
lr = LogicalRouterDM.get(lr_uuid)
if lr:
is_master_int_vn = lr.is_master
if is_internal_vn:
                        # set description only for internal VN/VRF
ri.set_description("__contrail_%s_%s" %
(lr.name, lr_uuid))
ri.set_is_master(is_master_int_vn)
ri.set_virtual_network_id(str(network_id))
ri.set_vxlan_id(str(vni))
ri.set_virtual_network_is_internal(is_internal_vn)
ri.set_is_public_network(router_external)
if is_l2_l3:
ri.set_virtual_network_mode('l2-l3')
elif is_l2:
ri.set_virtual_network_mode('l2')
if highest_encapsulation == "VXLAN":
ri.set_routing_instance_type("virtual-switch")
elif highest_encapsulation in ["MPLSoGRE", "MPLSoUDP"]:
ri.set_routing_instance_type("evpn")
else:
ri.set_virtual_network_mode('l3')
if not is_l2:
ri.set_routing_instance_type("vrf")
if fip_map is None and (router_external or not is_internal_vn):
# add RI prefixes for dc-gateway
self._add_ri_prefixes(vn, router_external, interfaces,
prefixes, ri)
if ri.get_routing_instance_type() != 'virtual-switch' and \
ri.get_virtual_network_mode() != 'l2':
self.ri_map[ri_name] = ri
# add irb physical interface and irb vni gateway settings for l2_l3
if self._is_gateway() and is_l2_l3 and not is_internal_vn:
__, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
intf_unit = self._add_or_lookup_li(
li_map, 'irb.' + str(network_id), network_id)
if len(gateways) > 0:
if vn.has_ipv6_subnet is True:
intf_unit.set_is_virtual_router(True)
intf_unit.set_comment(
DMUtils.vn_irb_comment(vn, False, is_l2_l3,
router_external))
for (irb_ip, gateway) in gateways:
if len(gateway) and gateway != '0.0.0.0':
intf_unit.set_gateway(gateway)
self._add_ip_address(intf_unit, irb_ip,
gateway=gateway)
else:
self._add_ip_address(intf_unit, irb_ip)
if (is_l2 and vni is not None and
self._is_evpn(self._physical_router)):
irb_name = 'irb.' + str(network_id)
self._add_ref_to_list(ri.get_routing_interfaces(), irb_name)
# add firewall config for public VRF
if router_external and is_l2 is False:
self._add_ri_vrf_firewall_config(prefixes, ri)
# add firewall config for DCI Network
if fip_map is not None:
self._add_ref_to_list(ri.get_interfaces(), interfaces[0].name)
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
f = FirewallFilter(
name=DMUtils.make_private_vrf_filter_name(ri_name))
f.set_comment(DMUtils.vn_firewall_comment(vn, "private"))
self.firewall_config.add_firewall_filters(f)
term = Term(name=DMUtils.make_vrf_term_name(ri_name))
from_ = From()
for fip_user_ip in list(fip_map.keys()):
from_.add_source_address(self._get_subnet_for_cidr(
fip_user_ip))
term.set_from(from_)
term.set_then(Then(routing_instance=[ri_name]))
f.add_terms(term)
__, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
intf_name = 'irb.' + str(network_id)
intf_unit = self._add_or_lookup_li(li_map, intf_name, network_id)
intf_unit.set_comment(DMUtils.vn_irb_fip_inet_comment(vn))
intf_unit.set_family("inet")
intf_unit.add_firewall_filters(
DMUtils.make_private_vrf_filter_name(ri_name))
self._add_ref_to_list(ri.get_routing_interfaces(), intf_name)
# fip services config
nat_rules = NatRules(allow_overlapping_nat_pools=True,
name=DMUtils.make_services_set_name(ri_name),
comment=DMUtils.service_set_comment(vn))
ri.set_nat_rules(nat_rules)
snat_rule = NatRule(
name=DMUtils.make_snat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "SNAT"),
direction="input", translation_type="basic-nat44")
snat_rule.set_comment(DMUtils.snat_rule_comment())
nat_rules.add_rules(snat_rule)
dnat_rule = NatRule(
name=DMUtils.make_dnat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "DNAT"),
direction="output", translation_type="dnat-44")
dnat_rule.set_comment(DMUtils.dnat_rule_comment())
nat_rules.add_rules(dnat_rule)
nat_rules.set_inside_interface(interfaces[0].name)
nat_rules.set_outside_interface(interfaces[1].name)
for pip, fip_vn in list(fip_map.items()):
fip = fip_vn["floating_ip"]
snat_rule.add_source_addresses(self._get_subnet_for_cidr(pip))
snat_rule.add_source_prefixes(self._get_subnet_for_cidr(fip))
dnat_rule.add_destination_addresses(
self._get_subnet_for_cidr(fip))
dnat_rule.add_destination_prefixes(
self._get_subnet_for_cidr(pip))
self._add_ref_to_list(ri.get_ingress_interfaces(),
interfaces[0].name)
self._add_ref_to_list(ri.get_egress_interfaces(),
interfaces[1].name)
for target in import_targets:
self._add_to_list(ri.get_import_targets(), target)
for target in export_targets:
self._add_to_list(ri.get_export_targets(), target)
# end _add_routing_instance
def _update_vn_dict_for_external_vn(self, vn_dict, pr):
        # get all extended VN and private VN which are used in BMS fip pool
for vn_id in pr.virtual_networks:
vn_dict[vn_id] = []
vn = VirtualNetworkDM.get(vn_id)
if vn and vn.router_external:
                # retrieve and add all tenant private vn which are used in BMS
                # with the fip pool of the external vn
vn_list = vn.get_connected_private_networks()
for pvn in vn_list or []:
vn_dict[pvn] = []
        # MX snat requires physical interface and firewall config for the current
        # PR. Get the PR's PI used in the VPG's VN and its LI interface. Each
        # interface carries an l2 name (ae or PI name), vlan tag, port_vlan_tag and vpg obj.
for vpg_uuid in pr.virtual_port_groups or []:
vpg_obj = VirtualPortGroupDM.get(vpg_uuid)
if not vpg_obj:
continue
vpg_interfaces = vpg_obj.physical_interfaces
for vmi_uuid in vpg_obj.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi_uuid)
vn = VirtualNetworkDM.get(vmi_obj.virtual_network) if \
vmi_obj and vmi_obj.virtual_network is not None else None
if not vn:
continue
vlan_tag = vmi_obj.vlan_tag
port_vlan_tag = vmi_obj.port_vlan_tag
for pi_uuid in vpg_interfaces:
if pi_uuid not in pr.physical_interfaces:
continue
ae_id = vpg_obj.pi_ae_map.get(pi_uuid)
if ae_id is not None and vlan_tag is not None:
ae_name = "ae" + str(ae_id) + "." + str(vlan_tag)
vn_dict.setdefault(vn.uuid, []).append(
JunosInterface(ae_name, 'l2', vlan_tag,
port_vlan_tag=port_vlan_tag,
vpg_obj=vpg_obj))
break
else:
pi_obj = PhysicalInterfaceDM.get(pi_uuid)
if pi_obj:
li_name = pi_obj.name + "." + str(vlan_tag)
vn_dict.setdefault(vn.uuid, []).append(
JunosInterface(li_name, 'l2', vlan_tag,
port_vlan_tag=port_vlan_tag,
vpg_obj=vpg_obj))
break
# end _update_vn_dict_for_external_vn
def _build_ri_config_for_dc(self):
pr = self._physical_router
vn_dict = {}
        # For Public LR, add all tenant VN and contrail internal vn in dict
vn_list = []
for lr_id in pr.logical_routers or []:
lr = LogicalRouterDM.get(lr_id)
if not lr or (lr.logical_router_gateway_external is False) or \
not lr.virtual_network or \
not self._is_valid_vn(lr.virtual_network, 'l3'):
continue
if lr.logical_router_gateway_external is True:
                # This means the vn_obj is an internal network and it's a public LR.
# So for junos family, we need to check for the CGNAT VN.
if pr.device_family == 'junos':
if lr.cgnat_vn:
ms_enabled, ms_ifc = self.is_service_interface_enabled(
ifc_prefix="ms")
cgnat_vn_obj = VirtualNetworkDM.get(lr.cgnat_vn)
if ms_enabled:
self.construct_cgnat_config(lr, cgnat_vn_obj,
ms_ifc)
vn_obj = VirtualNetworkDM.get(lr.virtual_network)
if '_contrail_lr_internal_vn_' not in vn_obj.name:
continue
ri_obj = self._get_primary_ri(vn_obj)
if ri_obj is None:
continue
lr_obj = LogicalRouterDM.get(vn_obj.logical_router)
if lr_obj is None or lr_obj.is_master is True:
continue
# vn_dict[lr.virtual_network] = []
vn_list += lr.get_connected_networks(include_internal=True,
pr_uuid=pr.uuid)
for vn_id in vn_list:
vn_dict[vn_id] = []
if pr.device_family == 'junos':
# only for Junos MX platform we support fip and snat
# through external vn
self._update_vn_dict_for_external_vn(vn_dict, pr)
if len(vn_dict) > 0:
# refresh prepared vn's pr.vn_ip_map dictionary for irb and lo0
pr.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l2_l3', 'irb',
False)
pr.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l3', 'lo0', True)
vn_irb_ip_map = pr.get_vn_irb_ip_map()
for vn_id, interfaces in self._get_sorted_key_value_pairs(vn_dict):
vn_obj = VirtualNetworkDM.get(vn_id)
if (vn_obj is None or vn_obj.get_vxlan_vni() is None or
vn_obj.vn_network_id is None):
continue
export_set = None
import_set = None
for ri_id in vn_obj.routing_instances:
# Find the primary RI by matching the fabric name
ri_obj = RoutingInstanceDM.get(ri_id)
if ri_obj is None or ri_obj.fq_name[-1] != vn_obj.fq_name[-1]:
continue
export_set, import_set = self._get_export_import_set(vn_obj,
ri_obj)
if vn_obj.get_forwarding_mode() in ['l2', 'l2_l3']:
# create ri config for is_l2 True
irb_ips = []
if vn_obj.get_forwarding_mode() == 'l2_l3' and \
self._is_gateway():
irb_ips = vn_irb_ip_map['irb'].get(vn_id, [])
vrf_name_l2 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l2')
ri_conf = {'ri_name': vrf_name_l2, 'vn': vn_obj,
'is_l2': True, 'is_l2_l3':
(vn_obj.get_forwarding_mode() == 'l2_l3'),
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(pr.uuid),
'gateways': irb_ips,
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'vni': vn_obj.get_vxlan_vni(),
'network_id': vn_obj.vn_network_id,
'encapsulation_priorities':
GlobalVRouterConfigDM.
global_encapsulation_priorities}
self._add_routing_instance(ri_conf)
if vn_obj.get_forwarding_mode() in ['l3', 'l2_l3'] and \
self._is_gateway():
interfaces = []
lo0_ips = []
if vn_obj.get_forwarding_mode() == 'l2_l3':
interfaces = [
JunosInterface(
'irb.' + str(vn_obj.vn_network_id),
'l3', 0)]
else:
lo0_ips = vn_irb_ip_map['lo0'].get(vn_id, [])
is_internal_vn = True if '_contrail_lr_internal_vn_' in \
vn_obj.name else False
vrf_name_l3 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l3')
ri_conf = {'ri_name': vrf_name_l3, 'vn': vn_obj,
'is_l2': False,
'is_l2_l3':
vn_obj.get_forwarding_mode() == 'l2_l3',
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(pr.uuid),
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'gateways': lo0_ips,
'network_id': vn_obj.vn_network_id}
if is_internal_vn:
lr_uuid = DMUtils.\
extract_lr_uuid_from_internal_vn_name(vrf_name_l3)
lr = LogicalRouterDM.get(lr_uuid)
if lr and not lr.is_master:
ri_conf['vni'] = vn_obj.get_vxlan_vni(
is_internal_vn=is_internal_vn)
ri_conf['router_external'] = lr.\
logical_router_gateway_external
dci = lr.get_interfabric_dci()
if dci:
ri_conf['connected_dci_network'] = dci.uuid
lr_vn_list = dci.\
get_connected_lr_internal_vns(
exclude_lr=lr.uuid, pr_uuid=pr.uuid)
for lr_vn in lr_vn_list:
exports, imports = lr_vn.\
get_route_targets()
if imports:
ri_conf['import_targets'] |= imports
if exports:
ri_conf['export_targets'] |= exports
self._add_routing_instance(ri_conf)
break
# end for ri_id in vn_obj.routing_instances:
si_enabled, si_ifc = self.is_service_interface_enabled(
ifc_prefix="si")
if export_set and \
pr.is_junos_service_ports_enabled() and \
len(vn_obj.instance_ip_map) > 0 and si_enabled:
service_port_ids = DMUtils.get_service_ports(
vn_obj.vn_network_id)
if not pr \
.is_service_port_id_valid(service_port_ids[0]):
self._logger.error("DM can't allocate service interfaces"
" for (vn, vn-id)=(%s,%s)" %
(vn_obj.fq_name,
vn_obj.vn_network_id))
else:
vrf_name = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l3', True)
interfaces = []
interfaces.append(
JunosInterface(
si_ifc + "." + str(service_port_ids[0]),
'l3', 0))
interfaces.append(
JunosInterface(
si_ifc + "." + str(service_port_ids[1]),
'l3', 0))
ri_conf = {'ri_name': vrf_name, 'vn': vn_obj,
'import_targets': import_set,
'interfaces': interfaces,
'fip_map': vn_obj.instance_ip_map,
'network_id': vn_obj.vn_network_id,
'restrict_proxy_arp': vn_obj.router_external}
self._add_routing_instance(ri_conf)
# end _build_ri_config_for_dc
def is_service_interface_enabled(self, ifc_prefix="si"):
pr = self._physical_router
if pr.is_junos_service_ports_enabled():
sps = pr.junos_service_ports.get('service_port')
if sps and type(sps) is list:
for sp in sps:
if sp and str(sp).strip().startswith("{}-".format(
ifc_prefix)):
return True, str(sp).strip()
return False, None
def construct_cgnat_config(self, lr, cgnat_vn, ms_ifc):
vn_obj = cgnat_vn
pr = self._physical_router
private_vns = lr.get_connected_networks(include_internal=False,
pr_uuid=pr.uuid)
if ms_ifc:
internal_vn = lr.virtual_network
internal_vn_obj = VirtualNetworkDM.get(internal_vn)
service_port_ids = DMUtils.get_service_ports(
internal_vn_obj.vn_network_id)
if not pr \
.is_service_port_id_valid(service_port_ids[0]):
self._logger.error("DM can't allocate service interfaces"
" for (vn, vn-id)=(%s,%s)" %
(internal_vn_obj.fq_name,
internal_vn_obj.vn_network_id))
else:
vrf_name = DMUtils.make_vrf_name(internal_vn_obj.fq_name[-1],
internal_vn_obj.vn_network_id,
'l3', True)
interfaces = []
interfaces.append(
JunosInterface(
ms_ifc + "." + str(service_port_ids[0]),
'l3', 0))
interfaces.append(
JunosInterface(
ms_ifc + "." + str(service_port_ids[1]),
'l3', 0))
ex_rt, im_rt = vn_obj.get_route_targets()
ri_conf = {'ri_name': vrf_name, 'vn': vn_obj,
'import_targets': im_rt,
'interfaces': interfaces,
'fip_map': vn_obj.instance_ip_map,
'network_id': vn_obj.vn_network_id,
'restrict_proxy_arp': vn_obj.router_external,
'is_cgnat_vrf': True,
'private_vns': private_vns}
                self._add_routing_instance(ri_conf)
def feature_config(self, **kwargs):
self.ri_map = {}
self.firewall_config = None
self.pi_map = OrderedDict()
self.inet4_forwarding_filter = None
self.inet6_forwarding_filter = None
feature_config = Feature(name=self.feature_name())
self._build_ri_config_for_dc()
feature_config.set_routing_instances(
self._get_values_sorted_by_key(
self.ri_map))
if self.firewall_config is not None:
feature_config.set_firewall(self.firewall_config)
for pi, li_map in list(self.pi_map.values()):
pi.set_logical_interfaces(list(li_map.values()))
feature_config.add_physical_interfaces(pi)
return feature_config
# end DcGatewayFeature
|
the-stack_0_15414 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, Callable, Dict, List, Optional, Type
from magma.common.service import MagmaService
from magma.enodebd.data_models import transform_for_enb, transform_for_magma
from magma.enodebd.data_models.data_model import DataModel, TrParam
from magma.enodebd.data_models.data_model_parameters import (
ParameterName,
TrParameterType,
)
from magma.enodebd.device_config.enodeb_config_postprocessor import (
EnodebConfigurationPostProcessor,
)
from magma.enodebd.device_config.enodeb_configuration import EnodebConfiguration
from magma.enodebd.devices.device_utils import EnodebDeviceName
from magma.enodebd.exceptions import Tr069Error
from magma.enodebd.logger import EnodebdLogger as logger
from magma.enodebd.state_machines.acs_state_utils import (
get_all_objects_to_add,
get_all_objects_to_delete,
)
from magma.enodebd.state_machines.enb_acs import EnodebAcsStateMachine
from magma.enodebd.state_machines.enb_acs_impl import BasicEnodebAcsStateMachine
from magma.enodebd.state_machines.enb_acs_states import (
AcsMsgAndTransition,
AcsReadMsgResult,
AddObjectsState,
DeleteObjectsState,
EndSessionState,
EnodebAcsState,
ErrorState,
GetParametersState,
GetRPCMethodsState,
SendGetTransientParametersState,
SendRebootState,
SetParameterValuesNotAdminState,
WaitEmptyMessageState,
WaitGetObjectParametersState,
WaitGetParametersState,
WaitGetTransientParametersState,
WaitInformMRebootState,
WaitInformState,
WaitRebootResponseState,
WaitSetParameterValuesState,
)
from magma.enodebd.tr069 import models
class CaviumHandler(BasicEnodebAcsStateMachine):
def __init__(
self,
service: MagmaService,
) -> None:
self._state_map = {}
super().__init__(service=service, use_param_key=False)
def reboot_asap(self) -> None:
self.transition('reboot')
def is_enodeb_connected(self) -> bool:
return not isinstance(self.state, WaitInformState)
def _init_state_map(self) -> None:
self._state_map = {
'wait_inform': WaitInformState(self, when_done='get_rpc_methods'),
'get_rpc_methods': GetRPCMethodsState(self, when_done='wait_empty', when_skip='get_transient_params'),
'wait_empty': WaitEmptyMessageState(self, when_done='get_transient_params'),
'get_transient_params': SendGetTransientParametersState(self, when_done='wait_get_transient_params'),
'wait_get_transient_params': WaitGetTransientParametersState(self, when_get='get_params', when_get_obj_params='get_obj_params', when_delete='delete_objs', when_add='add_objs', when_set='set_params', when_skip='end_session'),
'get_params': GetParametersState(self, when_done='wait_get_params'),
'wait_get_params': WaitGetParametersState(self, when_done='get_obj_params'),
'get_obj_params': CaviumGetObjectParametersState(self, when_done='wait_get_obj_params'),
'wait_get_obj_params': CaviumWaitGetObjectParametersState(self, when_edit='disable_admin', when_skip='get_transient_params'),
'disable_admin': CaviumDisableAdminEnableState(self, admin_value=False, when_done='wait_disable_admin'),
'wait_disable_admin': CaviumWaitDisableAdminEnableState(self, admin_value=False, when_add='add_objs', when_delete='delete_objs', when_done='set_params'),
'delete_objs': DeleteObjectsState(self, when_add='add_objs', when_skip='set_params'),
'add_objs': AddObjectsState(self, when_done='set_params'),
'set_params': SetParameterValuesNotAdminState(self, when_done='wait_set_params'),
'wait_set_params': WaitSetParameterValuesState(self, when_done='enable_admin', when_apply_invasive='enable_admin'),
'enable_admin': CaviumDisableAdminEnableState(self, admin_value=True, when_done='wait_enable_admin'),
'wait_enable_admin': CaviumWaitDisableAdminEnableState(self, admin_value=True, when_done='check_get_params', when_add='check_get_params', when_delete='check_get_params'),
'check_get_params': GetParametersState(self, when_done='check_wait_get_params', request_all_params=True),
'check_wait_get_params': WaitGetParametersState(self, when_done='end_session'),
'end_session': EndSessionState(self),
# Below states only entered through manual user intervention
'reboot': SendRebootState(self, when_done='wait_reboot'),
'wait_reboot': WaitRebootResponseState(self, when_done='wait_post_reboot_inform'),
'wait_post_reboot_inform': WaitInformMRebootState(self, when_done='wait_reboot_delay', when_timeout='wait_inform'),
# The states below are entered when an unexpected message type is
# received
'unexpected_fault': ErrorState(self, inform_transition_target='wait_inform'),
}
@property
def device_name(self) -> str:
return EnodebDeviceName.CAVIUM
@property
def data_model_class(self) -> Type[DataModel]:
return CaviumTrDataModel
@property
def config_postprocessor(self) -> EnodebConfigurationPostProcessor:
return CaviumTrConfigurationInitializer()
@property
def state_map(self) -> Dict[str, EnodebAcsState]:
return self._state_map
@property
def disconnected_state_name(self) -> str:
return 'wait_inform'
@property
def unexpected_fault_state_name(self) -> str:
return 'unexpected_fault'
class CaviumGetObjectParametersState(EnodebAcsState):
"""
When booted, the PLMN list is empty so we cannot get individual
object parameters. Instead, get the parent object PLMN_LIST
which will include any children if they exist.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Respond with GetParameterValuesRequest """
names = [ParameterName.PLMN_LIST]
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class CaviumWaitGetObjectParametersState(WaitGetObjectParametersState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_edit: str,
when_skip: str,
):
super().__init__(
acs=acs,
when_add=when_edit,
when_delete=when_edit,
when_set=when_edit,
when_skip=when_skip,
)
class CaviumDisableAdminEnableState(EnodebAcsState):
"""
Cavium requires that we disable 'Admin Enable' before configuring
most parameters
"""
def __init__(self, acs: EnodebAcsStateMachine, admin_value: bool, when_done: str):
super().__init__()
self.acs = acs
self.admin_value = admin_value
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Returns:
A SetParameterValueRequest for setting 'Admin Enable' to False
"""
param_name = ParameterName.ADMIN_STATE
# if we want the cell to be down don't force it up
desired_admin_value = \
self.acs.desired_cfg.get_parameter(param_name) \
and self.admin_value
admin_value = \
self.acs.data_model.transform_for_enb(
param_name,
desired_admin_value,
)
admin_path = self.acs.data_model.get_parameter(param_name).path
param_values = {admin_path: admin_value}
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
name_value = models.ParameterValueStruct()
name_value.Name = admin_path
name_value.Value = models.anySimpleType()
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(admin_value)
request.ParameterList.ParameterValueStruct = [name_value]
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Disabling admin_enable (Cavium only)'
class CaviumWaitDisableAdminEnableState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
admin_value: bool,
when_done: str,
when_add: str,
when_delete: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.add_obj_transition = when_add
self.del_obj_transition = when_delete
self.admin_value = admin_value
    def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.Fault:
logger.error('Received Fault in response to SetParameterValues')
if message.SetParameterValuesFault is not None:
for fault in message.SetParameterValuesFault:
logger.error(
'SetParameterValuesFault Param: %s, Code: %s, String: %s',
fault.ParameterName, fault.FaultCode, fault.FaultString,
)
raise Tr069Error(
'Received Fault in response to SetParameterValues '
'(faultstring = %s)' % message.FaultString,
)
elif not isinstance(message, models.SetParameterValuesResponse):
return AcsReadMsgResult(False, None)
if message.Status != 0:
raise Tr069Error(
'Received SetParameterValuesResponse with '
'Status=%d' % message.Status,
)
param_name = ParameterName.ADMIN_STATE
desired_admin_value = \
self.acs.desired_cfg.get_parameter(param_name) \
and self.admin_value
magma_value = \
self.acs.data_model.transform_for_magma(
param_name,
desired_admin_value,
)
self.acs.device_cfg.set_parameter(param_name, magma_value)
if len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.del_obj_transition)
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.add_obj_transition)
else:
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Disabling admin_enable (Cavium only)'
class CaviumTrDataModel(DataModel):
"""
Class to represent relevant data model parameters from TR-196/TR-098/TR-181.
This class is effectively read-only
"""
# Mapping of TR parameter paths to aliases
DEVICE_PATH = 'Device.'
FAPSERVICE_PATH = DEVICE_PATH + 'Services.FAPService.1.'
PARAMETERS = {
# Top-level objects
ParameterName.DEVICE: TrParam(DEVICE_PATH, True, TrParameterType.OBJECT, False),
ParameterName.FAP_SERVICE: TrParam(FAPSERVICE_PATH, True, TrParameterType.OBJECT, False),
# Device info parameters
ParameterName.GPS_STATUS: TrParam(DEVICE_PATH + 'FAP.GPS.ContinuousGPSStatus.GotFix', True, TrParameterType.BOOLEAN, False),
ParameterName.GPS_LAT: TrParam(DEVICE_PATH + 'FAP.GPS.LockedLatitude', True, TrParameterType.INT, False),
ParameterName.GPS_LONG: TrParam(DEVICE_PATH + 'FAP.GPS.LockedLongitude', True, TrParameterType.INT, False),
ParameterName.SW_VERSION: TrParam(DEVICE_PATH + 'DeviceInfo.SoftwareVersion', True, TrParameterType.STRING, False),
ParameterName.SERIAL_NUMBER: TrParam(DEVICE_PATH + 'DeviceInfo.SerialNumber', True, TrParameterType.STRING, False),
# Capabilities
ParameterName.DUPLEX_MODE_CAPABILITY: TrParam(
FAPSERVICE_PATH + 'Capabilities.LTE.DuplexMode', True, TrParameterType.STRING, False,
),
ParameterName.BAND_CAPABILITY: TrParam(FAPSERVICE_PATH + 'Capabilities.LTE.BandsSupported', True, TrParameterType.UNSIGNED_INT, False),
# RF-related parameters
ParameterName.EARFCNDL: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.EARFCNDL', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.EARFCNUL: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.EARFCNUL', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.BAND: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.FreqBandIndicator', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.PCI: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.PhyCellID', True, TrParameterType.STRING, False),
ParameterName.DL_BANDWIDTH: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.DLBandwidth', True, TrParameterType.STRING, False),
ParameterName.UL_BANDWIDTH: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.ULBandwidth', True, TrParameterType.STRING, False),
ParameterName.CELL_ID: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.Common.CellIdentity', True, TrParameterType.UNSIGNED_INT, False),
# Other LTE parameters
ParameterName.ADMIN_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.AdminState', False, TrParameterType.BOOLEAN, False),
ParameterName.OP_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.OpState', True, TrParameterType.BOOLEAN, False),
ParameterName.RF_TX_STATUS: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.RFTxStatus', True, TrParameterType.BOOLEAN, False),
# RAN parameters
ParameterName.CELL_RESERVED: TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.RAN.CellRestriction.CellReservedForOperatorUse', True, TrParameterType.BOOLEAN, False,
),
ParameterName.CELL_BARRED: TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.RAN.CellRestriction.CellBarred', True, TrParameterType.BOOLEAN, False,
),
# Core network parameters
ParameterName.MME_IP: TrParam(
FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkServerList', True, TrParameterType.STRING, False,
),
ParameterName.MME_PORT: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkPort', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.NUM_PLMNS: TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNListNumberOfEntries', True, TrParameterType.UNSIGNED_INT, False,
),
ParameterName.PLMN: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.', True, TrParameterType.OBJECT, False),
# PLMN arrays are added below
ParameterName.TAC: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.TAC', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.IP_SEC_ENABLE: TrParam(
DEVICE_PATH + 'IPsec.Enable', False, TrParameterType.BOOLEAN, False,
),
# Management server parameters
ParameterName.PERIODIC_INFORM_ENABLE: TrParam(
DEVICE_PATH + 'ManagementServer.PeriodicInformEnable',
False, TrParameterType.BOOLEAN, False,
),
ParameterName.PERIODIC_INFORM_INTERVAL: TrParam(
DEVICE_PATH + 'ManagementServer.PeriodicInformInterval',
False, TrParameterType.UNSIGNED_INT, False,
),
# Performance management parameters
ParameterName.PERF_MGMT_ENABLE: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Enable', False, TrParameterType.BOOLEAN, False,
),
ParameterName.PERF_MGMT_UPLOAD_INTERVAL: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.PeriodicUploadInterval', False, TrParameterType.UNSIGNED_INT, False,
),
ParameterName.PERF_MGMT_UPLOAD_URL: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.URL', False, TrParameterType.STRING, False,
),
ParameterName.PERF_MGMT_USER: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Username',
False, TrParameterType.STRING, False,
),
ParameterName.PERF_MGMT_PASSWORD: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Password',
False, TrParameterType.STRING, False,
),
# PLMN Info
ParameterName.PLMN_LIST: TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.', False, TrParameterType.OBJECT, False,
),
}
NUM_PLMNS_IN_CONFIG = 6
for i in range(1, NUM_PLMNS_IN_CONFIG + 1):
PARAMETERS[ParameterName.PLMN_N % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.' % i, True, TrParameterType.OBJECT, False,
)
PARAMETERS[ParameterName.PLMN_N_CELL_RESERVED % i] = TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.EPC.PLMNList.%d.CellReservedForOperatorUse' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_ENABLE % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.Enable' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_PRIMARY % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.IsPrimary' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_PLMNID % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.PLMNID' % i, True, TrParameterType.STRING, False,
)
TRANSFORMS_FOR_ENB = {
ParameterName.DL_BANDWIDTH: transform_for_enb.bandwidth,
ParameterName.UL_BANDWIDTH: transform_for_enb.bandwidth,
}
TRANSFORMS_FOR_MAGMA = {
ParameterName.DL_BANDWIDTH: transform_for_magma.bandwidth,
ParameterName.UL_BANDWIDTH: transform_for_magma.bandwidth,
# We don't set GPS, so we don't need transform for enb
ParameterName.GPS_LAT: transform_for_magma.gps_tr181,
ParameterName.GPS_LONG: transform_for_magma.gps_tr181,
}
@classmethod
def get_parameter(cls, param_name: ParameterName) -> Optional[TrParam]:
return cls.PARAMETERS.get(param_name)
@classmethod
def _get_magma_transforms(
cls,
) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_MAGMA
@classmethod
def _get_enb_transforms(cls) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_ENB
@classmethod
def get_load_parameters(cls) -> List[ParameterName]:
"""
Load all the parameters instead of a subset.
"""
return [ParameterName.DEVICE]
@classmethod
def get_num_plmns(cls) -> int:
return cls.NUM_PLMNS_IN_CONFIG
@classmethod
def get_parameter_names(cls) -> List[ParameterName]:
excluded_params = [
str(ParameterName.DEVICE),
str(ParameterName.FAP_SERVICE),
]
names = list(
filter(
lambda x: (not str(x).startswith('PLMN'))
and (str(x) not in excluded_params),
cls.PARAMETERS.keys(),
),
)
return names
@classmethod
def get_numbered_param_names(
cls,
) -> Dict[ParameterName, List[ParameterName]]:
names = {}
for i in range(1, cls.NUM_PLMNS_IN_CONFIG + 1):
params = []
params.append(ParameterName.PLMN_N_CELL_RESERVED % i)
params.append(ParameterName.PLMN_N_ENABLE % i)
params.append(ParameterName.PLMN_N_PRIMARY % i)
params.append(ParameterName.PLMN_N_PLMNID % i)
names[ParameterName.PLMN_N % i] = params
return names
class CaviumTrConfigurationInitializer(EnodebConfigurationPostProcessor):
def postprocess(self, mconfig: Any, service_cfg: Any, desired_cfg: EnodebConfiguration) -> None:
desired_cfg.set_parameter(ParameterName.CELL_BARRED, True)
desired_cfg.set_parameter(ParameterName.ADMIN_STATE, True)
|
the-stack_0_15415 | # Create the MovieReview class with the following methods:
#
# - a constructor (__init__()) that receives two input parameters that are used to initialise
# attributes *rating* and *comment*, respectively. Default value for the 2nd input parameter
# is an empty string. The constructor also sets the value of the *timestamp* attribute to the
# current date and time.
#
# - get and set methods for the *rating* and *comment* attributes (using appropriate decorators);
# designate both attributes as private; valid values for these two attributes are as follows:
# - for *rating*: int values between 1 and 5, including 1 and 5
# - for *comment*: any string value
#
# - a method that returns a string representation of a MovieReview object (__str__())
from datetime import datetime
class MovieReview:
def __init__(self, rating, comment=""):
self.rating = rating
self.comment = comment
self.timestamp = datetime.today()
@property
def rating(self):
return self.__rating
@rating.setter
def rating(self, value):
if isinstance(value, int) and 1 <= value <= 5:
self.__rating = value
elif isinstance(value, str) and (len(value) == 1) and (value in '12345'):
self.__rating = int(value)
else:
print(f"Invalid value ({value}) passed for movie rating")
self.__rating = None
@property
def comment(self):
return self.__comment if self.__comment else ""
@comment.setter
def comment(self, value):
if isinstance(value, str):
self.__comment = value
else:
print(f"Error! String value expected, received {type(value)} instead. Coercing the input to a string")
self.__comment = str(value)
def __str__(self):
s = f"{self.rating} stars; " if self.rating else "Rating not available; "
s += f"comment: '{self.comment}'" if self.comment else "no comment left"
s += f"(received {datetime.strftime(self.timestamp, '%b %d, %Y %H:%M')})"
return s
# Create the Movie class with the following methods:
#
# - a constructor (__init__()) that receives three input parameters to be used to initialise
# attributes *title*, *year*, and *director*, respectively. Default value for the 3rd input
# parameter is None. The constructor also initializes the *reviews* attribute
# (a list of MovieReview objects) to an empty list.
#
# - a method that returns a string representation of the given Movie object (__str__())
#
# - a method for adding a new review to the Movie objects, that is, to the *reviews* list.
# The review to be added is passed as the input argument; it is added to the list, only
# if it is an object of the MovieReview class and the review is not older than 1 year.
# (a useful StackOverflow entry:
# https://stackoverflow.com/questions/1345827/how-do-i-find-the-time-difference-between-two-datetime-objects-in-python)
#
# - a method (__eq__()) for checking for equality of the given Movie object and another
# object that is passed to the method as its input parameter. Two Movie objects are
# considered the same if they have the same title and director, or, if the director is
# unknown, then the same title and year.
#
# - methods for turning the given Movie object into an iterator (__iter__(), __next__())
# over the movie reviews (that is, elements of the *reviews* list)
#
class Movie:
def __init__(self, title, year, director=None):
self.title = title
self.year = year
self.director = director
self.reviews = list()
def __str__(self):
movie_str = f"Movie '{self.title}' from {self.year}"
movie_str += f" directed by {self.director}" if self.director else " (director unknown)"
if len(self.reviews) > 0:
movie_str += "\nReviews:\n" + "\n".join([str(mr) for mr in self.reviews])
else:
movie_str += ", no reviews yet"
return movie_str
def add_review(self, review):
if isinstance(review, MovieReview):
time_diff = datetime.today() - review.timestamp
time_diff_sec = time_diff.total_seconds()
secs_in_year = 365*24*60*60
time_diff_year = time_diff_sec // secs_in_year # integer division
if time_diff_year < 1:
self.reviews.append(review)
else:
print("An outdated review")
else:
print("Not an object of MovieReview class; cannot be added")
def __eq__(self, other):
if isinstance(other, Movie):
if self.director and other.director:
return (self.title == other.title) and (self.director == other.director)
else:
print("Director(s) unknown; checking for equality based on the title-year pair")
return (self.title == other.title) and (self.year == other.year)
else:
print("The other object is not a Movie")
return False
def __iter__(self):
self.__review_counter = 0
return self
def __next__(self):
if self.__review_counter == len(self.reviews):
raise StopIteration
current_review = self.reviews[self.__review_counter]
self.__review_counter += 1
return current_review
if __name__ == '__main__':
mr_1 = MovieReview(5, "Superb!")
mr_2 = MovieReview(5, "The best ever!")
mr_3 = MovieReview(3, "Expected more...")
# print(mr_1)
# print(mr_2)
# print(mr_3)
godfather = Movie("The Godfather", year=1972, director="Francis Ford Coppola")
print(godfather)
print()
godfather_2 = Movie("The Godfather: part II", 1974, "Francis Ford Coppola")
print(godfather_2)
print()
if godfather == godfather_2:
print("No difference observed!")
else:
print("Different movies!")
print()
for mr in (mr_1, mr_2, mr_3):
godfather_2.add_review(mr)
print("Printing movie data after adding reviews")
print(godfather_2)
print("\nReviews for the Godfather 2 movie:")
for review in godfather_2:
print(review) |
the-stack_0_15416 | """
It is a simple sorting algorithm that builds the final sorted list one item at a time
** it's like sorting playing cards in your hand
Algorithm:
1. Consider the first element to be sorted and the rest to be unsorted.
2. Take the first element in the unsorted part (u1) and compare it with the sorted part elements (s1).
3. If u1 < s1 then insert u1 at the correct index, else leave it as is.
4. Take the next element in the unsorted part and compare it with the sorted elements.
5. Repeat 3 and 4 until all the elements are sorted.
"""
def insertion_sort(list1):
for index in range(1, len(list1)):
current_element = list1[index]
pos = index
        while pos > 0 and current_element < list1[pos - 1]:
list1[pos] = list1[pos - 1]
pos -= 1
list1[pos] = current_element
list1 = [9, 35, 0, 15, 11]
insertion_sort(list1)
print(list1)
|
the-stack_0_15417 | # pylint: disable=invalid-name
import pickle
from math import inf
import pandas as pd
import numpy as np
#from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#import matplotlib.pyplot as plt
first_time = False
#parameters = [(50, 300) - 0.54, 0.6, (40, 600) - 0.438, 0.3, (75, 300), (100, 200), (200, 200), (200, 100), (400, 100)]
#parameters = [(10, 100), (20, 100), (30, 100), (40, 100), (50, 100)] - 0.38, 0.1
#parameters = [(10, 100) - 0.388, 0.094, (20, 200), (50, 200) - - 0.414, 0.19, (50, 400), (100, 400) - 0.456, 0.3857]
parameters = [(60, 700), (40, 600), (50, 500), (100, 400), (50, 300)]
filename = 'german.data-numeric.txt'
def readData():
'''
Reads data from text file and stores as data frame
'''
df = pd.read_table(filename, header=None, delim_whitespace=True)
    Y = df.iloc[:, -1]
    df = df.iloc[:, :-1]
    df = (df - df.min()) / (df.max() - df.min())
return (df, Y)
def mahanalobisdist(a, b):
'''
Calculates the mahalanobis distance
between 2 points of the data
'''
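    # Standard definition, which the code below implements:
    #   d_M(a, b) = sqrt((a - b)^T * S^(-1) * (a - b))
    # where S is the covariance estimated from the pair of points (np.cov)
    # and S^(-1) is taken as the Moore-Penrose pseudo-inverse (np.linalg.pinv).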
temp = np.array([a, b]).T
cov = np.cov(temp)
delta = a - b
inv = np.linalg.pinv(cov)
mdist = np.dot(np.dot(np.transpose(delta), inv), delta)
mdist = np.sqrt(mdist)
return mdist
def createDistanceMatrix(data, first_timeval, N):
'''
Computes the distance matrix and
    writes it to a pickle file to save time
on future runs
'''
distancematrix = np.zeros((N, N))
if first_timeval:
i = 0
for value1 in data:
j = 0
for value2 in data:
distancematrix[i][j] = mahanalobisdist(value1, value2)
#print(distancematrix[i][j])
j += 1
i += 1
f = open('distancematrix', 'wb')
pickle.dump(distancematrix, f)
f.close()
else:
f2 = open('distancematrix', 'rb')
distancematrix = pickle.load(f2)
f2.close()
return distancematrix
def getLRD(N, distancematrix, k, data):
'''
Finds
1. The KNN and hence the kdistance for each point
i.e the distance to its kthNN,
2. The number of points that fall within the k-distance neighbourhood
3. Reachability distances
4. lrd (local reachability density)
for each point
'''
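    # Standard LOF quantities, matching the computation below:
    #   reach-dist_k(p, o) = max(k-distance(o), d(p, o))
    #   lrd_k(p) = |N_k(p)| / sum_{o in N_k(p)} reach-dist_k(p, o)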
kdist = np.zeros(N)
kneighbours = {}
Numneighbours = 0
lrd = np.zeros(N)
for i in range(N):
distancefrompoint = distancematrix[i]
knn = np.partition(distancefrompoint, k-1)
kdist[i] = knn[k-1]
sort_index = np.argsort(distancefrompoint)
j = 0
temp = []
for dist in distancefrompoint:
if dist <= kdist[i]:
temp.append(sort_index[j])
Numneighbours += 1
j += 1
kneighbours[i] = temp
reachabilitydistance = getReachabilityDistances(N, data, kdist, distancematrix)
for i in range(N):
sumOfReachabilityDistances = 0
for value in kneighbours[i]:
sumOfReachabilityDistances += reachabilitydistance[int(value)][i]
        if sumOfReachabilityDistances == 0:
            lrd[i] = inf
        else:
            lrd[i] = len(kneighbours[i])/sumOfReachabilityDistances
return lrd
def getReachabilityDistances(N, data, kdist, distancematrix):
'''
Calculates the reachability distance
between all pairs of points
'''
reachabilitydistance = np.zeros((N, N))
i = 0
for _ in data:
j = 0
for _ in data:
reachabilitydistance[i][j] = max(kdist[i], distancematrix[i][j])
j += 1
i += 1
return reachabilitydistance
def getAccuracy(outliers, Y, N, PrecisionList, RecallList):
'''
    Gets the performance measures of the outlier detection done,
in terms of Accuracy, Precision, Recall, F1-Score
using true and false +ves and -ves
'''
tp = 0
fp = 0
tn = 0
fn = 0
#testY = []
for i, row in Y.iteritems():
if i in outliers:
#testY.append(1)
if row == 1:
tp += 1
else:
fp += 1
else:
#testY.append(0)
if row == 1:
fn += 1
else:
tn += 1
print("True +ve:" + str(tp) + " True -ve:" + str(tn))
print(" False +ve:" + str(fp) + " False -ve:" + str(fn))
A = (tp + tn)/(tp + tn + fp + fn)
P = (float(tp)/(tp + fp))
R = (float(tp)/(tp + fn))
f1 = 2*P*R/float(P+R)
print("Accuracy : " + str(A) + " Precision : " + str(P) + " Recall : " + str(R) + " F1-Score : " + str(f1))
print()
PrecisionList.append(P)
RecallList.append(R)
#return testY
# def dimRedPlot(df, testY):
# '''
# Reduce dimensions to 2, then plot the points
# of the obtained results, with outliers (i.e testY = 1)
# highlighted in red and normal pts in blue
# '''
# lda = LDA(n_components=2)
# lda_transformed = pd.DataFrame(lda.fit_transform(df, testY))
# Plot normal points in blue and outliers in red
# plt.scatter(lda_transformed[:][testY == 1], lda_transformed[:][testY == 1], label='Outliers', c='red')
# plt.scatter(lda_transformed[testY == 0][0], lda_transformed[testY == 0][1], label='Normal points', c='blue')
# #plt.legend(loc=3)
# plt.show()
def main():
'''
Calls the functions to get distance matrix,
the LRD, and the 1st O points after sorting of LRD
and gets the Precision and Recall values
'''
df, Y = readData()
i = 1
PrecisionList = []
RecallList = []
data = df.values
N = len(data)
distancematrix = createDistanceMatrix(data, first_time, N)
#O is the #of outliers
for (k, O) in parameters:
print("Experiment:", i, ", k =", k, ", num_outliers =", O)
lrd = getLRD(N, distancematrix, k, data)
sorted_outlier_factor_indexes = np.argsort(lrd)
outliers = sorted_outlier_factor_indexes[-O:]
getAccuracy(outliers, Y, N, PrecisionList, RecallList)
i += 1
#dimRedPlot(df, testY)
# plt.plot(RecallList, PrecisionList, 'ro')
# plt.axis([0, 1, 0, 1])
# plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_15419 | import boringmindmachine as bmm
import logging
import os, time, datetime, urllib.parse
import twitter
import traceback
import base64
import oauth2 as oauth
import simplejson as json
class TwitterSheep(bmm.BoringSheep):
"""
Twitter Sheep class.
Sheep are created by the Shepherd.
Sheep are initialized with a JSON key file plus parameters from the Shepherd.
Sheep are expected to take care of their own API instance.
Input bot key (JSON file) is stored as self.params and contains everything the sheep needs
"""
def __init__(self, bot_key, **kwargs):
"""
bot_key - parameters that come from the keys (and the Keymaker, and the key-making process)
- consumer_token
- consumer_token_secret
- oauth_token
- oauth_token_secret
- user_id
- screen_name
kwargs - extra parameter args passed
into the Sheep (from the Shepherd)
A Sheep object manages information for a single Twitter bot account.
The information (oauth keys, bot name, bot account, etc) are contained
in the JSON file passed in by the Shepherd.
The JSON file contains information compiled by the Keymaker.
If there is other information the Shepherd needs to pass to the
Sheep that is not in the JSON file, it can use keyword args.
"""
# This is where we should initialize the Twitter API instance
# using params found in the json file.
self.params = bot_key
# combine the user-provided parameters
# (in kwargs) with the json-provided parameters
        for key in kwargs:
self.params[key] = kwargs[key]
# Initialize your API instance
self.api = twitter.Api( consumer_key = self.params['consumer_token'],
consumer_secret = self.params['consumer_token_secret'],
access_token_key = self.params['oauth_token'],
access_token_secret = self.params['oauth_token_secret'])
# Get an OAuth token to do bot stuff
self.token = oauth.Token(
key = self.params['oauth_token'],
secret = self.params['oauth_token_secret']
)
# Add an OAuth application to consume the API
self.consumer = oauth.Consumer(
key = self.params['consumer_token'],
secret = self.params['consumer_token_secret']
)
# Create an OAuth client
self.client = oauth.Client(
self.consumer,
self.token
)
# Set names
self.name = bot_key['screen_name']
msg = "TwitterSheep: constructor: Finished setting up Twitter API for bot {screen_name}"
msg = msg.format(screen_name=self.name)
logging.info(self.sign_message(msg))
#####################################
# rainbow mind machine Sheep:
# non-Twitter actions
def dummy(self, **kwargs):
"""Debug: do nothing."""
msg = "TwitterSheep: dummy(): dummy action"
logging.debug(self.sign_message(msg))
def echo(self, **kwargs):
"""Just say hi"""
msg = "Hello world! This is {name}".format(name=self.name)
logging.info(self.sign_message(msg))
#################################
# rainbow mind machine Sheep:
# Twitter actions
#
# change_url
# change_bio
# change_color
# change_image
# tweet
# follow_user
# unfollow_user
def change_url(self, **kwargs):
"""Update twiter profile URL.
kwargs:
url: The new url (string) to set as the profile URL
Does not return anything.
"""
if( 'url' not in kwargs.keys()):
err = "TwitterSheep Error: change_url() action called without 'url' kwarg specified."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# Set the API endpoint
api_url = "https://api.twitter.com/1.1/account/update_profile.json"
bot_url = kwargs['url']
resp, content = self.client.request(
api_url,
method = "POST",
body = urllib.parse.urlencode({'url':bot_url}),
headers = None
)
msg = "TwitterSheep: change_url(): Done. Set url to: %s"%(bot_url)
logging.info(self.sign_message(msg))
def change_bio(self,**kwargs):
"""Update twitter profile bio.
kwargs:
bio: The bio string
Does not return anything.
"""
if( 'bio' not in kwargs.keys()):
err = "TwitterSheep Error: change_bio() action called without 'bio' key specified in the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# Set the API endpoint
url = "https://api.twitter.com/1.1/account/update_profile.json"
bot_bio = kwargs['bio']
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode({'description': bot_bio}),
headers=None
)
msg = "TwitterSheep: change_bio(): Done."
logging.info(self.sign_message(msg))
logging.info(content)
def change_colors(self,**kwargs):
"""
Update twitter profile colors.
kwargs:
background: RGB code for background color (no #)
links: RGB code for links color (no #)
Example:
kwargs = {
'background':'3D3D3D',
'link':'AAF'
}
Does not return anything.
"""
if( 'background' not in kwargs.keys()
and 'links' not in kwargs.keys()):
err = "TwitterSheep Error: change_colors() action called "
err += "with neither 'background' nor 'links' kwargs specified."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# json sent to the Twitter API
payload = {}
if 'background' in kwargs.keys():
background_rgbcode = kwargs['background']
payload['profile_background_color'] = background_rgbcode
if 'links' in kwargs.keys():
links_rgbcode = kwargs['links']
payload['profile_link_color'] = links_rgbcode
# Set the API endpoint
url = "https://api.twitter.com/1.1/account/update_profile_colors.json"
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode(payload),
headers=None
)
msg = "TwitterSheep: change_colors(): Done."
logging.info(self.sign_message(msg))
logging.info(content)
def change_image(self,**kwargs):
"""Update twitter profile bio.
Setting 'image' keyword argument takes the highest
priority and is the image used if present.
If that is not available, change_image() will look
for an 'image' keyword argument in the bot key.
kwargs:
image: The path to the image to use as the Twitter avatar
This method does not return anything.
"""
if( 'image' not in kwargs.keys() and 'image' not in self.params):
err = "TwitterSheep Error: change_image() action called without 'image' key specified in the bot key or the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
img_file = ''
if( 'image' in kwargs.keys() ):
img_file = kwargs['image']
if os.path.isfile(img_file) is False:
err = "TwitterSheep Error: change_image() action called with an 'image' key that is not a file!"
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
elif( 'image' in self.params ):
img_file = self.params['image']
if os.path.isfile(img_file) is False:
err = "TwitterSheep Error: change_image() action called with an 'image' key that is not a file!"
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# json sent to the Twitter API
payload = {}
        b64 = base64.encodebytes(open(img_file,"rb").read())
# Set the API endpoint
api_url = "https://api.twitter.com/1.1/account/update_profile_image.json"
resp, content = self.client.request(
api_url,
method = "POST",
body=urllib.parse.urlencode({'image': b64}),
headers=None
)
logging.info("TwitterSheep: change_image(): Done.")
logging.info(content)
def follow_user(self, **kwargs):
"""
Follow a twitter user.
kwargs:
username: The username of the user to follow
notify: Whether to notify the followed user (boolean)
This method does not return anything.
"""
if( 'username' not in kwargs.keys()):
err = "TwitterSheep Error: change_image() action called without 'image' key specified in the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
if( 'notify' not in kwargs.keys()):
kwargs['notify'] = False
        # json sent to the Twitter API
        payload = {}
        payload['screen_name'] = kwargs['username']
        payload['follow'] = kwargs['notify']
        # Set the API endpoint
        url = "https://api.twitter.com/1.1/friendships/create.json"
        resp, content = self.client.request(
            url,
            method = "POST",
            body=urllib.parse.urlencode(payload),
            headers=None
        )
logging.info("TwitterSheep: follow_user(): Done.")
logging.info(content)
def unfollow_user(self, notify=True, **kwargs):
"""
Unfollow a twitter user.
kwargs:
            username: The username of the user to unfollow
            notify: Whether to notify the unfollowed user (boolean)
This method does not return anything.
"""
if 'username' not in kwargs.keys():
err = "TwitterSheep Error: unfollow_user() action called without a 'username' key specified in the params dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
if( 'notify' not in kwargs.keys()):
kwargs['notify'] = False
# json sent to the Twitter API
payload = {}
# Set the API endpoint
url = "https://api.twitter.com/1.1/friendships/destroy.json"
        payload['screen_name'] = kwargs['username']
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode(payload),
headers=None
)
logging.info("TwitterSheep: unfollow_user(): Done.")
logging.info(content)
def tweet(self, **kwargs):
"""
Send out a tweet. This uses the function:
populate_tweet_queue()
        Run an infinite loop in which the bot decides when to tweet.
Default Sheep have the following scheduling kwargs:
kwargs:
inner_sleep: Inner loop sleep time (1 s)
outer_sleep: Outer loop sleep time (10 s)
publish: Actually publish (boolean, False by default)
Additional kwargs:
media: A URL, a local file, or a file-like object (something with a read() method)
or a list of any of the above
This function never ends, so it never returns.
"""
# Process kwargs
defaults = {}
defaults['inner_sleep'] = 1.0
defaults['outer_sleep'] = 10.0
defaults['publish'] = False
# populate missing params with default values
for dk in defaults.keys():
if dk not in kwargs.keys():
kwargs[dk] = defaults[dk]
# --------------------------
# The Real McCoy
#
# call populate_tweet_queue() to populate the list of tweets to send out
#
# apply some rube goldberg logic to figure out when to tweet each item
while True:
try:
# Outer loop
tweet_queue = self.populate_tweet_queue()
nelements = len(tweet_queue)
msg = "TwitterSheep: tweet(): Populated tweet queue with %d tweets"%(nelements)
logging.debug(self.sign_message(msg))
assert nelements>0
for ii in range(nelements):
twit = tweet_queue.pop(0)
msg = "TwitterSheep: tweet(): Preparing twit"
logging.debug(self.sign_message(msg))
# Fire off the tweet
if kwargs['publish']:
if('media' in kwargs.keys()):
self._tweet(
twit,
                                media = kwargs['media']
)
else:
self._tweet( twit )
msg = "TwitterSheep: tweet(): Published tweet \"%s\""%(twit)
logging.info(self.sign_message(msg))
else:
msg = "TwitterSheep: tweet(): Not publishing tweet \"%s\""%(twit)
logging.info(self.sign_message(msg))
msg = "TwitterSheep: tweet(): Finished with twit"
logging.debug(self.sign_message(msg))
time.sleep( kwargs['inner_sleep'] )
time.sleep( kwargs['outer_sleep'] )
msg = "TwitterSheep: tweet(): Completed a cycle."
logging.debug(self.sign_message(msg))
except Exception:
# oops!
msg1 = self.sign_message("TwitterSheep: tweet(): Sheep encountered an exception. More info:")
msg2 = self.sign_message(traceback.format_exc())
msg3 = self.sign_message("Sheep is continuing...")
# Add this line in to debug sheep
#raise Exception(err)
logging.error(msg1)
logging.error(msg2)
logging.error(msg3)
time.sleep( kwargs['outer_sleep'] )
except AssertionError:
err = "TwitterSheep Error: tweet(): tweet queue was empty. Check your populate_tweet_queue() method definition."
logging.error(self.sign_message(err))
raise Exception(err)
def _tweet(self,twit,media=None):
"""
Private method.
Publish a twit.
"""
# call twitter api to tweet the twit
try:
# tweet:
if(media is not None):
stats = self.api.PostUpdates(twit,media=media)
else:
stats = self.api.PostUpdates(twit)
# everything else:
msg = "TwitterSheep: _tweet(): @%s tweeted: \"%s\""%(self.name, twit)
logging.info(self.sign_message(msg))
except twitter.TwitterError as e:
if e.message[0]['code'] == 185:
msg = "TwitterSheep Error: _tweet(): Twitter error: Daily message limit reached"
logging.info(self.sign_message(msg))
elif e.message[0]['code'] == 187:
msg = "TwitterSheep Error: _tweet(): Twitter error: Duplicate error"
logging.info(self.sign_message(msg))
else:
msg = "TwitterSheep Error: _tweet(): Twitter error: %s"%(e.message)
logging.info(self.sign_message(msg))
def populate_tweet_queue(self):
"""
Populate a tweet queue.
This method should be extended by new Sheep classes that have their own
creative means of generating tweets.
The default Sheep object will generate a tweet queue filled with
5 "Hello World" messages.
Returns a list of tweets.
"""
maxlen = 5
tweet_queue = []
# (technically, a list is a queue)
for j in range(maxlen):
tweet = "Hello world! That's number %d of 5."%(j+1)
tweet_queue.append(tweet)
msg = "TwitterSheep: populate_tweet_queue(): Finished populating a new tweet queue with %d Hello World tweets."%(len(tweet_queue))
logging.debug(self.sign_message(msg))
return tweet_queue
def sign_message(self,msg):
"""
Given a message, prepend it with [@botname]
"""
result = "[@%s] %s"%(self.name, msg)
return result
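# A minimal usage sketch (not part of the original module): bots subclass
# TwitterSheep and override populate_tweet_queue() with their own tweet
# source; the Shepherd normally supplies the bot_key dict loaded from the
# Keymaker's JSON key file. Names below are illustrative only.
#
#   class HaikuSheep(TwitterSheep):
#       def populate_tweet_queue(self):
#           # Return the list of tweet strings for one outer-loop cycle.
#           return ["an old silent pond", "a frog jumps in", "splash! silence again"]
#
#   sheep = HaikuSheep(bot_key)   # bot_key: dict produced by the Keymaker
#   sheep.tweet(publish=False, inner_sleep=2.0, outer_sleep=60.0)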
|
the-stack_0_15423 | import voluptuous as vol
from esphome import pins
from esphome.components import sensor, spi
from esphome.components.spi import SPIComponent
import esphome.config_validation as cv
from esphome.const import CONF_CS_PIN, CONF_ID, CONF_NAME, CONF_SPI_ID, CONF_UPDATE_INTERVAL
from esphome.cpp_generator import Pvariable, get_variable
from esphome.cpp_helpers import gpio_output_pin_expression, setup_component
from esphome.cpp_types import App
MAX31855Sensor = sensor.sensor_ns.class_('MAX31855Sensor', sensor.PollingSensorComponent,
spi.SPIDevice)
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(MAX31855Sensor),
cv.GenerateID(CONF_SPI_ID): cv.use_variable_id(SPIComponent),
vol.Required(CONF_CS_PIN): pins.gpio_output_pin_schema,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema))
def to_code(config):
for spi_ in get_variable(config[CONF_SPI_ID]):
yield
for cs in gpio_output_pin_expression(config[CONF_CS_PIN]):
yield
rhs = App.make_max31855_sensor(config[CONF_NAME], spi_, cs,
config.get(CONF_UPDATE_INTERVAL))
max31855 = Pvariable(config[CONF_ID], rhs)
sensor.setup_sensor(max31855, config)
setup_component(max31855, config)
BUILD_FLAGS = '-DUSE_MAX31855_SENSOR'
def to_hass_config(data, config):
return sensor.core_to_hass_config(data, config)
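# A hypothetical YAML snippet this platform schema would accept (entity name
# and pin assignment are examples, not taken from the original source):
#
#   sensor:
#     - platform: max31855
#       name: "Thermocouple Temperature"
#       cs_pin: GPIO15
#       update_interval: 60s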
|
the-stack_0_15425 | import setuptools
import pentagraph
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pentagraph",
version=pentagraph.__version__,
author=pentagraph.__author__,
author_email="[email protected]",
description="Graph representation and tools for programming with pentagame",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Penta-Game/pentagraph",
packages=setuptools.find_packages("."),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.8",
)
|
the-stack_0_15427 | """Declare runtime dependencies
These are needed for local dev, and users must install them as well.
See https://docs.bazel.build/versions/main/skylark/deploying.html#dependencies
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
versions = struct(
aspect_bazel_lib = "0.11.1",
rules_nodejs = "5.4.2",
)
# WARNING: any changes in this function may be BREAKING CHANGES for users
# because we'll fetch a dependency which may be different from one that
# they were previously fetching later in their WORKSPACE setup, and now
# ours took precedence. Such breakages are challenging for users, so any
# changes in this function should be marked as BREAKING in the commit message
# and released only in semver majors.
def rules_js_dependencies():
"Dependencies for users of aspect_rules_js"
# The minimal version of bazel_skylib we require
maybe(
http_archive,
name = "bazel_skylib",
sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz"],
)
maybe(
http_archive,
name = "rules_nodejs",
sha256 = "26766278d815a6e2c43d2f6c9c72fde3fec8729e84138ffa4dabee47edc7702a",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/{0}/rules_nodejs-core-{0}.tar.gz".format(versions.rules_nodejs)],
)
maybe(
http_archive,
name = "aspect_bazel_lib",
sha256 = "a8b47eeaf3c1bd41c4f4b633ef4c959daf83fdee343379495098b50571d4b3b8",
strip_prefix = "bazel-lib-{}".format(versions.aspect_bazel_lib),
url = "https://github.com/aspect-build/bazel-lib/archive/refs/tags/v{}.tar.gz".format(versions.aspect_bazel_lib),
)
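# A hypothetical WORKSPACE usage sketch (the load label depends on where this
# file is exposed in the released archive):
#
#   load("@aspect_rules_js//js:repositories.bzl", "rules_js_dependencies")
#   rules_js_dependencies()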
|
the-stack_0_15429 | import numpy as np
from matplotlib import pyplot as plt
import cv2
import argparse
import os
from slam import SLAM
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TODO')
parser.add_argument('path', metavar='path', type=str, help='data')
args = parser.parse_args()
path = args.path
image_names = sorted(os.listdir(path))
w = 1280
h = 1024
calibration_matrix = np.array([
[0.535719308086809*w, 0, 0.493248545285398*w],
[0, 0.669566858850269*h, 0.500408664348414*h],
[0, 0, 1]
])
sigma = 0.897966326944875
slam = SLAM(width=w, height=h, calibration_matrix=calibration_matrix)
t = tqdm.tqdm(image_names, total=len(image_names))
for name in t:
#print(name)
#fig = plt.figure()
img = cv2.imread(path + '/' + name, cv2.IMREAD_GRAYSCALE)
#plt.imshow(img)
#plt.show()
img2 = cv2.undistort(img, calibration_matrix, sigma)
# plt.imshow(img2)
# plt.show()
slam.run(img)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(slam.position[:,0], slam.position[:,1], slam.position[:,2], '-')
#ax.scatter(slam.map[:,0], slam.map[:,1], slam.map[:,2])
#ax.set_xlim3d(-20,20)
#ax.set_ylim3d(-20,20)
#ax.set_zlim3d(-20,20)
plt.show()
#print(dir(key_point[0]))
# print('angle: ', key_point[1].angle)
# print('class_id: ', key_point[1].class_id)
# print('octave: ', key_point[1].octave)
# print('pt: ', key_point[1].pt)
# print('response: ', key_point[1].response)
# print('size: ', key_point[1].size)
|
the-stack_0_15430 | import json
from packlib.base import ProxmoxAction
class ClusterCephFlagsFlagUpdateFlagAction(ProxmoxAction):
"""
Set or clear (unset) a specific ceph flag
"""
def run(self, flag, value, profile_name=None):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["flag", flag, "string"],
["value", value, "boolean"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.put(f"cluster/ceph/flags/{flag}", **proxmox_kwargs)
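# A hypothetical usage sketch (profile wiring is handled by the ProxmoxAction
# base class; "noout" is just an example Ceph flag name):
#
#   action = ClusterCephFlagsFlagUpdateFlagAction()
#   action.run(flag="noout", value=True, profile_name="default")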
|
the-stack_0_15431 | from pypy.interpreter.error import OperationError
from pypy.interpreter import module
from pypy.interpreter.mixedmodule import MixedModule
import pypy.module.imp.importing
# put builtins here that should be optimized somehow
class Module(MixedModule):
"""Built-in functions, exceptions, and other objects."""
appleveldefs = {
'execfile' : 'app_io.execfile',
'raw_input' : 'app_io.raw_input',
'input' : 'app_io.input',
'print' : 'app_io.print_',
'apply' : 'app_functional.apply',
'sorted' : 'app_functional.sorted',
'any' : 'app_functional.any',
'all' : 'app_functional.all',
'sum' : 'app_functional.sum',
'map' : 'app_functional.map',
'reduce' : 'app_functional.reduce',
'filter' : 'app_functional.filter',
'zip' : 'app_functional.zip',
'vars' : 'app_inspect.vars',
'dir' : 'app_inspect.dir',
'bin' : 'app_operation.bin',
}
interpleveldefs = {
# constants
'__debug__' : '(space.w_True)',
'None' : '(space.w_None)',
'False' : '(space.w_False)',
'True' : '(space.w_True)',
'bytes' : '(space.w_bytes)',
'file' : 'state.get(space).w_file',
'open' : 'state.get(space).w_file',
# default __metaclass__: old-style class
'__metaclass__' : 'interp_classobj.W_ClassObject',
# interp-level function definitions
'abs' : 'operation.abs',
'chr' : 'operation.chr',
'unichr' : 'operation.unichr',
'len' : 'operation.len',
'ord' : 'operation.ord',
'pow' : 'operation.pow',
'repr' : 'operation.repr',
'hash' : 'operation.hash',
'oct' : 'operation.oct',
'hex' : 'operation.hex',
'round' : 'operation.round',
'cmp' : 'operation.cmp',
'coerce' : 'operation.coerce',
'divmod' : 'operation.divmod',
'format' : 'operation.format',
'_issubtype' : 'operation._issubtype',
'issubclass' : 'abstractinst.app_issubclass',
'isinstance' : 'abstractinst.app_isinstance',
'getattr' : 'operation.getattr',
'setattr' : 'operation.setattr',
'delattr' : 'operation.delattr',
'hasattr' : 'operation.hasattr',
'iter' : 'operation.iter',
'next' : 'operation.next',
'id' : 'operation.id',
'intern' : 'operation.intern',
'callable' : 'operation.callable',
'compile' : 'compiling.compile',
'eval' : 'compiling.eval',
'__import__' : 'pypy.module.imp.importing.importhook',
'reload' : 'pypy.module.imp.importing.reload',
'range' : 'functional.range_int',
'xrange' : 'functional.W_XRange',
'enumerate' : 'functional.W_Enumerate',
'min' : 'functional.min',
'max' : 'functional.max',
'reversed' : 'functional.reversed',
'super' : 'descriptor.W_Super',
'staticmethod' : 'pypy.interpreter.function.StaticMethod',
'classmethod' : 'pypy.interpreter.function.ClassMethod',
'property' : 'descriptor.W_Property',
'globals' : 'interp_inspect.globals',
'locals' : 'interp_inspect.locals',
}
def pick_builtin(self, w_globals):
"Look up the builtin module to use from the __builtins__ global"
# pick the __builtins__ roughly in the same way CPython does it
# this is obscure and slow
space = self.space
try:
w_builtin = space.getitem(w_globals, space.newtext('__builtins__'))
except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
if w_builtin is space.builtin: # common case
return space.builtin
if space.isinstance_w(w_builtin, space.w_dict):
return module.Module(space, None, w_builtin)
if isinstance(w_builtin, module.Module):
return w_builtin
# no builtin! make a default one. Give them None, at least.
builtin = module.Module(space, None)
space.setitem(builtin.w_dict, space.newtext('None'), space.w_None)
return builtin
def setup_after_space_initialization(self):
"""NOT_RPYTHON"""
space = self.space
# install the more general version of isinstance() & co. in the space
from pypy.module.__builtin__ import abstractinst as ab
space.abstract_isinstance_w = ab.abstract_isinstance_w.__get__(space)
space.abstract_issubclass_w = ab.abstract_issubclass_w.__get__(space)
space.abstract_isclass_w = ab.abstract_isclass_w.__get__(space)
space.abstract_getclass = ab.abstract_getclass.__get__(space)
space.exception_is_valid_class_w = ab.exception_is_valid_class_w.__get__(space)
space.exception_is_valid_obj_as_class_w = ab.exception_is_valid_obj_as_class_w.__get__(space)
space.exception_getclass = ab.exception_getclass.__get__(space)
space.exception_issubclass_w = ab.exception_issubclass_w.__get__(space)
|
the-stack_0_15432 | from mesh import QuadMesh, Mesh1D
from plot import Plot
from fem import QuadFE, DofHandler
from function import Explicit
import numpy as np
plot = Plot()
mesh = Mesh1D()
Q0 = QuadFE(1,'DQ0')
dh0 = DofHandler(mesh,Q0)
n_levels = 10
for l in range(n_levels):
mesh.cells.refine(new_label=l)
dh0.distribute_dofs(subforest_flag=l)
f = Explicit(lambda x: np.abs(x-0.5), dim=1)
fQ = f.interpolant(dh0, subforest_flag=3)
plot.line(fQ, mesh)
plot.mesh(mesh, dofhandler=dh0, subforest_flag=0)
mesh = QuadMesh(resolution=(10,10))
plot.mesh(mesh) |
the-stack_0_15433 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
from requests import __version__ as requests_version
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
grpc_version=None,
rest_version=requests_version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class LicenseCodesTransport(abc.ABC):
"""Abstract transport class for LicenseCodes."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
        # If the credentials are service account credentials, then always try to use self-signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required version of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
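    # For example (illustrative): with google-auth >= 1.25.0 the helper above
    # returns {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}, letting
    # google-auth fall back to the default scopes; with older versions it
    # returns {"scopes": scopes or cls.AUTH_SCOPES} instead.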
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_timeout=None,
client_info=client_info,
),
}
@property
def get(
self,
) -> Callable[
[compute.GetLicenseCodeRequest],
Union[compute.LicenseCode, Awaitable[compute.LicenseCode]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsLicenseCodeRequest],
Union[
compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]
],
]:
raise NotImplementedError()
__all__ = ("LicenseCodesTransport",)
|
the-stack_0_15439 | import json
class StandardVocabulary:
"""Class for the standard vocabulary"""
def __init__(self, json_content: list):
"""Initiliaze the class with the json tree content (from JSTree)
Args:
json_content (list): JSON from JSTree
"""
self.jstree_as_list = json_content
self.jstree_as_dict = {
i["id"]: {
"id": i["id"],
"text": i["text"],
"icon": i["icon"],
"data": i["data"],
"parent": i["parent"],
}
for i in self.jstree_as_list
}
def update_ontology(self, dest_onto: object) -> list:
"""Update the current standard vocabulary tree with the latest modification
(destination) of the tree (delete, add, update, check parents).
Args:
dest_onto (object): Another instance of the class StandardVocabulary
Returns:
list: return the updated tree as list of dict (json)
"""
updated_jstree_as_list = []
for i in self.jstree_as_list:
if i["id"] not in dest_onto.jstree_as_dict.keys():
# If destination is missing a node: mark the node as outdated
i["data"]["outdated"] = True
if "OUTDATED" not in i["text"]:
i["text"] = "OUTDATED : " + i["text"]
updated_jstree_as_list.append(i)
elif i["id"] in dest_onto.jstree_as_dict.keys():
if (
i["text"] != dest_onto.jstree_as_dict[i["id"]]["text"]
or i["data"] != dest_onto.jstree_as_dict[i["id"]]["data"]
):
# If destination has modified data or name: update
i["text"] = dest_onto.jstree_as_dict[i["id"]]["text"]
i["data"]["description"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("description", "")
i["data"]["hpo_datamined"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("hpo_datamined", "")
i["data"]["phenotype_datamined"] = dest_onto.jstree_as_dict[
i["id"]
]["data"].get("phenotype_datamined", "")
i["data"]["gene_datamined"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("gene_datamined", "")
i["data"]["alternative_language"] = dest_onto.jstree_as_dict[
i["id"]
]["data"].get("alternative_language", "")
i["data"]["correlates_with"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("correlates_with", "")
i["data"]["synonymes"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("synonymes", "")
i["data"]["hex_color"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("hex_color", "")
updated_jstree_as_list.append(i)
# If destination has new entry: add them
for i in dest_onto.jstree_as_dict.keys():
if i not in self.jstree_as_dict.keys():
updated_jstree_as_list.append(dest_onto.jstree_as_dict[i])
self.jstree_as_dict = {
j["id"]: {
"id": j["id"],
"text": j["text"],
"icon": j["icon"],
"data": j["data"],
"parent": j["parent"],
}
for j in updated_jstree_as_list
}
# If destination has different parent ID: change it.
for i in dest_onto.jstree_as_dict.keys():
if (
dest_onto.jstree_as_dict[i]["parent"]
!= self.jstree_as_dict[i]["parent"]
):
self.jstree_as_dict[i]["parent"] = dest_onto.jstree_as_dict[i]["parent"]
self.jstree_as_list = list(self.jstree_as_dict.values())
self.clean_tree()
return self.jstree_as_list
def dump_updated_to_file(self, file_path: str):
"""Dump the updated tree to a json file
Args:
file_path (str): path to save the json file
"""
with open(file_path, "w") as fp:
json.dump(self.jstree_as_dict, fp, indent=4)
def clean_tree(self) -> list:
"""Clean the tree of non informative fields.
Returns:
list: return the updated tree as list of dict (json)
"""
clean_tree_list = []
for i in self.jstree_as_dict:
clean_tree_list.append(self.jstree_as_dict[i])
self.jstree_as_list = clean_tree_list
return self.jstree_as_list
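# Minimal usage sketch (illustrative only; the node dicts below are made-up
# JSTree-style entries matching the fields read above: id, text, icon, data,
# parent). It merges the destination's renames and new node into the source tree.
if __name__ == "__main__":
    source_nodes = [
        {"id": "n1", "text": "Root", "icon": "", "data": {}, "parent": "#"},
        {"id": "n2", "text": "Old name", "icon": "", "data": {}, "parent": "n1"},
    ]
    dest_nodes = [
        {"id": "n1", "text": "Root (renamed)", "icon": "", "data": {}, "parent": "#"},
        {"id": "n2", "text": "New name", "icon": "", "data": {}, "parent": "n1"},
        {"id": "n3", "text": "Added node", "icon": "", "data": {}, "parent": "n1"},
    ]
    source = StandardVocabulary(source_nodes)
    dest = StandardVocabulary(dest_nodes)
    merged = source.update_ontology(dest)
    print(json.dumps(merged, indent=2))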
|
the-stack_0_15440 | from typing import Dict, Any
import os
import sys
import glob
import json
import yaml
import time
import gzip
import random
import logging
import multiprocessing as mp
import queue
import threading
import ai2thor.controller
import ai2thor.util.metrics
from robothor_challenge.startx import startx
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(sys.stdout)
ch.flush = sys.stdout.flush
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
ALLOWED_ACTIONS = ["MoveAhead", "RotateRight", "RotateLeft", "LookUp", "LookDown", "Stop"]
def get_object_by_type(event_objects, object_type):
for obj in event_objects:
if obj['objectId'].split("|")[0] == object_type:
return obj
return None
class RobothorChallenge:
def __init__(self, cfg_file, agent_class, agent_kwargs, render_depth=False):
self.agent_class = agent_class
self.agent_kwargs = agent_kwargs
self.config = self.load_config(cfg_file, render_depth)
self.setup_env()
self.controller_kwargs = {
"commit_id": self.config["thor_build_id"],
"width": self.config["width"],
"height": self.config["height"],
**self.config["initialize"]
}
self.current_scene = None
self.reachable_positions_per_scene = {}
@staticmethod
def load_config(cfg_file, render_depth):
logger.info("Loading configuration from: %s" % cfg_file)
with open(cfg_file, "r") as f:
config = yaml.safe_load(f.read())
if render_depth:
config["initialize"]["renderDepthImage"] = True
return config
@staticmethod
def setup_env():
if "DISPLAY" not in os.environ:
xthread = threading.Thread(target=startx)
xthread.daemon = True
xthread.start()
import time
# XXX change this to use xdpyinfo
time.sleep(4)
@staticmethod
def load_split(dataset_dir, split):
split_paths = os.path.join(dataset_dir, split, "episodes", "*.json.gz")
split_paths = sorted(glob.glob(split_paths))
episode_list = []
dataset = {}
for split_path in split_paths:
logger.info("Loading: {path}".format(path=split_path))
with gzip.GzipFile(split_path, "r") as f:
episodes = json.loads(f.read().decode("utf-8"))
# Build a dictionary of the dataset indexed by scene, object_type
curr_scene = None
curr_object = None
points = []
scene_points = {}
for data_point in episodes:
if curr_object != data_point["object_type"]:
scene_points[curr_object] = points
curr_object = data_point["object_type"]
points = []
if curr_scene != data_point["scene"]:
dataset[curr_scene] = scene_points
curr_scene = data_point["scene"]
scene_points = {}
points.append(data_point)
episode_list += episodes
return episode_list, dataset
@staticmethod
def inference_worker(
worker_ind: int,
in_queue: mp.Queue,
out_queue: mp.Queue,
agent_class: Any,
agent_kwargs: Dict[str, Any],
controller_kwargs: Dict[str, Any],
max_steps: int,
test: bool
):
agent = agent_class(**agent_kwargs)
controller = ai2thor.controller.Controller(**controller_kwargs)
while True:
try:
e = in_queue.get(timeout=1)
except queue.Empty:
break
logger.info("Task Start id:{id} scene:{scene} target_object:{object_type} initial_position:{initial_position} rotation:{initial_orientation}".format(**e))
controller.initialization_parameters["robothorChallengeEpisodeId"] = e["id"]
print(e["scene"])
controller.reset(e["scene"])
teleport_action = {
"action": "TeleportFull",
**e["initial_position"],
"rotation": {"x": 0, "y": e["initial_orientation"], "z": 0},
"horizon": e["initial_horizon"],
"standing": True
}
controller.step(action=teleport_action)
total_steps = 0
agent.reset()
episode_metrics = {
"trajectory" : [{
**e["initial_position"],
"rotation" : float(e["initial_orientation"]),
"horizon" : e["initial_horizon"]
}],
"actions_taken" : []
}
stopped = False
while total_steps < max_steps and stopped is False:
total_steps += 1
event = controller.last_event
event.metadata.clear()
action = agent.act({
"object_goal" : e["object_type"],
"depth" : event.depth_frame,
"rgb" : event.frame
})
if action not in ALLOWED_ACTIONS:
raise ValueError("Invalid action: {action}".format(action=action))
logger.info("Agent action: {action}".format(action=action))
event = controller.step(action=action)
episode_metrics["trajectory"].append({
**event.metadata["agent"]["position"],
"rotation": event.metadata["agent"]["rotation"]["y"],
"horizon": event.metadata["agent"]["cameraHorizon"]
})
episode_metrics["actions_taken"].append({
"action": action,
"success": event.metadata["lastActionSuccess"]
})
stopped = action == "Stop"
if not test:
target_obj = get_object_by_type(event.metadata["objects"], e["object_type"])
assert target_obj is not None
target_visible = target_obj["visible"]
episode_metrics["success"] = stopped and target_visible
if not test:
episode_result = {
"path": episode_metrics["trajectory"],
"shortest_path": e["shortest_path"],
"success": episode_metrics["success"]
}
else:
episode_result = None
out_queue.put((e["id"], episode_metrics, episode_result))
controller.stop()
print(f"Worker {worker_ind} Finished.")
def inference(self, episodes, nprocesses=1, test=False):
send_queue = mp.Queue()
receive_queue = mp.Queue()
expected_count = len(episodes)
for e in episodes:
send_queue.put(e)
processes = []
for worker_ind in range(nprocesses):
p = mp.Process(
target=self.inference_worker,
kwargs=dict(
worker_ind=worker_ind,
in_queue=send_queue,
out_queue=receive_queue,
agent_class=self.agent_class,
agent_kwargs=self.agent_kwargs,
controller_kwargs=self.controller_kwargs,
max_steps=self.config["max_steps"],
test=test
),
)
p.start()
processes.append(p)
time.sleep(0.2)
metrics = {"episodes" : {}}
episode_results = []
while len(metrics["episodes"]) < expected_count:
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=10)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
            except queue.Empty:
print("Went 10 seconds without a new episode result.")
if all(not p.is_alive() for p in processes):
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=1)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
                    except queue.Empty:
raise RuntimeError("All processes dead but nothing in queue!")
for p in processes:
p.join(timeout=2)
metrics["ep_len"] = sum([len(em["trajectory"]) for em in metrics["episodes"].values()]) / len(metrics["episodes"])
if not test:
metrics["success"] = sum([r["success"] for r in episode_results]) / len(episode_results)
metrics["spl"] = ai2thor.util.metrics.compute_spl(episode_results)
if not test:
logger.info("Total Episodes: {episode_count} Success:{success} SPL:{spl} Episode Length:{ep_len}".format(episode_count=len(episodes), success=metrics["success"], spl=metrics["spl"], ep_len=metrics["ep_len"]))
else:
logger.info("Total Episodes: {episode_count} Episode Length:{ep_len}".format(episode_count=len(episodes), ep_len=metrics["ep_len"]))
return metrics
def _change_scene(self, scene):
if self.current_scene != scene:
self.current_scene = scene
self.controller.reset(scene)
logger.info("Changed to scene: '{scene}'".format(scene=scene))
def move_to_point(self, datapoint):
self._change_scene(datapoint["scene"])
logger.info("Moving to position: {p}, y-rotation: {rot}, horizon: {hor}".format(
p=datapoint["initial_position"],
rot=datapoint["initial_orientation"],
hor=datapoint["initial_horizon"]
))
return self.controller.step(
action="TeleportFull",
x=datapoint["initial_position"]["x"],
y=datapoint["initial_position"]["y"],
z=datapoint["initial_position"]["z"],
rotation={"x" : 0, "y" : datapoint["initial_orientation"], "z" : 0},
horizon=datapoint["initial_horizon"],
standing=True
)
def move_to_random_dataset_point(self, dataset, scene, object_type):
if scene in dataset:
if object_type in dataset[scene]:
datapoint = random.choice(dataset[scene][object_type])
return self.move_to_point(datapoint)
else:
logger.warning(
"No object of type: '{object_type}' for scene: '{scene}', in dataset".format(
object_type=object_type,
scene=scene
)
)
return None
else:
logger.warning("No scene: '{scene}' in dataset".format(scene=scene))
return None
def move_to_random_point(self, scene, y_rotation=0, horizon=0):
if "test" in scene:
raise RuntimeError(
"Moving to random points is not posible in test scenes"
)
reachable_positions = self._get_reachable_positions_in_scene(scene)
p = random.choice(reachable_positions)
return self.move_to_point({
"initial_position": p,
"initial_orientation": y_rotation,
"initial_horizon": horizon,
"scene" : scene
})
def _get_reachable_positions_in_scene(self, scene):
self._change_scene(scene)
if scene not in self.reachable_positions_per_scene:
event_reachable = self.controller.step({
"action" : "GetReachablePositions",
"gridSize" : self.config["initialize"]["gridSize"]
})
self.reachable_positions_per_scene[scene] = event_reachable.metadata["actionReturn"]
return self.reachable_positions_per_scene[scene]
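# Minimal agent sketch (illustrative; the class and file names below are
# placeholders, not part of this module). An agent passed to RobothorChallenge
# only needs reset() and act(observations), where observations carries
# "object_goal", "rgb" and "depth", and act() must return one of ALLOWED_ACTIONS.
class RandomAgentSketch:
    def reset(self):
        pass
    def act(self, observations):
        return random.choice(ALLOWED_ACTIONS)
# Example wiring (commented out because it needs a real config file and dataset):
# challenge = RobothorChallenge("challenge_config.yaml", RandomAgentSketch, {})
# episodes, dataset = challenge.load_split("dataset_dir", "val")
# metrics = challenge.inference(episodes, nprocesses=1)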
|
the-stack_0_15441 | # -*- coding: utf-8 -*-
from rest_framework import status as http_status
import mock
from nose.tools import * # noqa
from framework.auth import Auth
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory
from .. import SHORT_NAME
from .. import settings
from .factories import make_binderhub
from .utils import BaseAddonTestCase
from website.util import api_url_for
from future.moves.urllib.parse import urlparse, parse_qs
class TestViews(BaseAddonTestCase, OsfTestCase):
def test_user_binderhubs(self):
new_binderhub_a = make_binderhub(
binderhub_url='https://testa.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_A',
)
url = self.project.api_url_for('{}_set_user_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binderhubs': [new_binderhub_a],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_user_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
binderhubs = res.json['binderhubs']
assert_equals(len(binderhubs), 1)
assert_equals(binderhubs[0]['binderhub_url'], 'https://testa.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[0])
new_binderhub_b = make_binderhub(
binderhub_url='https://testb.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_B',
)
url = self.project.api_url_for('{}_add_user_config'.format(SHORT_NAME))
res = self.app.post_json(url, {
'binderhub': new_binderhub_b,
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_user_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
binderhubs = res.json['binderhubs']
assert_equals(len(binderhubs), 2)
assert_equals(binderhubs[0]['binderhub_url'], 'https://testa.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[0])
assert_equals(binderhubs[1]['binderhub_url'], 'https://testb.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[1])
def test_binderhub_authorize(self):
url = self.project.api_url_for('{}_oauth_authorize'.format(SHORT_NAME),
serviceid='binderhub')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_302_FOUND)
url = res.headers['Location']
parsed = urlparse(url)
params = parse_qs(parsed.query)
assert_equal(params['response_type'][0], 'code')
assert_equal(params['scope'][0], 'identity')
assert_equal(urlparse(params['redirect_uri'][0]).path, '/project/binderhub/callback')
def test_empty_binder_url(self):
self.node_settings.set_binder_url('')
self.node_settings.save()
url = self.project.api_url_for('{}_get_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['binder_url'], settings.DEFAULT_BINDER_URL)
def test_binder_url(self):
self.node_settings.set_binder_url('URL_1')
self.node_settings.save()
url = self.project.api_url_for('{}_get_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['binder_url'], 'URL_1')
def test_ember_empty_binder_url(self):
url = self.project.api_url_for('{}_set_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binder_url': '',
'available_binderhubs': [],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_config_ember'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['data']['id'], self.project._id)
assert_equals(res.json['data']['type'], 'binderhub-config')
binderhubs = res.json['data']['attributes']['binderhubs']
default_binderhub = [b for b in binderhubs if b['default']][0]
assert_equals(default_binderhub['url'], settings.DEFAULT_BINDER_URL)
assert_not_in('binderhub_oauth_client_secret', default_binderhub)
def test_ember_custom_binder_url(self):
new_binderhub = make_binderhub(
binderhub_url='https://testa.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_A',
)
url = self.project.api_url_for('{}_set_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binder_url': 'https://testa.my.site',
'available_binderhubs': [new_binderhub],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_config_ember'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['data']['id'], self.project._id)
assert_equals(res.json['data']['type'], 'binderhub-config')
binderhubs = res.json['data']['attributes']['binderhubs']
default_binderhub = [b for b in binderhubs if b['default']][0]
assert_equals(default_binderhub['url'], 'https://testa.my.site')
assert_not_in('binderhub_oauth_client_secret', default_binderhub)
|
the-stack_0_15442 | import math
import numpy as np
import torch
from envs.LQR import LQR
from utils import get_AB
torch.manual_seed(2021)
np.random.seed(2021)
learning_rate = 0.0003
gamma = 0.9
lmbda = 0.9
eps_clip = 0.2
K_epoch = 10
rollout_len = 3
buffer_size = 30
minibatch_size = 32
def PDcontrol(x, K):
u = K @ x
return u
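# Note: PDcontrol applies the static linear state-feedback law u = K @ x; in
# main() the gain K is taken from env.optimum(), i.e. the optimal LQR gain.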
def main(state_dim):
print_interval = 20
# create environment
# state_dim = 8
action_dim = 1
# A = np.array([[1.0]])
dt = 0.1
A, B = get_AB(state_dim, action_dim, dt)
sigma = 0.1
W = sigma * np.eye(state_dim)
# B = np.eye(2)
Q = np.eye(state_dim) * 10.0
R = np.eye(action_dim)
env = LQR(A, B, Q, R, W, state_dim)
P, K, op_cost, La = env.optimum()
# print('Optimal cost:{}; La: {}'.format(op_cost, La))
# print(f'P: {P};\n K : {K}')
return op_cost, La
# print(A + B @ K)
# print(np.linalg.eigvals(A + B @ K))
sample_num = 1000
avg_score = 0.0
for n_epi in range(sample_num):
s = env.reset(factor=2.0)
score = 0.0
for i in range(1000):
for t in range(rollout_len):
a = PDcontrol(s, K)
s_prime, r, done, info = env.step(a)
s = s_prime
score += r
score /= 1000 * rollout_len
avg_score += score
if n_epi % print_interval == 0 and n_epi != 0:
print("# of episode :{}, avg score : {:.1f}".
format(n_epi, avg_score / print_interval))
avg_score = 0.0
if __name__ == '__main__':
for x in range(1, 7):
op_cost, La = main(2 * x)
print(f'State dim: {2 * x}; optimal cost: {op_cost}; La: {La}')
|
the-stack_0_15444 | from distutils.core import setup
from os import path
import site
site_dir = site.getsitepackages()[0]
with open('requirements.txt', 'r') as f:
requirements = list(map(str.strip, f))
if path.exists('README.md'):
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
else:
long_description = None
setup_kwargs = dict(
name='sk-torch',
version='0.1dev',
packages=['sktorch'],
provides=['sktorch'],
url='[email protected]:mattHawthorn/sk-torch.git',
license='MIT license',
author='Matt Hawthorn',
maintainer='Matt Hawthorn',
author_email='[email protected]',
description='A wrapper around pytorch module objects with a sklearn-like interface, allowing boilerplate-free '
'training of complex neural nets.',
long_description=long_description,
requires=requirements
)
if __name__ == "__main__":
try:
setup(**setup_kwargs)
except Exception as e:
print(e)
print("Failed to execute setup()")
exit(1)
exit(0)
|
the-stack_0_15445 | from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def powerset(*sets):
return itertools.chain.from_iterable(
        itertools.combinations(sets, r)
for r in range(len(sets)+1))
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
def run_interp_threaded(id, source, **shared):
def run():
        _run_interp(id, source, shared)
t = threading.Thread(target=run)
t.start()
t.join()
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
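# The two helpers below replay a single ChannelAction against a real channel
# while updating a parallel ChannelState, so the tests can compare the expected
# pending/closed bookkeeping with the interpreter module's actual behaviour.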
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise ... # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
class RunStringTests(TestBase):
SCRIPT = dedent("""
with open('{}', 'w') as out:
out.write('{}')
""")
FILENAME = 'spam'
def setUp(self):
super().setUp()
self.id = interpreters.create()
self._fs = None
def tearDown(self):
if self._fs is not None:
self._fs.close()
super().tearDown()
@property
def fs(self):
if self._fs is None:
self._fs = FSFixture(self)
return self._fs
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
the-stack_0_15448 | import sys
from . import data_prep_utils
if sys.version < '3' :
from backports import csv
else:
import csv
def autoLabel(raw_strings, module, type):
return set([tuple(module.parse(raw_sequence.strip(), type=type)) for i, raw_sequence in enumerate(set(raw_strings), 1)])
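# autoLabel returns a set of labeled sequences: one tuple per distinct raw string,
# holding whatever module.parse() yields for it (typically (token, label) pairs).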
def label(module, infile, outfile, xml, type=None):
training_data = data_prep_utils.TrainingData(xml, module)
reader = csv.reader(infile)
strings = set(row[0] for row in reader if len(row) > 0)
if type is None:
tagger = module.TAGGER
else:
tagger = module.TAGGERS[type] or module.TAGGER
if tagger:
labeled_list = autoLabel(strings, module, type)
else:
raise Exception("Tagger is not defined in %s" % module.__name__)
training_data.extend(labeled_list)
with open(outfile, 'wb'):
training_data.write(outfile)
print("Training data successfully created and stored in stored in %s" % outfile)
|
the-stack_0_15449 | import clodius.tiles.format as hgfo
import pandas as pd
import numpy as np
import h5py
def csv_to_points(csv_file, output_file):
'''
    Convert a csv file of points (with 'x' and 'y' columns) into an HDF5 file
    of [[x,y]] values, storing the extent and max zoom as attributes.
    Parameters:
    -----------
    csv_file: string
        The filename of the input data file
    output_file: string
        The filename of the HDF5 file to write
'''
df = pd.read_table(csv_file, delimiter=',')
min_x = df['x'].min()
max_x = df['x'].max()
min_y = df['y'].min()
max_y = df['y'].max()
width = max_x - min_x
height = max_y - min_y
max_width = max(width, height)
# print("max_width:", max_width, min_x, min_y, max_x, max_y)
max_zoom = 30
with h5py.File(output_file, 'w') as f_out:
dataset = f_out.create_dataset('values', (len(df), 2), compression='gzip', dtype=np.float32)
        dataset[:] = df.reindex(columns=['x','y']).to_numpy()
dataset.attrs['min_x'] = min_x
dataset.attrs['max_x'] = max_x
dataset.attrs['min_y'] = min_y
dataset.attrs['max_y'] = max_y
dataset.attrs['max_zoom'] = max_zoom
dataset.attrs['max_width'] = max_width
info = {
'min_pos': [min_x, min_y],
        'max_pos': [max_x, max_y],
'max_zoom': max_zoom,
'max_width': max_width
}
return df.reindex(columns=['x', 'y'])
def tileset_info(points_file):
'''
    Return the tileset info (extent, max width and max zoom) stored in the points file.
'''
with h5py.File(points_file, 'r') as f_in:
dset = f_in['values']
return {
'min_pos': [float(dset.attrs['min_x']), float(dset.attrs['min_y'])],
            'max_pos': [float(dset.attrs['max_x']), float(dset.attrs['max_y'])],
'max_width': float(dset.attrs['max_width']),
'max_zoom': int(dset.attrs['max_zoom']),
'mirror_tiles': 'false'
}
def tile_bounds(points_file, z, x, y, width=1, height=1):
'''
Get the boundaries of a tile
Parameters:
-----------
    points_file: string
        The file containing the points and their tileset info
    z, x, y: int
        The tile position
    width, height: int
        The number of tile widths/heights the bounds should span
'''
tsinfo = tileset_info(points_file)
tile_width = tsinfo['max_width'] / 2 ** z
x_start = tsinfo['min_pos'][0] + tile_width * x
x_end = tsinfo['min_pos'][0] + tile_width * (x+width)
y_start = tsinfo['min_pos'][1] + tile_width * y
    y_end = tsinfo['min_pos'][1] + tile_width * (y+height)
return (x_start, x_end, y_start, y_end)
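# Worked example (illustration only): with max_width == 64 and min_pos == [0, 0],
# tile_bounds(points_file, z=2, x=1, y=3) gives tile_width == 64 / 2**2 == 16 and
# returns (16, 32, 48, 64), i.e. the tile spans x in [16, 32) and y in [48, 64).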
def filter_points(data, extent):
'''
Filter points that are within the extent
Parameters:
-----------
data: [[]]
A 2D numpy array containing x,y values
extent: [x_start, x_end, y_start, y_end]
The region we want to return points within
Returns
-------
data: [[]]
A 2D numpy array containing x,y values
'''
# print("extent:", extent)
# print("data.shape", data.shape, data[:,0])
data = data[data[:,0] > extent[0]]
data = data[data[:,0] < extent[1]]
data = data[data[:,1] > extent[2]]
data = data[data[:,1] < extent[3]]
return data
def density_tiles(points_file, z, x, y, width=1, height=1):
'''
Get a 2D histogram of the given region. If the height and
width are specified, then we need to partition this into
multiple returned tiles.
'''
returns = []
with h5py.File(points_file, 'r') as f:
# get all the points in the region
all_points = filter_points(f['values'][:],
tile_bounds(points_file, z, x, y,
width, height))
for i in range(width):
for j in range(height):
# filter from the larger subregion
filtered_points = filter_points(all_points,
tile_bounds(points_file, z, x+i, y+j))
dt = np.histogram2d(filtered_points[:,0],
filtered_points[:,1], bins=256)[0].T
dt[dt == 0.] = np.nan
returns += [((z, x+i, y+j), dt)]
return returns
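# density_tiles returns a list of ((z, x, y), histogram) pairs, one per requested
# tile; each histogram is a 256x256 array with empty bins replaced by NaN.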
def tiles(points_file, z, x, y, width=1, height=1):
return [(tile_position, hgfo.format_dense_tile(data.flatten())) for
(tile_position, data) in density_tiles(points_file, z, x, y, width, height)]
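# End-to-end usage sketch (assumption: 'points.csv' has 'x' and 'y' columns and
# 'points.hdf5' is the converted output):
#
#   csv_to_points('points.csv', 'points.hdf5')
#   info = tileset_info('points.hdf5')
#   tile_list = tiles('points.hdf5', 0, 0, 0)  # [(tile_position, formatted_tile), ...]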
|
the-stack_0_15452 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime, timedelta
import hashlib
import os
import random
import sys
import tempfile
import time
from glob import glob
from py4j.protocol import Py4JJavaError
from pyspark import shuffle, RDD
from pyspark.resource import ExecutorResourceRequests, ResourceProfile, ResourceProfileBuilder,\
TaskResourceRequests
from pyspark.serializers import CloudPickleSerializer, BatchedSerializer, PickleSerializer,\
MarshalSerializer, UTF8Deserializer, NoOpSerializer
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME, QuietTest
global_func = lambda: "Hi"
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it2))
def test_to_localiterator_prefetch(self):
# Test that we fetch the next partition in parallel
# We do this by returning the current time and:
# reading the first elem, waiting, and reading the second elem
# If not in parallel then these would be at different times
# But since they are being computed in parallel we see the time
# is "close enough" to the same.
rdd = self.sc.parallelize(range(2), 2)
times1 = rdd.map(lambda x: datetime.now())
times2 = rdd.map(lambda x: datetime.now())
times_iter_prefetch = times1.toLocalIterator(prefetchPartitions=True)
times_iter = times2.toLocalIterator(prefetchPartitions=False)
times_prefetch_head = next(times_iter_prefetch)
times_head = next(times_iter)
time.sleep(2)
times_next = next(times_iter)
times_prefetch_next = next(times_iter_prefetch)
self.assertTrue(times_next - times_head >= timedelta(seconds=2))
self.assertTrue(times_prefetch_next - times_prefetch_head < timedelta(seconds=1))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_union_pair_rdd(self):
# SPARK-31788: test if pair RDDs can be combined by union.
rdd = self.sc.parallelize([1, 2])
pair_rdd = rdd.zip(rdd)
unionRDD = self.sc.union([pair_rdd, pair_rdd])
self.assertEqual(
set(unionRDD.collect()),
set([(1, 1), (2, 2), (1, 1), (2, 2)])
)
self.assertEqual(unionRDD.count(), 4)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy(blocking=True)
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in range(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(range(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(range(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 2000001
kv = self.sc.parallelize(range(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(range(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('java').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('java', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
# Regression test for SPARK-20947
data = [u'\u6d4b\u8bd5', '1']
rdd = self.sc.parallelize(data)
result = rdd.pipe('cat').collect()
self.assertEqual(data, result)
def test_stopiteration_in_user_code(self):
def stopit(*x):
raise StopIteration()
seq_rdd = self.sc.parallelize(range(10))
keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
msg = "Caught StopIteration thrown from user's code; failing the task"
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.reduce, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg,
seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
# these methods call the user function both in the driver and in the executor
# the exception raised is different according to where the StopIteration happens
# RuntimeError is raised if in the driver
# Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
keyed_rdd.reduceByKeyLocally, stopit)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, stopit, lambda *x: 1)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, lambda *x: 1, stopit)
def test_overwritten_global_func(self):
# Regression test for SPARK-27000
global global_func
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Hi")
global_func = lambda: "Yeah"
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Yeah")
def test_to_local_iterator_failure(self):
# SPARK-27548 toLocalIterator task failure not propagated to Python driver
def fail(_):
raise RuntimeError("local iterator error")
rdd = self.sc.range(10).map(fail)
with self.assertRaisesRegexp(Exception, "local iterator error"):
for _ in rdd.toLocalIterator():
pass
def test_to_local_iterator_collects_single_partition(self):
# Test that partitions are not computed until requested by iteration
def fail_last(x):
if x == 9:
raise RuntimeError("This should not be hit")
return x
rdd = self.sc.range(12, numSlices=4).map(fail_last)
it = rdd.toLocalIterator()
# Only consume first 4 elements from partitions 1 and 2, this should not collect the last
# partition which would trigger the error
for i in range(4):
self.assertEqual(i, next(it))
def test_resourceprofile(self):
rp_builder = ResourceProfileBuilder()
ereqs = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
ereqs.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
treqs = TaskResourceRequests().cpus(2).resource("gpu", 2)
def assert_request_contents(exec_reqs, task_reqs):
self.assertEqual(len(exec_reqs), 5)
self.assertEqual(exec_reqs["cores"].amount, 2)
self.assertEqual(exec_reqs["memory"].amount, 6144)
self.assertEqual(exec_reqs["memoryOverhead"].amount, 1024)
self.assertEqual(exec_reqs["pyspark.memory"].amount, 2048)
self.assertEqual(exec_reqs["gpu"].amount, 2)
self.assertEqual(exec_reqs["gpu"].discoveryScript, "testGpus")
self.assertEqual(exec_reqs["gpu"].resourceName, "gpu")
self.assertEqual(exec_reqs["gpu"].vendor, "nvidia.com")
self.assertEqual(len(task_reqs), 2)
self.assertEqual(task_reqs["cpus"].amount, 2.0)
self.assertEqual(task_reqs["gpu"].amount, 2.0)
assert_request_contents(ereqs.requests, treqs.requests)
rp = rp_builder.require(ereqs).require(treqs).build
assert_request_contents(rp.executorResources, rp.taskResources)
rdd = self.sc.parallelize(range(10)).withResources(rp)
return_rp = rdd.getResourceProfile()
assert_request_contents(return_rp.executorResources, return_rp.taskResources)
rddWithoutRp = self.sc.parallelize(range(10))
self.assertEqual(rddWithoutRp.getResourceProfile(), None)
def test_multiple_group_jobs(self):
import threading
group_a = "job_ids_to_cancel"
group_b = "job_ids_to_run"
threads = []
thread_ids = range(4)
thread_ids_to_cancel = [i for i in thread_ids if i % 2 == 0]
thread_ids_to_run = [i for i in thread_ids if i % 2 != 0]
# A list which records whether job is cancelled.
# The index of the array is the thread index which job run in.
is_job_cancelled = [False for _ in thread_ids]
def run_job(job_group, index):
"""
Executes a job with the group ``job_group``. Each job waits for 3 seconds
and then exits.
"""
try:
self.sc.parallelize([15]).map(lambda x: time.sleep(x)) \
.collectWithJobGroup(job_group, "test rdd collect with setting job group")
is_job_cancelled[index] = False
except Exception:
# Assume that exception means job cancellation.
is_job_cancelled[index] = True
# Test if job succeeded when not cancelled.
run_job(group_a, 0)
self.assertFalse(is_job_cancelled[0])
# Run jobs
for i in thread_ids_to_cancel:
t = threading.Thread(target=run_job, args=(group_a, i))
t.start()
threads.append(t)
for i in thread_ids_to_run:
t = threading.Thread(target=run_job, args=(group_b, i))
t.start()
threads.append(t)
# Wait to make sure all jobs are executed.
time.sleep(3)
# And then, cancel one job group.
self.sc.cancelJobGroup(group_a)
# Wait until all threads launching jobs are finished.
for t in threads:
t.join()
for i in thread_ids_to_cancel:
self.assertTrue(
is_job_cancelled[i],
"Thread {i}: Job in group A was not cancelled.".format(i=i))
for i in thread_ids_to_run:
self.assertFalse(
is_job_cancelled[i],
"Thread {i}: Job in group B did not succeeded.".format(i=i))
if __name__ == "__main__":
import unittest
from pyspark.tests.test_rdd import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
the-stack_0_15453 | # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.fixture_data import agents as data
from novaclient.tests.fixture_data import client
from novaclient.tests import utils
from novaclient.v1_1 import agents
class AgentsTest(utils.FixturedTestCase):
data_fixture_class = data.Fixture
scenarios = [('original', {'client_fixture_class': client.V1}),
('session', {'client_fixture_class': client.SessionV1})]
def stub_hypervisors(self, hypervisor='kvm'):
get_os_agents = {'agents':
[
{
'hypervisor': hypervisor,
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1
},
{
'hypervisor': hypervisor,
'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2
},
]
}
headers = {'Content-Type': 'application/json'}
self.requests.register_uri('GET', self.data_fixture.url(),
json=get_os_agents,
headers=headers)
def test_list_agents(self):
self.stub_hypervisors()
ags = self.cs.agents.list()
self.assert_called('GET', '/os-agents')
for a in ags:
self.assertIsInstance(a, agents.Agent)
self.assertEqual('kvm', a.hypervisor)
def test_list_agents_with_hypervisor(self):
self.stub_hypervisors('xen')
ags = self.cs.agents.list('xen')
self.assert_called('GET', '/os-agents?hypervisor=xen')
for a in ags:
self.assertIsInstance(a, agents.Agent)
self.assertEqual('xen', a.hypervisor)
def test_agents_create(self):
ag = self.cs.agents.create('win', 'x86', '7.0',
'/xxx/xxx/xxx',
'add6bb58e139be103324d04d82d8f546',
'xen')
body = {'agent': {
'url': '/xxx/xxx/xxx',
'hypervisor': 'xen',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'version': '7.0',
'architecture': 'x86',
'os': 'win'}}
self.assert_called('POST', '/os-agents', body)
self.assertEqual(1, ag._info.copy()['id'])
def test_agents_delete(self):
self.cs.agents.delete('1')
self.assert_called('DELETE', '/os-agents/1')
def _build_example_update_body(self):
return {"para": {
"url": "/yyy/yyyy/yyyy",
"version": "8.0",
"md5hash": "add6bb58e139be103324d04d82d8f546"}}
def test_agents_modify(self):
ag = self.cs.agents.update('1', '8.0',
'/yyy/yyyy/yyyy',
'add6bb58e139be103324d04d82d8f546')
body = self._build_example_update_body()
self.assert_called('PUT', '/os-agents/1', body)
self.assertEqual(1, ag.id)
|
the-stack_0_15454 | # MicroPython ST7735 TFT display driver
from machine import Pin
from machine import SPI
import font
import time
class CMD_TFT(object):
# command definitions
CMD_NOP = const(0x00) # No Operation
CMD_SWRESET = const(0x01) # Software reset
CMD_RDDID = const(0x04) # Read Display ID
CMD_RDDST = const(0x09) # Read Display Status
CMD_SLPIN = const(0x10) # Sleep in & booster off
CMD_SLPOUT = const(0x11) # Sleep out & booster on
CMD_PTLON = const(0x12) # Partial mode on
CMD_NORON = const(0x13) # Partial off (Normal)
CMD_INVOFF = const(0x20) # Display inversion off
CMD_INVON = const(0x21) # Display inversion on
CMD_DISPOFF = const(0x28) # Display off
CMD_DISPON = const(0x29) # Display on
CMD_CASET = const(0x2A) # Column address set
CMD_RASET = const(0x2B) # Row address set
CMD_RAMWR = const(0x2C) # Memory write
CMD_RAMRD = const(0x2E) # Memory read
CMD_PTLAR = const(0x30) # Partial start/end address set
CMD_COLMOD = const(0x3A) # Interface pixel format
CMD_MADCTL = const(0x36) # Memory data access control
CMD_RDID1 = const(0xDA) # Read ID1
CMD_RDID2 = const(0xDB) # Read ID2
CMD_RDID3 = const(0xDC) # Read ID3
CMD_RDID4 = const(0xDD) # Read ID4
# panel function commands
CMD_FRMCTR1 = const(0xB1) # In normal mode (Full colors)
CMD_FRMCTR2 = const(0xB2) # In Idle mode (8-colors)
CMD_FRMCTR3 = const(0xB3) # In partial mode + Full colors
CMD_INVCTR = const(0xB4) # Display inversion control
CMD_PWCTR1 = const(0xC0) # Power control settings
CMD_PWCTR2 = const(0xC1) # Power control settings
CMD_PWCTR3 = const(0xC2) # In normal mode (Full colors
CMD_PWCTR4 = const(0xC3) # In Idle mode (8-colors)
CMD_PWCTR5 = const(0xC4) # In partial mode + Full colors
CMD_VMCTR1 = const(0xC5) # VCOM control
CMD_GMCTRP1 = const(0xE0)
CMD_GMCTRN1 = const(0xE1)
def __init__(self):
"""
SPI - SPI Bus (CLK/MOSI/MISO)
DC - RS/DC data/command flag
CS - Chip Select, enable communication
RST/RES - Reset
BL/Lite - Backlight control
"""
# self.tab = tab
self.spi = SPI(1, baudrate=8000000, polarity=1, phase=0)
self.dc = Pin('D6', Pin.OUT, Pin.PULL_DOWN)
self.cs = Pin('A15', Pin.OUT, Pin.PULL_DOWN)
self.rst = Pin('D7', Pin.OUT, Pin.PULL_DOWN)
self.bl = Pin('A7', Pin.OUT, Pin.PULL_DOWN)
#self.spi, self.dc, self.cs, self.rst, self.bl
super().__init__()
# self.tab = tab
self.power_on = True
self.inverted = False
self.backlight_on = True
# default margins, set yours in HAL init
self.margin_row = 0
self.margin_col = 0
def _set_window(self, x0, y0, x1, y1):
"""
Set window frame boundaries.
Any pixels written to the display will start from this area.
"""
# set row XSTART/XEND
self.write_cmd(CMD_RASET)
self.write_data(bytearray([0x00, y0 + self.margin_row, 0x00, y1 + self.margin_row]))
# set column XSTART/XEND
self.write_cmd(CMD_CASET)
self.write_data(bytearray([0x00, x0 + self.margin_col, 0x00, x1 + self.margin_col]))
# write addresses to RAM
self.write_cmd(CMD_RAMWR)
class ST7735(CMD_TFT):
# colors
COLOR_BLACK = const(0x0000)
COLOR_BLUE = const(0x001F)
COLOR_RED = const(0xF800)
COLOR_GREEN = const(0x07E0)
COLOR_CYAN = const(0x07FF)
COLOR_MAGENTA = const(0xF81F)
COLOR_YELLOW = const(0xFFE0)
COLOR_WHITE = const(0xFFFF)
def init(self, orient=None):
# hard reset first
self.reset()
self.write_cmd(CMD_SWRESET)
time.sleep_ms(150)
self.write_cmd(CMD_SLPOUT)
time.sleep_ms(255)
# TODO: optimize data streams and delays
self.write_cmd(CMD_FRMCTR1)
self.write_data(bytearray([0x01, 0x2C, 0x2D]))
self.write_cmd(CMD_FRMCTR2)
self.write_data(bytearray([0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D]))
time.sleep_ms(10)
self.write_cmd(CMD_INVCTR)
self.write_data(bytearray([0x07]))
self.write_cmd(CMD_PWCTR1)
self.write_data(bytearray([0xA2, 0x02, 0x84]))
self.write_cmd(CMD_PWCTR2)
self.write_data(bytearray([0xC5]))
self.write_cmd(CMD_PWCTR3)
self.write_data(bytearray([0x8A, 0x00]))
self.write_cmd(CMD_PWCTR4)
self.write_data(bytearray([0x8A, 0x2A]))
self.write_cmd(CMD_PWCTR5)
self.write_data(bytearray([0x8A, 0xEE]))
self.write_cmd(CMD_VMCTR1)
self.write_data(bytearray([0x0E]))
self.write_cmd(CMD_INVOFF)
self.write_cmd(CMD_MADCTL)
        if orient is None:  # default to horizontal (landscape) orientation
            self.write_data(bytearray([0xA0]))  # RGB; rotate to horizontal: MV=1 MX=0 MY=1
            self.width = 160  # display size as seen by the controller
            self.height = 128
else:
self.write_data(bytearray([0x00]))
self.width = 128
self.height = 160
self.write_cmd(CMD_COLMOD)
self.write_data(bytearray([0x05]))
self.write_cmd(CMD_CASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 127]))
self.write_cmd(CMD_RASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 159]))
self.write_cmd(CMD_GMCTRP1)
self.write_data(bytearray([0x02, 0x1c, 0x07, 0x12, 0x37, 0x32,
0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01, 0x03, 0x10]))
self.write_cmd(CMD_GMCTRN1)
self.write_data(bytearray([0x03, 0x1d, 0x07, 0x06, 0x2e, 0x2c,
0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00, 0x02, 0x10]))
self.write_cmd(CMD_NORON)
time.sleep_ms(10)
self.write_cmd(CMD_DISPON)
time.sleep_ms(100)
    def show_image(self, path, x, y, w=None, h=None):
        """
        Stream an image to the display with its top-left corner at (x, y).
        Sketch implementation: it assumes `path` points to raw RGB565
        (16-bit, big-endian) pixel data of size w x h, defaulting to the
        remaining display area; BMP headers are not parsed.
        """
        w = self.width - x if w is None else w
        h = self.height - y if h is None else h
        self._set_window(x, y, x + w - 1, y + h - 1)
        self.dc.value(1)
        self.cs.value(0)
        with open(path, 'rb') as img:
            buf = img.read(512)
            while buf:
                self.spi.write(buf)
                buf = img.read(512)
        self.cs.value(1)
def power(self, state=None):
"""
Get/set display power.
"""
if state is None:
return self.power_on
self.write_cmd(CMD_DISPON if state else CMD_DISPOFF)
self.power_on = state
def clear(self, color):
"""
Clear the display filling it with color.
"""
self.rect(0, 0, self.width, self.height, color)
def invert(self, state=None):
"""
Get/set display color inversion.
"""
if state is None:
return self.inverted
self.write_cmd(CMD_INVON if state else CMD_INVOFF)
self.inverted = state
def rgbcolor(self, r, g, b):
"""
Pack 24-bit RGB into 16-bit value.
"""
return ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3)
def pixel(self, x, y, color):
"""
Draw a single pixel on the display with given color.
"""
self._set_window(x, y, x + 1, y + 1)
        self.write_pixels(1, bytearray([color >> 8, color & 0xFF]))
def rect(self, x, y, w, h, color):
"""
Draw a rectangle with specified coordinates/size and fill with color.
"""
# check the coordinates and trim if necessary
if (x >= self.width) or (y >= self.height):
return
if (x + w - 1) >= self.width:
w = self.width - x
if (y + h - 1) >= self.height:
h = self.height - y
self._set_window(x, y, x + w - 1, y + h - 1)
        self.write_pixels(w * h, bytearray([color >> 8, color & 0xFF]))
def line(self, x0, y0, x1, y1, color):
# line is vertical
if x0 == x1:
# use the smallest y
start, end = (x1, y1) if y1 < y0 else (x0, y0)
self.vline(start, end, abs(y1 - y0) + 1, color)
# line is horizontal
elif y0 == y1:
# use the smallest x
start, end = (x1, y1) if x1 < x0 else (x0, y0)
self.hline(start, end, abs(x1 - x0) + 1, color)
else:
# Bresenham's algorithm
dx = abs(x1 - x0)
dy = abs(y1 - y0)
inx = 1 if x1 - x0 > 0 else -1
iny = 1 if y1 - y0 > 0 else -1
# steep line
if (dx >= dy):
dy <<= 1
e = dy - dx
dx <<= 1
while (x0 != x1):
# draw pixels
self.pixel(x0, y0, color)
if (e >= 0):
y0 += iny
e -= dx
e += dy
x0 += inx
# not steep line
else:
dx <<= 1
e = dx - dy
dy <<= 1
while(y0 != y1):
# draw pixels
self.pixel(x0, y0, color)
if (e >= 0):
x0 += inx
e -= dy
e += dx
y0 += iny
def hline(self, x, y, w, color):
if (x >= self.width) or (y >= self.height):
return
if (x + w - 1) >= self.width:
w = self.width - x
self._set_window(x, y, x + w - 1, y)
        self.write_pixels(w, bytearray([color >> 8, color & 0xFF]))
def vline(self, x, y, h, color):
if (x >= self.width) or (y >= self.height):
return
if (y + h -1) >= self.height:
h = self.height - y
self._set_window(x, y, x, y + h - 1)
        self.write_pixels(h, bytearray([color >> 8, color & 0xFF]))
def text(self, x, y, string, color):
"""
Draw text at a given position using the user font.
Font can be scaled with the size parameter.
"""
z=font.terminalfont
width = z['width'] + 1
px = x
for c in string:
self.char(px, y, c, z, color, 1, 1)
px += width
# wrap the text to the next line if it reaches the end
if px + width > self.width:
y += z['height'] + 1
px = x
def char(self, x, y, char, font, color, sizex=1, sizey=1):
"""
Draw a character at a given position using the user font.
Font is a data dictionary, can be scaled with sizex and sizey.
"""
if font is None:
return
startchar = font['start']
endchar = font['end']
ci = ord(char)
if (startchar <= ci <= endchar):
width = font['width']
height = font['height']
ci = (ci - startchar) * width
ch = font['data'][ci:ci + width]
# no font scaling
px = x
if (sizex <= 1 and sizey <= 1):
for c in ch:
py = y
for _ in range(height):
if c & 0x01:
self.pixel(px, py, color)
py += 1
c >>= 1
px += 1
# scale to given sizes
else:
for c in ch:
py = y
for _ in range(height):
if c & 0x01:
self.rect(px, py, sizex, sizey, color)
py += sizey
c >>= 1
px += sizex
else:
# character not found in this font
return
def reset(self):
"""
Hard reset the display.
"""
self.dc.value(0)
self.rst.value(1)
time.sleep_ms(500)
self.rst.value(0)
time.sleep_ms(500)
self.rst.value(1)
time.sleep_ms(500)
def backlight(self, state=None):
"""
Get or set the backlight status if the pin is available.
"""
if self.bl is None:
return None
else:
if state is None:
return self.backlight_on
self.bl.value(1 if state else 0)
self.backlight_on = state
def write_pixels(self, count, color):
"""
Write pixels to the display.
count - total number of pixels
color - 16-bit RGB value
"""
self.dc.value(1)
self.cs.value(0)
for _ in range(count):
self.spi.write(color)
self.cs.value(1)
def write_cmd(self, cmd):
"""
Display command write implementation using SPI.
"""
self.dc.value(0)
self.cs.value(0)
self.spi.write(bytearray([cmd]))
self.cs.value(1)
def write_data(self, data):
"""
Display data write implementation using SPI.
"""
self.dc.value(1)
self.cs.value(0)
self.spi.write(data)
self.cs.value(1)
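# A minimal usage sketch (not part of the original driver). It assumes the
# pins configured in CMD_TFT.__init__ ('D6', 'A15', 'D7', 'A7') match your
# board's wiring; adjust them there if they do not.
if __name__ == '__main__':
    tft = ST7735()
    tft.init()                                   # default: 160x128 landscape
    tft.backlight(True)
    tft.clear(ST7735.COLOR_BLACK)
    tft.rect(10, 10, 50, 30, ST7735.COLOR_RED)
    tft.text(10, 50, 'Hello ST7735', ST7735.COLOR_WHITE)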
|
the-stack_0_15455 | ''' Conditional assignment is a structure used to simplify code,
where the assigned value is the one that satisfies the condition.
<variable> = <value1> if (condition) else <value2>
var = 10 if (True) else 20
x = 10
text = 'yes' if x == 10 else 'no'
print(text)
x = 9
text = 'yes' if x == 10 else 'no'
print(text)'''
###################### PROGRAM 1 ###############
num1 = int(input("Enter a number: "))
s = 'even' if num1 % 2 == 0 else 'odd'
print('The number entered is:', s)
|
the-stack_0_15458 | from .base_general import BaseGeneral
from mltoolkit.mldp.steps.collectors import UnitCollector,\
BaseChunkCollector
class ChunkAccumulator(BaseGeneral):
"""
    The ChunkAccumulator step groups or changes the size of the data-chunks
    that are passed along the pipeline. The step does not alter the format of
    data-chunks, only their size.
For example, one might want to use larger chunks (e.g. size of 500) for
computational purposes (fast vectorized operations on large numpy arrays)
but to train a model on smaller data-chunks (e.g. size of 64). In that case,
the step should be added after all computationally intensive ones.
It works both by accumulating smaller upstream data-chunks and passing
larger data-chunks downstream, and splitting larger upstream data-chunks
into smaller downstream data-chunks.
The adjuster uses chunk collectors, which have different notions of size.
    For example, UnitCollector works as described above. However, a
    more exotic collector can accumulate data-units that share the same 'id'
    field value and output a new chunk only after a sufficient number of
    unique ids has been collected.
"""
def __init__(self, collector=None, new_size=2, **kwargs):
"""
        :param collector: an object that accumulates data-chunks and yields
            data-chunks when it gets full.
        :param new_size: a parameter that is passed to the standard collector
            object if no collector is provided.
        :param kwargs: self-explanatory.
"""
super(ChunkAccumulator, self).__init__(**kwargs)
if collector is None:
self.coll = UnitCollector(max_size=new_size)
else:
if not isinstance(collector, BaseChunkCollector):
raise TypeError("Please provide a valid collector that extends"
" the BaseChunkCollector class.")
self.coll = collector
def iter(self, data_chunk_iter):
"""
Wraps the data-chunk iterable into a generator that yields data-chunks
with the adjusted size.
"""
# in case iteration was not performed until the end, reset the collector
self.coll.reset()
for data_chunk in data_chunk_iter:
for adjusted_dc in self.coll.absorb_and_yield_if_full(data_chunk):
yield adjusted_dc
# yield the last (incomplete) chunk(s)
for adjusted_dc in self.coll.yield_remaining():
yield adjusted_dc
self.coll.reset()
def reset(self):
self.coll.reset()
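# A minimal usage sketch (hypothetical): `upstream_chunks` stands for any
# iterable of data-chunks produced by earlier pipeline steps and is not part
# of this module.
#
#     accum = ChunkAccumulator(new_size=64)
#     for chunk in accum.iter(upstream_chunks):
#         ...  # each yielded chunk now holds (at most) 64 data-units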
|
the-stack_0_15459 | # -*- coding: utf-8 -*-
from paver.easy import *
@task
def test(options):
info("Running tests for Python 2")
sh('python2 tests.py')
info("Running tests for Python 3")
sh('python3 tests.py')
@task
def coverage(options):
info("Running coverage for Python 2")
sh('coverage2 run --source ldapom ./tests.py')
sh('coverage2 report')
info("Running coverage for Python 3")
sh('coverage3 run --source ldapom ./tests.py')
sh('coverage3 report')
|
the-stack_0_15460 | import json
import pytest
@pytest.mark.usefixtures("testapp")
class TestBuild:
def test_build_controller(self, testapp):
data = {
'user_name': 'root',
'repo_name': 'test',
'repo_provider': 'gitlab',
'gitlab_addr': 'http://localhost',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 200
assert b'{\n "message": "Build added successfully"\n}\n' in rv.data
def test_build_controller_fail(self, testapp):
data = {
'user_name': 'root',
'repo_provider': 'gitlab',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 401
assert b'{\n "message": "Invalid request"\n}\n' in rv.data
def test_build_controller_branch(self, testapp):
data = {
'repo_branch': 'testing',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 401
|
the-stack_0_15462 | import os
import subprocess
import time
import signal
__author__ = 'thurley'
def wait_timeout(proc, seconds):
"""Wait for a process to finish, or raise exception after timeout"""
start = time.time()
end = start + seconds
interval = 0.01
while True:
result = proc.poll()
#print "waiting"
if result is not None:
return result
if time.time() >= end:
os.killpg(proc.pid, signal.SIGTERM)
raise RuntimeError("Process timed out")
time.sleep(interval)
def run_with_timeout(seconds, *popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
preexec_fn=os.setsid, *popenargs, **kwargs)
retcode = wait_timeout(process, seconds)
output, unused_err = process.communicate()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
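# A minimal usage sketch (assumes a POSIX host where 'echo' is available; the
# command and the 5-second timeout are illustrative only).
if __name__ == '__main__':
    try:
        print(run_with_timeout(5, ['echo', 'hello']))
    except RuntimeError:
        print('command timed out')
    except subprocess.CalledProcessError as err:
        print('command failed with exit code', err.returncode)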
|
the-stack_0_15464 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: collect_results_tcvae.py
# --- Creation Date: 14-09-2020
# --- Last Modified: Mon 14 Sep 2020 01:56:01 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Collect results of tc_vae.
"""
import os
import json
import numpy as np
import argparse
import pandas as pd
from collections import OrderedDict
METRICS_TEMPLATE = {
'beta_vae_sklearn': {
"train_accuracy": None,
"eval_accuracy": None
},
'dci': {
"informativeness_train": None,
"informativeness_test": None,
"disentanglement": None,
"completeness": None
},
'downstream_task_boosted_trees': {},
'factor_vae_metric': {
"train_accuracy": None,
"eval_accuracy": None,
# "num_active_dims": None # disentanglement_lib wrong implementation.
},
'mig': {
"discrete_mig": None
},
'modularity_explicitness': {
"modularity_score": None,
"explicitness_score_train": None,
"explicitness_score_test": None
},
'sap_score': {
"SAP_score": None
},
'unsupervised': {
"gaussian_total_correlation": None,
"gaussian_wasserstein_correlation": None,
"gaussian_wasserstein_correlation_norm": None,
"mutual_info_score": None
}
}
def get_mean_std_for_config(v_ls, target):
'''
v_ls: [{'eval':0.8, ..}, {'eval': 0.7, ...}, ...]
target: 'eval'
'''
pure_ls = []
for item in v_ls:
if item is not None:
pure_ls.append(item[target])
return (None, None) if len(pure_ls) == 0 else (np.mean(pure_ls),
np.std(pure_ls))
def count_samples(x):
x = list(filter(None, x))
return len(x)
def get_moments(res_dict, template):
'''
Args: result dict for each config and seed:
{'0_0_0_0': [{'eval':0.8}, {'eval': 0.7}, ...]}
template of collected results:
{'eval': None, ...}
Return: mean and std of each config:
{'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05}, ...}
'''
res_dict_moments = {}
for k, v in res_dict.items():
res_dict_moments[k] = {}
for res_k in template.keys():
res_dict_moments[k][res_k+'.mean'], \
res_dict_moments[k][res_k+'.std'] \
= get_mean_std_for_config(v, res_k)
res_dict_moments[k]['n_samples'] = count_samples(v)
return res_dict_moments
def get_metric_result(subdir, metric, representation):
result_json = os.path.join(subdir, 'metrics', representation, metric,
'results/json/evaluation_results.json')
if os.path.exists(result_json):
with open(result_json, 'r') as f:
data = json.load(f)
return data
else:
return None
def get_hyps_seed(sub_path):
config_json = os.path.join(sub_path, 'model/results/json/train_config.json')
if os.path.exists(config_json):
with open(config_json, 'r') as f:
data = json.load(f)
return data['beta_tc_vae.beta'], data['model.random_seed']
else:
return None, None
def main():
parser = argparse.ArgumentParser(description='Project description.')
parser.add_argument('--results_dir',
help='Results directory.',
type=str,
default='/mnt/hdd/repo_results/Ramiel/sweep')
parser.add_argument('--metric',
help='Name of the collect metric.',
type=str,
default='factor_vae_metric',
choices=[
'beta_vae_sklearn', 'dci',
'downstream_task_boosted_trees',
'factor_vae_metric', 'mig',
'modularity_explicitness', 'sap_score',
'unsupervised'
])
parser.add_argument('--representation',
help='Representation used.',
type=str,
default='mean',
choices=['mean', 'sampled'])
# parser.add_argument('--overwrite',
# help='Whether to overwrite output directory.',
# type=_str_to_bool,
# default=False)
args = parser.parse_args()
subdirs = os.listdir(args.results_dir)
res_dict = {}
key_template = METRICS_TEMPLATE[args.metric]
for subdir in subdirs:
sub_path = os.path.join(args.results_dir, subdir)
if not os.path.isdir(sub_path):
continue
# parse_subdir = subdir.split('-')
# hyps = '-'.join(parse_subdir[1:-1])
# seed = parse_subdir[-1]
hyps, seed = get_hyps_seed(sub_path)
if hyps not in res_dict:
res_dict[hyps] = [None] * 10
# get result for this seed, a dictionary.
res_dict[hyps][int(seed)] = get_metric_result(sub_path, args.metric,
args.representation)
# {'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05, 'n_samples': 2}, ...}
res_dict = get_moments(res_dict, key_template)
col_heads = ['_config'] + list(res_dict[list(res_dict.keys())[0]].keys())
col_dicts = {k: [] for k in col_heads}
for k, v in res_dict.items():
col_dicts['_config'].append(k)
for k in col_dicts.keys():
if k != '_config':
col_dicts[k].append(v[k])
new_results = OrderedDict(sorted(col_dicts.items()))
results_df = pd.DataFrame(new_results)
print('results_df:', results_df)
results_df.to_csv(os.path.join(
args.results_dir,
'collected-' + args.metric + '-' + args.representation + '.csv'),
na_rep='-',
index=False,
float_format='%.3f')
if __name__ == "__main__":
main()
|
the-stack_0_15466 | import os
import random
import typing
from airports.airport import Airport, AirportType
from airports.airportstable import AirportsTable
from airports.download import download
from airports.runwaystable import RunwaysTable
from airports.wikipediahelper import get_wikipedia_articles
class DB:
def __init__(self) -> None:
self._airports: typing.Dict[str, Airport] = {}
self._large: typing.List[str] = []
self._medium: typing.List[str] = []
self._small: typing.List[str] = []
self._other: typing.List[str] = []
def load(self, cache_dir: str, reset_cache: bool) -> None:
airports_csv = os.path.join(cache_dir, "airports.csv")
runways_csv = os.path.join(cache_dir, "runways.csv")
wikipedia_json = os.path.join(cache_dir, "wikipedia_json")
if reset_cache:
for file_name in [airports_csv, runways_csv, wikipedia_json]:
if os.path.exists(file_name):
os.remove(file_name)
airports = AirportsTable(download("https://ourairports.com/data/airports.csv", airports_csv))
runways = RunwaysTable(download("https://ourairports.com/data/runways.csv", runways_csv))
articles = get_wikipedia_articles(wikipedia_json)
airports.add_wikipedia(articles)
airports.compute_bounds(runways.to_dict())
airports.check()
for airport in airports.good_airports():
self._airports[airport.icao_code()] = airport
if airport.airport_type() == AirportType.LARGE_AIRPORT:
self._large.append(airport.icao_code())
elif airport.airport_type() == AirportType.MEDIUM_AIRPORT:
self._medium.append(airport.icao_code())
elif airport.airport_type() == AirportType.SMALL_AIRPORT:
self._small.append(airport.icao_code())
else:
self._other.append(airport.icao_code())
def get_all_icaos(self) -> typing.List[str]:
return list(self._airports.keys())
def get(self, icao: str) -> typing.Optional[Airport]:
icao = icao.strip().upper()
if icao in self._airports:
return self._airports[icao]
return None
def get_random(self) -> Airport:
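        # Bias the draw toward busier airports: roughly 50% large, 25% medium,
        # 12.5% small, and the remainder drawn from the full table.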
if random.choice([True, False]):
return self._airports[random.choice(self._large)]
if random.choice([True, False]):
return self._airports[random.choice(self._medium)]
if random.choice([True, False]):
return self._airports[random.choice(self._small)]
return self._airports[random.choice(list(self._airports.keys()))]
def get_random_list(self, count: int) -> typing.List[Airport]:
return random.sample(list(self._airports.values()), count)
def search(self, needle: str) -> typing.Optional[Airport]:
needle = needle.strip().upper()
for airport in self._airports.values():
if airport.matches_code(needle):
return airport
for airport in self._airports.values():
if airport.matches_name(needle):
return airport
for airport in self._airports.values():
if airport.matches_location(needle):
return airport
return None
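# A minimal usage sketch (hypothetical cache directory and lookup strings; not
# part of the module's API):
#
#     db = DB()
#     db.load(cache_dir='/tmp/airports-cache', reset_cache=False)
#     airport = db.get('EDDF')          # exact ICAO lookup
#     hit = db.search('frankfurt')      # code, then name, then location match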
|
the-stack_0_15467 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer, _build_optimizer
from fairseq.optim.lr_scheduler import FairseqLRScheduler, build_lr_scheduler
from omegaconf import II, open_dict
logger = logging.getLogger(__name__)
@dataclass
class OptimizerAndSchedulerConfig(FairseqDataclass):
optimizer: Any = None
lr_scheduler: Optional[Any] = None
lr: List[float] = II("optimization.lr")
@dataclass
class CompositeOptimizerConfig(FairseqDataclass):
groups: Dict[str, OptimizerAndSchedulerConfig] = field(
default_factory=lambda: {},
metadata={
"help": "optimizer name -> optimizer OptimizerAndSchedulerConfig. "
"Configures a different optimizer and (optionally) lr scheduler for each parameter group"
},
)
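# A hypothetical configuration sketch (group names and optimizer/scheduler
# choices are placeholders, not values required by this module). Parameters
# are routed to a group by setting `p.param_group = "<group name>"` on them in
# the model; parameters without that attribute fall into the "default" group.
#
#     groups:
#       default:
#         lr: [0.0001]
#         optimizer:    {_name: adam, lr: [0.0001]}
#         lr_scheduler: {_name: fixed}
#       encoder:
#         lr: [0.00005]
#         optimizer:    {_name: adam, lr: [0.00005]}
#         lr_scheduler: {_name: fixed}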
@register_optimizer("composite", dataclass=CompositeOptimizerConfig)
class FairseqCompositeOptimizer(FairseqOptimizer):
optimizers: Dict[str, FairseqOptimizer] = {}
lr_schedulers: Dict[str, FairseqLRScheduler] = {}
lr_scheduler: FairseqLRScheduler = None
_optimizer: torch.optim.Optimizer
def __init__(self, cfg: CompositeOptimizerConfig, params):
super().__init__(cfg)
assert (
len(params) > 1
), "Composite optimizer only works when there are multiple parameter groups (try fp16_no_flatten_grads: true)"
groupped_params = defaultdict(list)
for p in params:
group = getattr(p, "param_group", "default")
groupped_params[group].append(p)
assert groupped_params.keys() == cfg.groups.keys(), (
f"Parameter groups {groupped_params.keys()} and optimizer groups {cfg.groups.keys()} are not the same! "
"Try setting 'param_group' on your parameters in the model."
)
for group, group_params in groupped_params.items():
group_cfg = cfg.groups[group]
with open_dict(group_cfg):
group_cfg.optimizer.lr = group_cfg.lr
group_cfg.lr_scheduler.lr = group_cfg.lr
self.optimizers[group] = _build_optimizer(group_cfg.optimizer, group_params)
if group_cfg.lr_scheduler is not None:
self.lr_schedulers[group] = build_lr_scheduler(
group_cfg.lr_scheduler, self.optimizers[group]
)
if len(self.lr_schedulers) > 0:
assert len(self.lr_schedulers) == len(self.optimizers), (
f"Please provide an lr scheduler for each optimizer to use pass_through scheduler. "
f"Optimizers: {self.optimizers}; Lr scheds: {self.lr_schedulers}"
)
self.lr_scheduler = CompositeLRScheduler(self.lr_schedulers)
self._optimizer = CompositeOptimizer(self.optimizers)
@property
def supports_groups(self):
return True
@property
def param_groups(self):
for opt in self.optimizers.values():
for group in opt.param_groups:
yield group
def get_lr(self):
"""Return the current learning rate."""
k = (
"default"
if "default" in self.optimizers
else next(iter(self.optimizers.keys()))
)
return self.optimizers[k].param_groups[0]["lr"]
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.optimizers.items()}
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
if k not in self.optimizers:
# skip extra keys like "loss_scale" added by fp16 optimizer
continue
overrides = (
optimizer_overrides[k]
if isinstance(optimizer_overrides, dict) and k in optimizer_overrides
else None
)
self.optimizers[k].load_state_dict(state, optimizer_overrides=overrides)
class CompositeOptimizer(torch.optim.Optimizer):
def __init__(self, optimizers: Dict[str, FairseqOptimizer]):
self.optimizers = optimizers
@property
def supports_memory_efficient_fp16(self):
return all(o.supports_memory_efficient_fp16 for o in self.optimizers.values())
@property
def supports_flat_params(self):
return all(o.supports_flat_params for o in self.optimizers.values())
def step(self, closure=None, groups=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for k, opt in self.optimizers.items():
if groups is None or k in groups:
opt.step()
return loss
def zero_grad(self):
for opt in self.optimizers.values():
opt.zero_grad()
class CompositeLRScheduler(FairseqLRScheduler):
def __init__(self, lr_schedulers):
super().__init__(None, None)
self.lr_schedulers = lr_schedulers
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.lr_schedulers.items()}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
self.lr_schedulers[k].load_state_dict(state)
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
for s in self.lr_schedulers.values():
s.step_begin_epoch(epoch)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
for s in self.lr_schedulers.values():
s.step(epoch)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return {k: s.step_update(num_updates) for k, s in self.lr_schedulers.items()}
|
the-stack_0_15469 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime
import getpass
import logging
import socket
import multiprocessing
import os
import signal
import sys
import threading
import time
from time import sleep
import psutil
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from tabulate import tabulate
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.models import DagRun
from airflow.settings import Stats
from airflow.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorManager,
SimpleDag,
SimpleDagBag,
list_py_file_paths)
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
Base = models.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have it's own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
            (datetime.now() - self.latest_heartbeat).total_seconds() <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
session.close()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (datetime.now() - job.latest_heartbeat).total_seconds())
# Don't keep session open while sleeping as it leaves a connection open
session.close()
sleep(sleep_for)
# Update last heartbeat time
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
class DagFileProcessor(AbstractDagFileProcessor):
"""Helps call SchedulerJob.process_file() in a separate process."""
# Counter that increments everytime an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, log_file):
"""
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
        :param dag_id_white_list: If specified, only look at these DAG ID's
        :type dag_id_white_list: list[unicode]
:param log_file: the path to the file where log lines should be output
:type log_file: unicode
"""
self._file_path = file_path
self._log_file = log_file
# Queue that's used to pass results from the child process.
self._result_queue = multiprocessing.Queue()
# The process that was launched to process the given .
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@property
def log_file(self):
return self._log_file
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
log_file):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param log_file: the logging output for the process should be directed
to this file
:type log_file: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
"""
def helper():
# This helper runs in the newly created process
# Re-direct stdout and stderr to a separate log file. Otherwise,
# the main log becomes too hard to read. No buffering to enable
# responsive file tailing
parent_dir, _ = os.path.split(log_file)
# Create the parent directory for the log file if necessary.
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
f = open(log_file, "a")
original_stdout = sys.stdout
original_stderr = sys.stderr
sys.stdout = f
sys.stderr = f
try:
# Re-configure logging to use the new output streams
log_format = settings.LOG_FORMAT_WITH_THREAD_NAME
settings.configure_logging(log_format=log_format)
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
logging.info("Started process (PID=%s) to work on %s",
os.getpid(),
file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list)
result = scheduler_job.process_file(file_path,
pickle_dags)
result_queue.put(result)
end_time = time.time()
logging.info("Processing %s took %.3f seconds",
file_path,
end_time - start_time)
except:
# Log exceptions through the logging framework.
logging.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
f.close()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self.log_file)
self._start_time = datetime.now()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
logging.warn("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=models.DAGS_FOLDER,
num_runs=-1,
file_process_interval=conf.getint('scheduler',
'min_file_process_interval'),
processor_poll_interval=1.0,
run_duration=None,
do_pickle=False,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited within the run_duration.
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
self.using_sqlite = True
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Parse and schedule each file no faster than this interval. Default
# to 3 minutes.
self.file_process_interval = file_process_interval
# Directory where log files for the processes that scheduled the DAGs reside
self.child_process_log_directory = conf.get('scheduler',
'child_process_log_directory')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([ti.sla for ti in dag.tasks]):
self.logger.info("Skipping SLA check for {} because "
"no tasks in DAG have SLAs".format(dag))
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(or_(SlaMiss.email_sent == False,
SlaMiss.notification_sent == False))
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
@staticmethod
@provide_session
def clear_nonexistent_import_errors(session, known_file_paths):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param known_file_paths: The list of existing files that are parsed for DAGs
:type known_file_paths: list[unicode]
"""
session.query(models.ImportError).filter(
~models.ImportError.filename.in_(known_file_paths)
).delete(synchronize_session='fetch')
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: models.Dagbag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
        for filename, stacktrace in dagbag.import_errors.items():
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not dag.catchup:
# The logic is that we move start_date up until
# one period before, so that datetime.now() is AFTER
# the period end, and the job can be created...
now = datetime.now()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.logger.debug("Next run date based on tasks {}"
.format(next_run_date))
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.logger.debug("Dag start date: {}. Next run date: {}"
.format(dag.start_date, next_run_date))
# don't ever schedule in the future
if next_run_date > datetime.now():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= datetime.now():
next_run = dag.create_dagrun(
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False
)
return next_run
def _process_task_instances(self, dag, queue):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
session = settings.Session()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.logger.info("Examining DAG run {}".format(run))
# don't consider runs that are executed in the future
if run.execution_date > datetime.now():
self.logger.error("Execution date is in future: {}"
.format(run.execution_date))
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.logger.info("Active dag runs > max_active_run.")
continue
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.logger.debug("Examining active DAG run {}".format(run))
# this needs a fresh session sometimes tis get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.logger.debug('Queuing task: {}'.format(ti))
queue.append(ti.key)
session.close()
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
exists but is not in the running state. This normally should not
happen, but it can if the state of DagRuns are changed manually.
:param old_states: examine TaskInstances in this state
        :type old_states: list[State]
:param new_state: set TaskInstances to this state
:type new_state: State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: SimpleDagBag
"""
task_instances_to_change = (
session
.query(models.TaskInstance)
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids))
.filter(models.TaskInstance.state.in_(old_states))
.with_for_update()
.all()
)
""":type: list[TaskInstance]"""
for task_instance in task_instances_to_change:
dag_runs = DagRun.find(dag_id=task_instance.dag_id,
execution_date=task_instance.execution_date,
)
if len(dag_runs) == 0:
self.logger.warn("DagRun for %s %s does not exist",
task_instance.dag_id,
task_instance.execution_date)
continue
# There should only be one DAG run. Add some logging info if this
# is not the case for later debugging.
if len(dag_runs) > 1:
self.logger.warn("Multiple DagRuns found for {} {}: {}"
.format(task_instance.dag_id,
task_instance.execution_date,
dag_runs))
if not any(dag_run.state == State.RUNNING for dag_run in dag_runs):
self.logger.warn("Setting {} to state={} as it does not have "
"a DagRun in the {} state"
.format(task_instance,
new_state,
State.RUNNING))
task_instance.state = new_state
session.merge(task_instance)
session.commit()
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Fetches task instances from ORM in the specified states, figures
out pool limits, and sends them to the executor for execution.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: None
"""
# Get all the queued task instances from associated with scheduled
# DagRuns.
TI = models.TaskInstance
task_instances_to_examine = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.filter(TI.state.in_(states))
.all()
)
# Put one task instance on each line
if len(task_instances_to_examine) == 0:
self.logger.info("No tasks to send to the executor")
return
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.logger.info("Tasks up for execution:\n\t{}".format(task_instance_str))
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.logger.info("Figuring out tasks to run in Pool(name={pool}) "
"with {open_slots} open slots and {num_queued} "
"task instances in queue".format(**locals()))
if open_slots <= 0:
continue
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# DAG IDs with running tasks that equal the concurrency limit of the dag
dag_id_to_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.logger.info("No more slots free")
# Can't schedule any more since there are no more open slots.
break
if self.executor.has_task(task_instance):
self.logger.debug("Not handling task {} as the executor reports it is running"
.format(task_instance.key))
continue
if simple_dag_bag.get_dag(task_instance.dag_id).is_paused:
self.logger.info("Not executing queued {} since {} is paused"
.format(task_instance, task_instance.dag_id))
continue
# todo: remove this logic when backfills will be part of the scheduler
dag_run = task_instance.get_dagrun()
if dag_run and dag_run.is_backfill:
continue
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
if dag_id not in dag_id_to_running_task_count:
dag_id_to_running_task_count[dag_id] = \
DagRun.get_running_tasks(
session,
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids)
current_task_concurrency = dag_id_to_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.logger.info("DAG {} has {}/{} running tasks"
.format(dag_id,
current_task_concurrency,
task_concurrency_limit))
if current_task_concurrency > task_concurrency_limit:
self.logger.info("Not executing {} since the number "
"of tasks running from DAG {} is >= to the "
"DAG's task concurrency limit of {}"
.format(task_instance,
dag_id,
task_concurrency_limit))
continue
command = " ".join(TI.generate_command(
task_instance.dag_id,
task_instance.task_id,
task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=task_instance.pool,
file_path=simple_dag_bag.get_dag(task_instance.dag_id).full_filepath,
pickle_id=simple_dag_bag.get_dag(task_instance.dag_id).pickle_id))
priority = task_instance.priority_weight
queue = task_instance.queue
self.logger.info("Sending to executor {} with priority {} and queue {}"
.format(task_instance.key, priority, queue))
# Set the state to queued
task_instance.refresh_from_db(lock_for_update=True, session=session)
if task_instance.state not in states:
self.logger.info("Task {} was set to {} outside this scheduler."
.format(task_instance.key, task_instance.state))
session.commit()
continue
self.logger.info("Setting state of {} to {}".format(
task_instance.key, State.QUEUED))
task_instance.state = State.QUEUED
task_instance.queued_dttm = (datetime.now()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
session.commit()
# These attributes will be lost after the object expires, so save them.
task_id_ = task_instance.task_id
dag_id_ = task_instance.dag_id
execution_date_ = task_instance.execution_date
make_transient(task_instance)
task_instance.task_id = task_id_
task_instance.dag_id = dag_id_
task_instance.execution_date = execution_date_
self.executor.queue_command(
task_instance,
command,
priority=priority,
queue=queue)
open_slots -= 1
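    # Ordering sketch for the loop above: sorted(task_instances,
    # key=lambda ti: (-ti.priority_weight, ti.execution_date)) queues
    # higher-weight tasks first and, for equal weights, older execution dates
    # first, until the pool's open_slots counter reaches zero.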
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:return: None
"""
        for dag in dags:
            dag_id = dag.dag_id
            dag = dagbag.get_dag(dag_id)
            # Check that the DAG exists before dereferencing it below.
            if not dag:
                self.logger.error("DAG ID {} was not found in the DagBag"
                                  .format(dag_id))
                continue
            if dag.is_paused:
                self.logger.info("Not processing DAG {} since it's paused"
                                 .format(dag_id))
                continue
self.logger.info("Processing {}".format(dag.dag_id))
dag_run = self.create_dag_run(dag)
if dag_run:
self.logger.info("Created {}".format(dag_run))
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
models.DagStat.clean_dirty([d.dag_id for d in dags])
def _process_executor_events(self):
"""
Respond to executor events.
:param executor: the executor that's running the task instances
:type executor: BaseExecutor
:return: None
"""
for key, executor_state in list(self.executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
self.logger.info("Executor reports {}.{} execution_date={} as {}"
.format(dag_id,
task_id,
execution_date,
executor_state))
def _log_file_processing_stats(self,
known_file_paths,
processor_manager):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:param processor_manager: manager for the file processors
        :type processor_manager: DagFileProcessorManager
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = processor_manager.get_last_runtime(file_path)
processor_pid = processor_manager.get_pid(file_path)
processor_start_time = processor_manager.get_start_time(file_path)
runtime = ((datetime.now() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = processor_manager.get_last_finish_time(file_path)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.logger.info(log_str)
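    # Example of the emitted report (values are illustrative):
    #
    # ================================================================================
    # DAG File Processing Stats
    #
    # File Path               PID  Runtime    Last Runtime    Last Run
    # --------------------  -----  ---------  --------------  -------------------
    # /dags/example_dag.py   1234  1.05s      0.92s           2017-01-01T00:00:00
    # ================================================================================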
@provide_session
def _reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
This function checks for a DagRun if there are any tasks
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = list()
tis.extend(dag_run.get_task_instances(state=State.SCHEDULED, session=session))
tis.extend(dag_run.get_task_instances(state=State.QUEUED, session=session))
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
self.logger.debug("Rescheduling orphaned task {}".format(ti))
ti.state = State.NONE
session.commit()
def _execute(self):
self.logger.info("Starting the scheduler")
pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
# Use multiple processes to parse and generate tasks for the
# DAGs in parallel. By processing them in separate processes,
# we can get parallelism and isolation from potentially harmful
# user code.
self.logger.info("Processing files using up to {} processes at a time "
.format(self.max_threads))
self.logger.info("Running execute loop for {} seconds"
.format(self.run_duration))
self.logger.info("Processing each file at most {} times"
.format(self.num_runs))
self.logger.info("Process each file at most once every {} seconds"
.format(self.file_process_interval))
self.logger.info("Checking for new files in {} every {} seconds"
.format(self.subdir, self.dag_dir_list_interval))
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
def processor_factory(file_path, log_file_path):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
log_file_path)
processor_manager = DagFileProcessorManager(self.subdir,
known_file_paths,
self.max_threads,
self.file_process_interval,
self.child_process_log_directory,
self.num_runs,
processor_factory)
try:
self._execute_helper(processor_manager)
finally:
self.logger.info("Exited execute loop")
# Kill all child processes on exit since we don't want to leave
# them as orphaned.
pids_to_kill = processor_manager.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.logger.info("Terminating child PID: {}".format(child.pid))
child.terminate()
timeout = 5
self.logger.info("Waiting up to {}s for processes to exit..."
.format(timeout))
try:
psutil.wait_procs(child_processes, timeout)
except psutil.TimeoutExpired:
self.logger.debug("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
for child in child_processes:
self.logger.info("Killing child PID: {}".format(child.pid))
child.kill()
child.wait()
def _execute_helper(self, processor_manager):
"""
:param processor_manager: manager to use
:type processor_manager: DagFileProcessorManager
:return: None
"""
self.executor.start()
session = settings.Session()
self.logger.info("Resetting state for orphaned tasks")
# grab orphaned tasks and make sure to reset their state
active_runs = DagRun.find(
state=State.RUNNING,
external_trigger=False,
session=session
)
for dr in active_runs:
self.logger.info("Resetting {} {}".format(dr.dag_id,
dr.execution_date))
self._reset_state_for_orphaned_tasks(dr, session=session)
session.close()
execute_start_time = datetime.now()
# Last time stats were printed
last_stat_print_time = datetime(2000, 1, 1)
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = datetime.now()
# Last time that the DAG dir was traversed to look for files
last_dag_dir_refresh_time = datetime.now()
# Use this value initially
known_file_paths = processor_manager.file_paths
# For the execute duration, parse and schedule DAGs
while (datetime.now() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.logger.debug("Starting Loop...")
loop_start_time = time.time()
# Traverse the DAG directory for Python files containing DAGs
# periodically
elapsed_time_since_refresh = (datetime.now() -
last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
last_dag_dir_refresh_time = datetime.now()
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
processor_manager.set_file_paths(known_file_paths)
self.logger.debug("Removing old import errors")
self.clear_nonexistent_import_errors(known_file_paths=known_file_paths)
            # Kick off new processes and collect results from finished ones
self.logger.info("Heartbeating the process manager")
simple_dags = processor_manager.heartbeat()
if self.using_sqlite:
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.logger.debug("Waiting for processors to finish since we're "
"using sqlite")
processor_manager.wait_until_finished()
# Send tasks for execution if available
if len(simple_dags) > 0:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued, but the corresponding
# DAG run isn't running, set the state to NONE so we don't try to
# re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
            # Call heartbeats
self.logger.info("Heartbeating the executor")
self.executor.heartbeat()
# Process events from the executor
self._process_executor_events()
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (datetime.now() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.logger.info("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = datetime.now()
# Occasionally print out stats about how fast the files are getting processed
if ((datetime.now() - last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(known_file_paths) > 0:
self._log_file_processing_stats(known_file_paths,
processor_manager)
last_stat_print_time = datetime.now()
loop_end_time = time.time()
self.logger.debug("Ran scheduling loop in {:.2f}s"
.format(loop_end_time - loop_start_time))
self.logger.debug("Sleeping for {:.2f}s"
.format(self._processor_poll_interval))
time.sleep(self._processor_poll_interval)
# Exit early for a test mode
if processor_manager.max_runs_reached():
self.logger.info("Exiting loop as all files have been processed "
"{} times".format(self.num_runs))
break
# Stop any processors
processor_manager.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
all_files_processed = True
for file_path in known_file_paths:
if processor_manager.get_last_finish_time(file_path) is None:
all_files_processed = False
break
if all_files_processed:
self.logger.info("Deactivating DAGs that haven't been touched since {}"
.format(execute_start_time.isoformat()))
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[SimpleDag]
"""
self.logger.info("Processing file {} for tasks to queue".format(file_path))
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path)
except Exception:
self.logger.exception("Failed at reloading the DAG file {}".format(file_path))
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.logger.info("DAG(s) {} retrieved from {}"
.format(dagbag.dags.keys(),
file_path))
else:
self.logger.warn("No viable dags retrieved from {}".format(file_path))
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
sync_time = datetime.now()
for dag in dagbag.dags.values():
models.DAG.sync_to_db(dag, dag.owner, sync_time)
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
task_ids = [task.task_id for task in dag.tasks]
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
simple_dags.append(SimpleDag(dag.dag_id,
task_ids,
dag.full_filepath,
dag.concurrency,
dag.is_paused,
pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer checking the task dependency checks to the worker themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
            # Only schedule tasks that have their dependencies met, e.g. to avoid
            # a task that recently got its state changed to RUNNING from somewhere
            # other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.logger.info("Creating / updating {} in ORM".format(ti))
session.merge(ti)
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.logger.exception("Error logging import errors!")
try:
dagbag.kill_zombies()
except Exception:
self.logger.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instance to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
*args, **kwargs):
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
DagRun = models.DagRun
# consider max_active_runs but ignore when running subdags
# "parent.child" as a dag_id is by convention a subdag
if self.dag.schedule_interval and "." not in self.dag.dag_id:
active_runs = DagRun.find(
dag_id=self.dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs
if len(active_runs) >= self.dag.max_active_runs:
self.logger.info("Dag {} has reached maximum amount of {} dag runs"
.format(self.dag.dag_id, self.dag.max_active_runs))
return
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
executor_fails = Counter()
# Build a list of all instances to run
tasks_to_run = {}
failed = set()
succeeded = set()
started = set()
skipped = set()
not_ready = set()
deadlocked = set()
# create dag runs
dr_start_date = start_date or min([t.start_date for t in self.dag.tasks])
next_run_date = self.dag.normalize_schedule(dr_start_date)
end_date = end_date or datetime.now()
active_dag_runs = []
while next_run_date and next_run_date <= end_date:
run_id = 'backfill_' + next_run_date.isoformat()
            # check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=next_run_date,
session=session)
if not run:
run = self.dag.create_dagrun(
run_id=run_id,
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
else:
run = run[0]
# set required transient field
run.dag = self.dag
            # explicitly mark running as we can fill gaps
run.state = State.RUNNING
run.verify_integrity(session=session)
            # for some reason, if we don't refresh, the reference to run is lost
run.refresh_from_db()
make_transient(run)
active_dag_runs.append(run)
next_run_date = self.dag.following_schedule(next_run_date)
run_count = 0
for run in active_dag_runs:
logging.info("Checking run {}".format(run))
run_count = run_count + 1
def get_task_instances_for_dag_run(dag_run):
                # this needs a fresh session; sometimes TIs get detached
                # can be more fine-grained (excluding success or skipped)
tasks = {}
for ti in dag_run.get_task_instances():
tasks[ti.key] = ti
return tasks
# Triggering what is ready to get triggered
while not deadlocked:
tasks_to_run = get_task_instances_for_dag_run(run)
self.logger.debug("Clearing out not_ready list")
not_ready.clear()
for key, ti in list(tasks_to_run.items()):
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.logger.debug("Task instance to run {} state {}"
.format(ti, ti.state))
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
succeeded.add(key)
self.logger.debug("Task instance {} succeeded. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.SKIPPED:
skipped.add(key)
self.logger.debug("Task instance {} skipped. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=True):
self.logger.debug('Sending {} to executor'.format(ti))
if ti.state == State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool)
started.add(key)
# Mark the task as not ready to run
elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
self.logger.debug('Adding {} to not_ready'.format(ti))
not_ready.add(key)
session.commit()
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run, then the backfill is deadlocked
if not_ready and not_ready == set(tasks_to_run):
self.logger.warn("Deadlock discovered for tasks_to_run={}"
.format(tasks_to_run.values()))
deadlocked.update(tasks_to_run.values())
tasks_to_run.clear()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
if key not in tasks_to_run:
self.logger.warn("{} state {} not in tasks_to_run={}"
.format(key, state,
tasks_to_run.values()))
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
logging.info("Executor state: {} task {}".format(state, ti))
# executor reports failure
if state == State.FAILED:
# task reports running
if ti.state == State.RUNNING:
msg = (
'Executor reports that task instance {} failed '
'although the task says it is running.'.format(ti))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.error("Skipping {} ".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# anything else is a failure
else:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# executor reports success
elif state == State.SUCCESS:
# task reports success
if ti.state == State.SUCCESS:
self.logger.info(
'Task instance {} succeeded'.format(ti))
succeeded.add(key)
tasks_to_run.pop(key)
# task reports failure
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.info("Task instance {} skipped".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# this probably won't ever be triggered
elif ti in not_ready:
self.logger.info(
"{} wasn't expected to run, but it did".format(ti))
# executor reports success but task does not - this is weird
elif ti.state not in (
State.SCHEDULED,
State.QUEUED,
State.UP_FOR_RETRY):
self.logger.error(
"The airflow run command failed "
"at reporting an error. This should not occur "
"in normal circumstances. Task state is '{}',"
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
# if the executor fails 3 or more times, stop trying to
# run the task
executor_fails[key] += 1
if executor_fails[key] >= 3:
msg = (
'The airflow run command failed to report an '
'error for task {} three or more times. The '
'task is being marked as failed. This is very '
'unusual and probably means that an error is '
'taking place before the task even '
'starts.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
msg = ' | '.join([
"[backfill progress]",
"dag run {6} of {7}",
"tasks waiting: {0}",
"succeeded: {1}",
"kicked_off: {2}",
"failed: {3}",
"skipped: {4}",
"deadlocked: {5}"
]).format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(skipped),
len(deadlocked),
run_count,
len(active_dag_runs))
self.logger.info(msg)
self.logger.debug("Finished dag run loop iteration. "
"Remaining tasks {}"
.format(tasks_to_run.values()))
if len(tasks_to_run) == 0:
break
# update dag run state
run.update_state(session=session)
if run.dag.is_paused:
models.DagStat.clean_dirty([run.dag_id], session=session)
executor.end()
session.commit()
session.close()
err = ''
if failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(failed))
if deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=True) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=True)
for t in deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(succeeded)
err += ' These tasks have started:\n{}\n'.format(started)
err += ' These tasks have failed:\n{}\n'.format(failed)
err += ' These tasks are skipped:\n{}\n'.format(skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(deadlocked)
if err:
raise AirflowException(err)
self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
# Keeps track of the fact that the task instance has been observed
# as running at least once
self.was_running = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
try:
self.task_runner.start()
ti = self.task_instance
session = settings.Session()
if self.task_runner.process:
ti.pid = self.task_runner.process.pid
ti.hostname = socket.getfqdn()
session.merge(ti)
session.commit()
session.close()
last_heartbeat_time = time.time()
heartbeat_time_limit = conf.getint('scheduler',
'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done
return_code = self.task_runner.return_code()
if return_code is not None:
self.logger.info("Task exited with return code {}"
.format(return_code))
return
# Periodically heartbeat so that the scheduler doesn't think this
# is a zombie
try:
self.heartbeat()
last_heartbeat_time = time.time()
except OperationalError:
Stats.incr('local_task_job_heartbeat_failure', 1, 1)
self.logger.exception("Exception while trying to heartbeat! "
"Sleeping for {}s".format(self.heartrate))
time.sleep(self.heartrate)
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
time_since_last_heartbeat = time.time() - last_heartbeat_time
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.logger.error("Heartbeat time limited exceeded!")
raise AirflowException("Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s)."
.format(time_since_last_heartbeat,
heartbeat_time_limit))
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# task is already terminating, let it breathe
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
self.was_running = True
fqdn = socket.getfqdn()
if not (fqdn == ti.hostname and
self.task_runner.process.pid == ti.pid):
logging.warning("Recorded hostname and pid of {ti.hostname} "
"and {ti.pid} do not match this instance's "
"which are {fqdn} and "
"{self.task_runner.process.pid}. "
"Taking the poison pill. So long."
.format(**locals()))
raise AirflowException("Another worker/process is running this job")
elif (self.was_running
and self.task_runner.return_code() is None
and hasattr(self.task_runner, 'process')):
logging.warning(
"State of this instance has been externally set to "
"{}. Taking the poison pill. So long.".format(ti.state))
self.task_runner.terminate()
self.terminating = True
|
the-stack_0_15470 | from collections import OrderedDict
def get_field_keys(fields, path=""):
previous = path + "." if path else ""
results = []
if hasattr(fields, "_meta"):
fields = OrderedDict(
[
(field.name, field)
for field in fields._meta.get_fields()
# don't want to go backwards
if (field.__class__.__name__ != "ManyToOneRel") and
# avoid recursive self references
not (
field.__class__.__name__ == "ForeignKey"
and field.related_model == fields
)
]
)
for field_name, field in fields.items():
if field.__class__.__name__ in [
"NestedSerializer",
"OrderedDict",
"dict",
"ForeignKey",
]:
subobj = None
if hasattr(field, "fields"):
subobj = field.fields
elif hasattr(field, "related_model"):
subobj = field.related_model
else:
subobj = field
for result in get_field_keys(subobj, previous + field_name):
results.append(result)
else:
results.append(previous + field_name)
return results
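# Illustrative sketch (not part of the original module): with plain nested
# dicts/OrderedDicts the helper flattens keys into dotted paths.
if __name__ == "__main__":
    example = OrderedDict([
        ("id", 1),
        ("owner", OrderedDict([("id", 7), ("email", "a@example.com")])),
    ])
    # Expected output: ['id', 'owner.id', 'owner.email']
    print(get_field_keys(example))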
|
the-stack_0_15471 | #!/usr/bin/env python3
#
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Read more details from go/dram-init-chromebook."""
import argparse
import logging
from cros.factory.device import device_utils
from cros.factory.utils.type_utils import Enum
ARCH = Enum(['x86', 'arm'])
MRC_CACHE_SECTIONS = (
'RECOVERY_MRC_CACHE', # For x86 recovery mode
'RW_MRC_CACHE', # For x86 normal mode
'RW_DDR_TRAINING', # For ARM (Mediatek)
'RO_DDR_TRAINING', # For ARM (Qualcomm)
)
def GetMRCSections(dut):
with dut.temp.TempFile() as temp_file:
dut.CheckCall('flashrom -p host -r %s -i FMAP' % temp_file, log=True)
fmap_sections = dut.CheckOutput('dump_fmap -p %s' % temp_file, log=True)
mrc_sections = []
for section_info in fmap_sections.splitlines():
section_name = section_info.split()[0]
if section_name in MRC_CACHE_SECTIONS:
mrc_sections.append(section_name)
return mrc_sections
def EraseTrainingData(dut):
mrc_sections = GetMRCSections(dut)
if mrc_sections:
cmd = ['flashrom', '-p', 'host', '-E']
for section in mrc_sections:
cmd += ['-i', section]
dut.CheckCall(cmd, log=True)
if 'RECOVERY_MRC_CACHE' in mrc_sections:
# Set next boot to recovery mode to retrain RECOVERY_MRC_CACHE first.
# And it'll reboot automatically and retrain RW_MRC_CACHE.
dut.CheckCall('crossystem recovery_request=0xC4', log=True)
def VerifyTrainingData(dut):
arch = dut.CheckOutput('crossystem arch').strip()
# Currently we don't have a tool to verify training data on ARM platforms,
# but the system should run memory test after DRAM calibration.
if arch == ARCH.arm:
return
mrc_sections = GetMRCSections(dut)
with dut.temp.TempFile() as temp_file:
for section in mrc_sections:
dut.CheckCall(
'flashrom -p host -r /dev/null -i %s:%s' % (section, temp_file),
log=True)
dut.CheckCall('futility validate_rec_mrc %s' % temp_file, log=True)
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description='MRC cache tool for memory training and verification.',
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--erase',
action='store_true',
help='Erase old training data, you need to reboot to trigger retrain')
group.add_argument(
'--verify', action='store_true', help='Verify the training data')
args = parser.parse_args()
dut = device_utils.CreateDUTInterface()
if args.erase:
EraseTrainingData(dut)
elif args.verify:
VerifyTrainingData(dut)
if __name__ == '__main__':
main()
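# Example invocations (sketch; the script name and DUT environment are
# assumptions, and flashrom/dump_fmap must be available on the device):
#   mrc_cache.py --erase    # wipe training data; memory retrains on the next boot
#   mrc_cache.py --verify   # validate the MRC cache via futility validate_rec_mrc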
|
the-stack_0_15472 | class Solution:
def isPalindrome(self, x: int) -> bool:
r = self.reverseNumber(x)
if x != r:
return False
return True
def reverseNumber(self, x: int) -> int:
result = 0
remaining = abs(x)
while remaining != 0:
result *= 10
result += remaining % 10
remaining //= 10
return result
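# Worked example (sketch): reverseNumber(121) accumulates 0*10+1=1, 1*10+2=12,
# 12*10+1=121, so isPalindrome(121) is True; isPalindrome(-121) is False because
# the sign is dropped before reversing (-121 != 121).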
x = -121
test = Solution()
res = test.isPalindrome(x)
print(res)
|
the-stack_0_15474 | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
from pylith.testing.FullTestApp import TestDriver, FullTestCase
import unittest
class TestApp(TestDriver):
"""Driver application for full-scale tests.
"""
def __init__(self):
"""Constructor.
"""
TestDriver.__init__(self)
return
def _suite(self):
"""Create test suite.
"""
suite = unittest.TestSuite()
import TestTerzaghi
for test in TestTerzaghi.test_cases():
suite.addTest(unittest.makeSuite(test))
import TestTerzaghiCompaction
for test in TestTerzaghiCompaction.test_cases():
suite.addTest(unittest.makeSuite(test))
return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
FullTestCase.parse_args()
TestApp().main()
# End of file
|
the-stack_0_15478 | import pygame
import sys; sys.path.insert(0, "..")
import tools_for_pygame as pgt
pygame.init()
__test_name__ = "animations.TextureAni"
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption(__test_name__)
clock = pygame.time.Clock()
fps = pgt.gui.Label(pos=0, font="consolas", text_size=20, color=pgt.WHITE)
t1 = pgt.filled_surface((100, 100), pgt.RED)
t2 = pgt.filled_surface((100, 100), pgt.GREEN)
t3 = pgt.filled_surface((100, 100), pgt.BLUE)
base = pygame.Surface((100, 100))
base.fill(pgt.SALMON)
e = pgt.AniElement(
pos=(100, 100),
size=(100, 100),
image=base,
animations=[
pgt.ani.TextureAni(
name="flash",
frames=[t1, t2, t3],
time=.5,
loop=False,
queued_ani=pgt.ani.TextureAni(
name="queued_flash",
frames=[t1, t2, t3],
time=1
)
)
],
rotation=45,
alpha=152
)
e.flash.start()
while True:
clock.tick()
fps.text = int(clock.get_fps())
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
screen.fill(pgt.GRAY(50))
fps.draw(screen)
e.draw(screen)
pygame.display.update()
|
the-stack_0_15480 | import contextlib
import os
import shutil
import subprocess
from tests import constants
def file_is_immutable(path):
"""Whether a file has the immutable attribute set.
Parameters
----------
path : str
An absolute path to a file.
Returns
-------
bool
True if the file's immmutable attribute is set, False if it is not.
Raises
------
CalledProcessError
If the exit status of the chattr command is non-zero.
"""
# Run the lsattr command.
lsattr_result = subprocess.run(
["lsattr", path],
check=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE,
universal_newlines=True,
)
# Extract the immutable attribute from the command output.
attributes = lsattr_result.stdout.split()[0]
immutable_flag = list(attributes)[4]
return immutable_flag == "i"
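# Parsing sketch for the helper above (the sample output is illustrative):
#   $ lsattr /tmp/demo.txt
#   ----i---------e---- /tmp/demo.txt
# attributes == "----i---------e----", and list(attributes)[4] == "i" marks the
# file as immutable.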
def set_file_immutable_attribute(path, immutable):
"""Set or unset the immutable attribute for a file.
Parameters
----------
path : str
The absolute path of a file.
immutable: bool
Set immutable attribute if True, unset immutable attribute if False.
Returns
-------
None
Raises
------
CalledProcessError
If the exit status of the chattr command is non-zero.
"""
operation = "+i" if immutable else "-i"
subprocess.run(
["sudo", "chattr", operation, path],
check=True,
stderr=subprocess.DEVNULL,
)
def set_up():
"""Create temporary directories and files.
Returns
-------
None
"""
# Ensure that tests start with a clean slate.
tear_down()
# Create testing directories.
os.makedirs(constants.EMPTY_SUBDIRECTORY_PATH)
os.makedirs(constants.GIT_SUBDIRECTORY_PATH)
os.makedirs(constants.SUBDIRECTORY_PATH)
# Create testing files.
open(constants.GIT_DIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.GIT_SUBDIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.IMMUTABLE_FILE_PATH, "x").close()
open(constants.MUTABLE_FILE_PATH, "x").close()
open(constants.SUBDIRECTORY_IMMUTABLE_FILE_PATH, "x").close()
open(constants.SUBDIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.READABLE_BY_ROOT_FILE_PATH, "x").close()
# Create testing named pipe.
os.mkfifo(constants.NAMED_PIPE_PATH)
# Create testing links.
os.symlink(constants.MUTABLE_FILE_PATH, constants.LINK_PATH)
os.symlink(
constants.SUBDIRECTORY_MUTABLE_FILE_PATH,
constants.SUBDIRECTORY_LINK_PATH,
)
# Set immutability for some testing files.
set_file_immutable_attribute(constants.IMMUTABLE_FILE_PATH, immutable=True)
set_file_immutable_attribute(
constants.SUBDIRECTORY_IMMUTABLE_FILE_PATH,
immutable=True,
)
# Set ownership and permissions of the file which is readable only by root.
os.chmod(constants.READABLE_BY_ROOT_FILE_PATH, 0o400)
subprocess.run(
["sudo", "chown", "root:root", constants.READABLE_BY_ROOT_FILE_PATH],
check=True,
)
def tear_down():
"""Delete temporary directories and files.
Returns
-------
None
"""
# Ensure all testing files are mutable, or they won't able to be deleted.
for root_dir, _, filenames in os.walk(constants.DIRECTORY_PATH):
for filename in filenames:
file_path = os.path.join(root_dir, filename)
with contextlib.suppress(subprocess.CalledProcessError):
set_file_immutable_attribute(file_path, immutable=False)
# Remove the testing directory.
try:
shutil.rmtree(constants.DIRECTORY_PATH)
except FileNotFoundError:
pass
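# Fixture usage sketch: a test module would typically call set_up() in its
# setUp()/setUpModule() hook and tear_down() afterwards. Both helpers shell out
# to chattr/chown via sudo, so the test environment needs passwordless sudo.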
|
the-stack_0_15481 | import numpy as np
import yt
from yt.data_objects.level_sets.api import Clump, find_clumps
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
data_source = ds.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], (8, "kpc"), (1, "kpc"))
# the field to be used for contouring
field = ("gas", "density")
# This is the multiplicative interval between contours.
step = 2.0
# Now we set some sane min/max values between which we want to find contours.
# This is how we tell the clump finder what to look for -- it won't look for
# contours connected below or above these threshold values.
c_min = 10 ** np.floor(np.log10(data_source[field]).min())
c_max = 10 ** np.floor(np.log10(data_source[field]).max() + 1)
# Now find get our 'base' clump -- this one just covers the whole domain.
master_clump = Clump(data_source, field)
# Add a "validator" to weed out clumps with less than 20 cells.
# As many validators can be added as you want.
master_clump.add_validator("min_cells", 20)
# Calculate center of mass for all clumps.
master_clump.add_info_item("center_of_mass")
# Begin clump finding.
find_clumps(master_clump, c_min, c_max, step)
# Save the clump tree as a reloadable dataset
fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
leaf_clumps = master_clump.leaves
# Get total cell and particle masses for each leaf clump
leaf_masses = [leaf.quantities.total_mass() for leaf in leaf_clumps]
# If you'd like to visualize these clumps, a list of clumps can be supplied to
# the "clumps" callback on a plot. First, we create a projection plot:
prj = yt.ProjectionPlot(ds, 2, field, center="c", width=(20, "kpc"))
# Next we annotate the plot with contours on the borders of the clumps
prj.annotate_clumps(leaf_clumps)
# Save the plot to disk.
prj.save("clumps")
# Reload the clump dataset.
cds = yt.load(fn)
# Clump annotation can also be done with the reloaded clump dataset.
# Remove the original clump annotation
prj.clear_annotations()
# Get the leaves and add the callback.
leaf_clumps_reloaded = cds.leaves
prj.annotate_clumps(leaf_clumps_reloaded)
prj.save("clumps_reloaded")
# Query fields for clumps in the tree.
print(cds.tree["clump", "center_of_mass"])
print(cds.tree.children[0]["grid", "density"])
print(cds.tree.children[1]["all", "particle_mass"])
# Get all of the leaf clumps.
print(cds.leaves)
print(cds.leaves[0]["clump", "cell_mass"])
|
the-stack_0_15482 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import binascii
from collections import defaultdict
import re
import subprocess
from typing import Optional, Dict, List, TYPE_CHECKING
# Prevent circular import
if TYPE_CHECKING:
from magma.pipelined.service_manager import Tables
class DatapathLookupError(Exception):
pass
class BridgeTools:
"""
BridgeTools
    Use ovs-vsctl commands to get bridge info and set up bridges for testing.
"""
TABLE_NUM_REGEX = r'table=(\d+)'
@staticmethod
def get_datapath_id(bridge_name):
"""
Gets the datapath_id by bridge_name
Hacky, call vsctl, decode output to str, strip '\n', remove '' around
the output, convert to int.
This gives the integer datapath_id that we want to run apps on, this is
needed when 2 bridges are setup, gtp_br0(main bridge) and testing_br)
"""
try:
output = subprocess.check_output(["ovs-vsctl", "get", "bridge",
bridge_name, "datapath_id"])
output_str = str(output, 'utf-8').strip()[1:-1]
output_hex = int(output_str, 16)
except subprocess.CalledProcessError as e:
raise DatapathLookupError(
'Error: ovs-vsctl bridge({}) datapath id lookup: {}'.format(
bridge_name, e
)
)
return output_hex
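    # Example (sketch): `ovs-vsctl get bridge gtp_br0 datapath_id` prints a quoted
    # hex string such as "0000e2f4b7c84f4d" (value illustrative); stripping the
    # quotes and parsing with int(..., 16) yields the integer id returned above.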
@staticmethod
def get_ofport(interface_name):
"""
Gets the ofport name ofport number of a interface
"""
try:
port_num = subprocess.check_output(["ovs-vsctl", "get", "interface",
interface_name, "ofport"])
except subprocess.CalledProcessError as e:
raise DatapathLookupError(
'Error: ovs-vsctl interface({}) of port lookup: {}'.format(
interface_name, e
)
)
return int(port_num)
@staticmethod
def create_internal_iface(bridge_name, iface_name, ip):
"""
Creates a simple bridge, sets up an interface.
Used when running unit tests
"""
subprocess.Popen(["ovs-vsctl", "add-port", bridge_name, iface_name,
"--", "set", "Interface", iface_name,
"type=internal"]).wait()
if ip is not None:
subprocess.Popen(["ifconfig", iface_name, ip]).wait()
@staticmethod
def create_bridge(bridge_name, iface_name):
"""
Creates a simple bridge, sets up an interface.
Used when running unit tests
"""
subprocess.Popen(["ovs-vsctl", "--if-exists", "del-br",
bridge_name]).wait()
subprocess.Popen(["ovs-vsctl", "add-br", bridge_name]).wait()
subprocess.Popen(["ovs-vsctl", "set", "bridge", bridge_name,
"protocols=OpenFlow10,OpenFlow13,OpenFlow14",
"other-config:disable-in-band=true"]).wait()
subprocess.Popen(["ovs-vsctl", "set-controller", bridge_name,
"tcp:127.0.0.1:6633", "tcp:127.0.0.1:6654"]).wait()
subprocess.Popen(["ifconfig", iface_name, "192.168.1.1/24"]).wait()
@staticmethod
def flush_conntrack():
"""
Cleanup the conntrack state
"""
subprocess.Popen(["ovs-dpctl", "flush-conntrack"]).wait()
@staticmethod
def destroy_bridge(bridge_name):
"""
Removes the bridge.
Used when unit test finishes
"""
subprocess.Popen(["ovs-vsctl", "del-br", bridge_name]).wait()
@staticmethod
def get_controllers_for_bridge(bridge_name):
curr_controllers = subprocess.check_output(
["ovs-vsctl", "get-controller", bridge_name],
).decode("utf-8").replace(' ', '').split('\n')
return list(filter(None, curr_controllers))
@staticmethod
def add_controller_to_bridge(bridge_name, port_num):
curr_controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
ctlr_ip = "tcp:127.0.0.1:{}".format(port_num)
if ctlr_ip in curr_controllers:
return
curr_controllers.append(ctlr_ip)
BridgeTools.set_controllers_for_bridge(bridge_name, curr_controllers)
@staticmethod
def remove_controller_from_bridge(bridge_name, port_num):
curr_controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
ctlr_ip = 'tcp:127.0.0.1:{}'.format(port_num)
curr_controllers.remove(ctlr_ip)
BridgeTools.set_controllers_for_bridge(bridge_name, curr_controllers)
@staticmethod
def set_controllers_for_bridge(bridge_name, ctlr_list):
set_cmd = ["ovs-vsctl", "set-controller", bridge_name]
set_cmd.extend(ctlr_list)
subprocess.Popen(set_cmd).wait()
@staticmethod
def get_flows_for_bridge(bridge_name, table_num=None, include_stats=True):
"""
Returns a flow dump of the given bridge from ovs-ofctl. If table_num is
specified, then only the flows for the table will be returned.
"""
if include_stats:
set_cmd = ["ovs-ofctl", "dump-flows", bridge_name]
else:
set_cmd = ["ovs-ofctl", "dump-flows", bridge_name, "--no-stats"]
if table_num:
set_cmd.append("table=%s" % table_num)
flows = \
subprocess.check_output(set_cmd).decode('utf-8').split('\n')
flows = list(filter(lambda x: (x is not None and
x != '' and
x.find("NXST_FLOW") == -1),
flows))
return flows
@staticmethod
def _get_annotated_name_by_table_num(
table_assignments: 'Dict[str, Tables]') -> Dict[int, str]:
annotated_tables = {}
# A main table may be used by multiple apps
apps_by_main_table_num = defaultdict(list)
for name in table_assignments:
apps_by_main_table_num[table_assignments[name].main_table].append(
name)
# Scratch tables are used for only one app
for ind, scratch_num in enumerate(
table_assignments[name].scratch_tables):
annotated_tables[scratch_num] = '{}(scratch_table_{})'.format(
name,
ind)
for table, apps in apps_by_main_table_num.items():
annotated_tables[table] = '{}(main_table)'.format(
'/'.join(sorted(apps)))
return annotated_tables
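    # Illustrative mapping (app names and table numbers are hypothetical): given
    #   {"ue_mac": Tables(main_table=0, scratch_tables=[]),
    #    "enforcement": Tables(main_table=5, scratch_tables=[21])}
    # the helper returns {0: "ue_mac(main_table)", 5: "enforcement(main_table)",
    #                     21: "enforcement(scratch_table_0)"}.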
@classmethod
def get_annotated_flows_for_bridge(cls, bridge_name: str,
table_assignments: 'Dict[str, Tables]',
apps: Optional[List[str]] = None,
include_stats: bool = True
) -> List[str]:
"""
Returns an annotated flow dump of the given bridge from ovs-ofctl.
table_assignments is used to annotate table number with its
corresponding app. If a note exists, the note will be decoded.
If apps is not None, then only the flows for the given apps will be
returned.
"""
annotated_tables = cls._get_annotated_name_by_table_num(
table_assignments)
def annotated_table_num(num):
if int(num) in annotated_tables:
return annotated_tables[int(num)]
return num
def parse_resubmit_action(match):
"""
resubmit(port,1) => resubmit(port,app_name(main_table))
"""
ret = ''
# We can have more than one resubmit per flow
actions = [a for a in match.group().split('resubmit') if a]
for action in actions:
resubmit_tokens = re.search(r'\((.*?)\)', action)\
.group(1).split(',')
in_port, table = resubmit_tokens[0], resubmit_tokens[1]
if ret:
ret += ','
ret += 'resubmit({},{})'.format(in_port,
annotated_table_num(table))
return ret
def parse_flow(flow):
sub_rules = [
# Annotate table number with app name
(cls.TABLE_NUM_REGEX,
lambda match: 'table={}'.format(annotated_table_num(
match.group(1)))),
(r'resubmit\((.*)\)', parse_resubmit_action),
# Decode the note
(r'note:([\d\.a-fA-F]*)',
lambda match: 'note:{}'.format(
str(binascii.unhexlify(match.group(1)
.replace('00', '')
.replace('.', ''))))),
]
for rule in sub_rules:
flow = re.sub(rule[0], rule[1], flow)
return flow
def filter_apps(flows):
if apps is None:
yield from flows
return
selected_tables = []
for app in apps:
selected_tables.append(table_assignments[app].main_table)
selected_tables.extend(table_assignments[app].scratch_tables)
for flow in flows:
table_num = int(re.search(cls.TABLE_NUM_REGEX, flow).group(1))
if table_num in selected_tables or not selected_tables:
yield flow
return [parse_flow(flow) for flow in
filter_apps(cls.get_flows_for_bridge(bridge_name,
include_stats=include_stats))]
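# Usage sketch (variable names are illustrative): dump annotated flows for one app:
#   flows = BridgeTools.get_annotated_flows_for_bridge(
#       "gtp_br0", table_assignments, apps=["enforcement"], include_stats=False)
#   for flow in flows:
#       print(flow)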
|
the-stack_0_15484 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Danny Goodall
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import input
from codefurther.directions import GetDirections
# Ask for starting point, destination and the mode of travel
starting_point = input("What is the starting point of your journey? (Southampton) : ")
end_point = input("What is the destination of your journey? (Winchester) : ")
travel_mode = input("What is mode of travel ? (walking) : ")
# Set the defaults for the starting and end points
starting_point = starting_point if starting_point else "southampton, uk"
end_point = end_point if end_point else "winchester, uk"
# Set the travel mode to walking unless it is valid
travel_mode = travel_mode if travel_mode and travel_mode.lower() in GetDirections.valid_modes else "walking"
# Let's create a directions object that we can then interact with
directions = GetDirections(starting_point, end_point, travel_mode)
# Was this route found?
if directions.found:
# Yes, so let's print out a heading...
print(directions.heading)
# Followed by each of the steps...
for step in directions.steps:
print(step)
# Followed by a footer
print(directions.footer)
else:
# If the route wasn't found, then explain to the user.
print("We couldn't find a ({}) route from {}, to {}.".format(travel_mode, starting_point, end_point))
from codefurther.directions import GetDirections
directions = GetDirections("123l123", "345345l34")
print(directions.heading)
for step in directions.steps:
print(step)
print(directions.footer)
|
the-stack_0_15486 | from datetime import date
from decimal import Decimal
from django.utils.six import text_type
from silver.tests.api.specs.utils import ResourceDefinition
unaltered = lambda input_value: input_value
# required is True by default, (a default must be specified otherwise)
# read_only is False by default,
# write_only is False by default,
document_entry_definition = ResourceDefinition("document_entry", {
'id': {
'read_only': True,
'output': lambda entry: int(entry.id),
},
'description': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.description
},
'unit': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.unit,
},
'unit_price': {
'expected_input_types': (int, float, text_type),
'output': lambda entry: "%.4f" % Decimal(entry.unit_price)
},
'quantity': {
'expected_input_types': (int, float, text_type),
'output': lambda entry: "%.4f" % Decimal(entry.quantity)
},
'total_before_tax': {
'read_only': True,
'output': lambda entry: "%.2f" % (entry.unit_price * entry.quantity)
},
'total': {
'read_only': True,
'output': lambda entry: "%.2f" % (
entry.total_before_tax * Decimal(1 + entry.document.sales_tax_percent / 100)
)
},
'start_date': {
'required': False,
'expected_input_types': date,
'output': lambda entry: entry.start_date,
},
'end_date': {
'required': False,
'expected_input_types': date,
'output': lambda entry: entry.end_date,
},
'prorated': {
'required': False,
'expected_input_types': bool,
'output': lambda entry: entry.prorated,
},
'product_code': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.product_code,
}
})
def spec_document_entry(entry):
return document_entry_definition.generate(entry)
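# Worked example (sketch): for an entry with unit_price=10, quantity=2 and a
# document sales_tax_percent of 20, 'total_before_tax' serializes to "20.00"
# and 'total' to "24.00" (20 * (1 + 20/100)).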
|
the-stack_0_15488 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, (@yungezz)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_roledefinition
version_added: "2.8"
short_description: Manage Azure Role Definition.
description:
- Create, update and delete instance of Azure Role Definition.
options:
name:
description:
- Unique name of role definition.
required: True
permissions:
description:
            - Set of role definition permissions.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
actions:
description:
- List of allowed actions.
type: list
not_actions:
description:
- List of denied actions.
type: list
data_actions:
description:
- List of allowed data actions.
type: list
not_data_actions:
description:
- List of denied data actions.
type: list
assignable_scopes:
        description: List of assignable scopes of this definition.
scope:
description: The scope of the role definition.
description:
description:
- The role definition description.
state:
description:
- Assert the state of the role definition.
- Use 'present' to create or update a role definition and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu(@yungezz)"
'''
EXAMPLES = '''
- name: Create a role definition
azure_rm_roledefinition:
name: myTestRole
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
permissions:
- actions:
- "Microsoft.Compute/virtualMachines/read"
data_actions:
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
assignable_scopes:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
'''
RETURN = '''
id:
description: Id of current role definition.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/roleDefinitionId"
'''
import uuid
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.authorization import AuthorizationManagementClient
    from azure.mgmt.authorization.models import (RoleDefinition, Permission)
except ImportError:
# This is handled in azure_rm_common
pass
permission_spec = dict(
actions=dict(
type='list',
options=dict(type='str')
),
not_actions=dict(
type='list',
options=dict(type='str')
),
data_actions=dict(
type='list',
options=dict(type='str')
),
not_data_actions=dict(
type='list',
options=dict(type='str')
),
)
def roledefinition_to_dict(role):
result = dict(
id=role.id,
name=role.name,
type=role.role_type,
assignable_scopes=role.assignable_scopes,
description=role.description,
role_name=role.role_name
)
if role.permissions:
result['permissions'] = [dict(
actions=p.actions,
not_actions=p.not_actions,
data_actions=p.data_actions,
not_data_actions=p.not_data_actions
) for p in role.permissions]
return result
class Actions:
NoAction, CreateOrUpdate, Delete = range(3)
class AzureRMRoleDefinition(AzureRMModuleBase):
"""Configuration class for an Azure RM Role definition resource"""
def __init__(self):
self.module_arg_spec = dict(
name=dict(
type='str',
required=True
),
scope=dict(
type='str'
),
permissions=dict(
type='list',
elements='dict',
options=permission_spec
),
assignable_scopes=dict(
type='list',
elements='str'
),
description=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.name = None
self.scope = None
self.permissions = None
self.description = None
self.assignable_scopes = None
self.results = dict(
changed=False,
id=None,
)
self.state = None
self.to_do = Actions.NoAction
self.role = None
self._client = None
super(AzureRMRoleDefinition, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
# get management client
self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version="2018-01-01-preview")
self.scope = self.build_scope()
# get existing role definition
old_response = self.get_roledefinition()
if old_response:
self.results['id'] = old_response['id']
self.role = old_response
if self.state == 'present':
# check if the role definition exists
if not old_response:
self.log("Role definition doesn't exist in this scope")
self.to_do = Actions.CreateOrUpdate
else:
# existing role definition, do update
self.log("Role definition already exists")
self.log('Result: {0}'.format(old_response))
# compare if role definition changed
if self.check_update(old_response):
self.to_do = Actions.CreateOrUpdate
elif self.state == 'absent':
if old_response:
self.log("Delete role defintion")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_roledefinition(old_response['name'])
self.log('role definition deleted')
else:
self.log("role definition {0} not exists.".format(self.name))
if self.to_do == Actions.CreateOrUpdate:
self.log('Need to Create/Update role definition')
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_roledefinition()
self.results['id'] = response['id']
return self.results
# build scope
def build_scope(self):
subscription_scope = '/subscriptions/' + self.subscription_id
if self.scope is None:
return subscription_scope
return self.scope
# check update
def check_update(self, old_definition):
if self.description and self.description != old_definition['properties']['description']:
return True
if self.permissions:
if len(self.permissions) != len(old_definition['permissions']):
return True
existing_permissions = self.permissions_to_set(old_definition['permissions'])
new_permissions = self.permissions_to_set(self.permissions)
if existing_permissions != new_permissions:
return True
if self.assignable_scopes and self.assignable_scopes != old_definition['assignable_scopes']:
return True
return False
def permissions_to_set(self, permissions):
new_permissions = [str(dict(
actions=(set([to_native(a) for a in item.get('actions')]) if item.get('actions') else None),
not_actions=(set([to_native(a) for a in item.get('not_actions')]) if item.get('not_actions') else None),
data_actions=(set([to_native(a) for a in item.get('data_actions')]) if item.get('data_actions') else None),
not_data_actions=(set([to_native(a) for a in item.get('not_data_actions')]) if item.get('not_data_actions') else None),
)) for item in permissions]
return set(new_permissions)
def create_update_roledefinition(self):
'''
Creates or updates role definition.
:return: deserialized role definition
'''
self.log("Creating / Updating role definition {0}".format(self.name))
try:
permissions = None
if self.permissions:
permissions = [AuthorizationManagementClient.models("2018-01-01-preview").Permission(
actions=p.get('actions', None),
not_actions=p.get('not_actions', None),
data_actions=p.get('data_actions', None),
not_data_actions=p.get('not_data_actions', None)
) for p in self.permissions]
role_definition = AuthorizationManagementClient.models("2018-01-01-preview").RoleDefinition(
role_name=self.name,
description=self.description,
permissions=permissions,
assignable_scopes=self.assignable_scopes,
role_type='CustomRole')
if self.role:
role_definition.name = self.role['name']
response = self._client.role_definitions.create_or_update(role_definition_id=self.role['name'] if self.role else str(uuid.uuid4()),
scope=self.scope,
role_definition=role_definition)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create role definition.')
self.fail("Error creating role definition: {0}".format(str(exc)))
return roledefinition_to_dict(response)
def delete_roledefinition(self, role_definition_id):
'''
Deletes specified role definition.
:return: True
'''
self.log("Deleting the role definition {0}".format(self.name))
scope = self.build_scope()
try:
response = self._client.role_definitions.delete(scope=scope,
role_definition_id=role_definition_id)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as e:
self.log('Error attempting to delete the role definition.')
self.fail("Error deleting the role definition: {0}".format(str(e)))
return True
def get_roledefinition(self):
'''
Gets the properties of the specified role definition.
:return: deserialized role definition state dictionary
'''
self.log("Checking if the role definition {0} is present".format(self.name))
response = None
try:
response = list(self._client.role_definitions.list(scope=self.scope))
if len(response) > 0:
self.log("Response : {0}".format(response))
roles = []
for r in response:
if r.role_name == self.name:
roles.append(r)
if len(roles) == 1:
self.log("role definition : {0} found".format(self.name))
return roledefinition_to_dict(roles[0])
if len(roles) > 1:
self.fail("Found multiple role definitions: {0}".format(roles))
except CloudError as ex:
self.log("Didn't find role definition {0}".format(self.name))
return False
def main():
"""Main execution"""
AzureRMRoleDefinition()
if __name__ == '__main__':
main()
|
the-stack_0_15489 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets, ModelChoiceField
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from filer.models.imagemodels import Image
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.mixins import ImagePropertyMixin
from cmsplugin_cascade.widgets import MultipleCascadingSizeWidget
from cmsplugin_cascade.link.config import LinkPluginBase, LinkElementMixin, LinkForm
from .image import ImageFormMixin, ImageAnnotationMixin
from .utils import BS3_BREAKPOINT_KEYS, reduce_breakpoints, get_picture_elements
class BootstrapPicturePlugin(ImageAnnotationMixin, LinkPluginBase):
name = _("Picture")
model_mixins = (ImagePropertyMixin, LinkElementMixin,)
module = 'Bootstrap'
parent_classes = ['BootstrapColumnPlugin', 'SimpleWrapperPlugin']
require_parent = True
allow_children = False
raw_id_fields = ('image_file',)
admin_preview = False
ring_plugin = 'PicturePlugin'
render_template = 'cascade/bootstrap3/linked-picture.html'
default_css_class = 'img-responsive'
default_css_attributes = ('image_shapes',)
html_tag_attributes = {'image_title': 'title', 'alt_tag': 'tag'}
fields = ('image_file',) + LinkPluginBase.fields
RESIZE_OPTIONS = (('upscale', _("Upscale image")), ('crop', _("Crop image")),
('subject_location', _("With subject location")),
('high_resolution', _("Optimized for Retina")),)
responsive_heights = GlossaryField(
MultipleCascadingSizeWidget(BS3_BREAKPOINT_KEYS, allowed_units=['px', '%'], required=False),
label=_("Adapt Picture Heights"),
initial={'xs': '100%', 'sm': '100%', 'md': '100%', 'lg': '100%'},
help_text=_("Heights of picture in percent or pixels for distinct Bootstrap's breakpoints."),
)
responsive_zoom = GlossaryField(
MultipleCascadingSizeWidget(BS3_BREAKPOINT_KEYS, allowed_units=['%'], required=False),
label=_("Adapt Picture Zoom"),
initial={'xs': '0%', 'sm': '0%', 'md': '0%', 'lg': '0%'},
help_text=_("Magnification of picture in percent for distinct Bootstrap's breakpoints."),
)
resize_options = GlossaryField(
widgets.CheckboxSelectMultiple(choices=RESIZE_OPTIONS),
label=_("Resize Options"),
help_text=_("Options to use when resizing the image."),
initial=['subject_location', 'high_resolution']
)
class Media:
js = ['cascade/js/admin/pictureplugin.js']
def get_form(self, request, obj=None, **kwargs):
reduce_breakpoints(self, 'responsive_heights')
image_file = ModelChoiceField(queryset=Image.objects.all(), required=False, label=_("Image"))
Form = type(str('ImageForm'), (ImageFormMixin, getattr(LinkForm, 'get_form_class')(),),
{'LINK_TYPE_CHOICES': ImageFormMixin.LINK_TYPE_CHOICES, 'image_file': image_file})
kwargs.update(form=Form)
return super(BootstrapPicturePlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
# image shall be rendered in a responsive context using the picture element
elements = get_picture_elements(context, instance)
fluid = instance.get_complete_glossary().get('fluid') == 'on'
context.update({
'is_responsive': True,
'instance': instance,
'is_fluid': fluid,
'placeholder': placeholder,
'elements': elements,
})
return context
@classmethod
def get_css_classes(cls, obj):
css_classes = cls.super(BootstrapPicturePlugin, cls).get_css_classes(obj)
css_class = obj.glossary.get('css_class')
if css_class:
css_classes.append(css_class)
return css_classes
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapPicturePlugin, cls).get_identifier(obj)
try:
content = force_text(obj.image)
except AttributeError:
content = _("No Picture")
return format_html('{0}{1}', identifier, content)
plugin_pool.register_plugin(BootstrapPicturePlugin)
|
the-stack_0_15493 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sms_service.Errors import ErrorTypeError, ErrorTokenExpired, ErrorWebServiceError
from sms_service.Interfaces import InterfaceSmsSenderAdapter
from .EnumCallturkEndpoints import EnumCallturkEndpoint
from .xml.authentication.AuthenticationXmlController import AuthenticationXmlController
from .xml.sms_send.SmsSendController import SmsSendXmlController
class CallturkSmsSender(InterfaceSmsSenderAdapter):
error_auth_instance = "`self.call_turk_auth` should be instance of `AuthenticationXmlController` not {}"
error_number_list_instance = '`number_list` type is should be `list` not {}'
REGISTERED_TOKENS = {}
def __init__(self, username=None, password=None, organization_name=None):
"""
(string, string, string) -> instance
        :param username: Callturk username
        :param password: Callturk user password
        :param organization_name: Callturk company name
"""
self.call_turk_auth = AuthenticationXmlController(username, password, organization_name)
self.registered_token_key = "{}-{}".format(username, organization_name)
self.sms_sender_ctrl = None
self.response_tuple = {}, -1
def send_sms(self):
try:
self.auth_token = CallturkSmsSender.REGISTERED_TOKENS.get(self.registered_token_key, None)
self.sms_sender_ctrl = SmsSendXmlController(self.auth_token, self.number_list, self.content)
sms_send_request_id = self.sms_sender_ctrl.send_sms()
self.response_tuple = self.handle_sms_response(sms_send_request_id), 201
except ErrorTokenExpired:
del self.auth_token
return self.send_sms()
except ErrorWebServiceError as e:
self.response_tuple = self.handle_sms_error_response(e), 200
return self.response_tuple
def handle_sms_response(self, response):
return dict(id=response)
def handle_sms_error_response(self, exception):
return dict(
message=exception.__class__.__name__,
description=str(exception),
errors=list(exception.args),
status_code=200
)
def get_log_data(self):
""" VOID
Log sms actions
"""
return dict(
method='POST',
url='{}'.format(EnumCallturkEndpoint.SMS_SEND.value),
body=dict(number_list=self.number_list, content=self.content),
is_failed=False if self.response_tuple[1] == 201 else True,
status_code=self.response_tuple[1],
response=[self.response_tuple[0]]
)
@property
def auth_token(self):
return self.__auth_token
@auth_token.setter
def auth_token(self, value):
if isinstance(value, str):
self.__auth_token = value
CallturkSmsSender.REGISTERED_TOKENS.update({self.registered_token_key: value})
else:
self.auth_token = self.call_turk_auth.get_auth_token()
@auth_token.deleter
def auth_token(self):
del self.__auth_token
del CallturkSmsSender.REGISTERED_TOKENS[self.registered_token_key]
@property
def call_turk_auth(self):
return self.__auth
@call_turk_auth.setter
def call_turk_auth(self, value):
if not isinstance(value, AuthenticationXmlController):
raise TypeError(CallturkSmsSender.error_auth_instance.format(type(value)))
self.__auth = value
@property
def number_list(self):
return self.__number_list
@number_list.setter
def number_list(self, number_list):
if not isinstance(number_list, list):
raise ErrorTypeError(CallturkSmsSender.error_number_list_instance.format(type(number_list)))
self.__number_list = number_list
@property
def content(self):
return self.__content
@content.setter
def content(self, sms_content):
self.__content = sms_content
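# Hedged usage sketch (assumption: this block is not part of the original module, and the
# credentials and phone number below are placeholders). The sender is configured with
# Callturk credentials, then recipients and message body are set through the `number_list`
# and `content` properties before calling `send_sms()`.
if __name__ == "__main__":
    sender = CallturkSmsSender(username="demo_user",
                               password="demo_password",
                               organization_name="demo_org")
    sender.number_list = ["+905551112233"]
    sender.content = "Test message"
    body, status_code = sender.send_sms()
    print(status_code, body)
    print(sender.get_log_data())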
|
the-stack_0_15494 | import re
import copy
import time
import json
import requests
from unshortenit.module import UnshortenModule
from unshortenit.exceptions import UnshortenFailed
class ShorteSt(UnshortenModule):
name = 'shortest'
domains = ['sh.st', 'festyy.com', 'ceesty.com']
def __init__(self, headers: dict = None, timeout: int = 30):
super().__init__(headers, timeout)
def unshorten(self, uri: str) -> str:
res = self.get(uri)
session_id = re.findall(r'sessionId\:(.*?)\"\,', res.text)
if len(session_id) == 0:
raise UnshortenFailed('No sessionId variable found.')
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = copy.copy(self.headers or {})
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
time.sleep(5)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = requests.get(
'http://sh.st/shortest-url/end-adsession',
params=payload,
headers=http_header,
timeout=self.timeout
)
response = r.content[6:-2].decode('utf-8')
if r.status_code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
raise UnshortenFailed('Error extracting url.')
else:
raise UnshortenFailed('Error extracting url.')
return uri
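# Hedged usage sketch (assumption: this block is not part of the original module, and the
# short link below is a made-up placeholder). Only the public `unshorten` entry point is
# exercised; failures surface as UnshortenFailed, which is imported above.
if __name__ == "__main__":
    resolver = ShorteSt(headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
    try:
        print(resolver.unshorten("http://sh.st/abcdef"))
    except UnshortenFailed as exc:
        print("Could not resolve the link:", exc)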
|
the-stack_0_15495 | import itertools
import os
from collections import defaultdict
import dbt.utils
import dbt.include
import dbt.tracking
from dbt.utils import get_materialization, NodeType, is_type
from dbt.linker import Linker
import dbt.context.runtime
import dbt.contracts.project
import dbt.exceptions
import dbt.flags
import dbt.loader
import dbt.config
import dbt.clients.system
import dbt.clients.jinja
import dbt.writer
from dbt.contracts.graph.compiled import InjectedCTE, COMPILED_TYPES
from dbt.contracts.graph.parsed import ParsedNode
from dbt.logger import GLOBAL_LOGGER as logger
graph_file_name = 'graph.gpickle'
def _compiled_type_for(model: ParsedNode):
if model.resource_type not in COMPILED_TYPES:
raise dbt.exceptions.InternalException(
'Asked to compile {} node, but it has no compiled form'
.format(model.resource_type)
)
return COMPILED_TYPES[model.resource_type]
def print_compile_stats(stats):
names = {
NodeType.Model: 'model',
NodeType.Test: 'test',
NodeType.Snapshot: 'snapshot',
NodeType.Analysis: 'analyse',
NodeType.Macro: 'macro',
NodeType.Operation: 'operation',
NodeType.Seed: 'seed file',
NodeType.Source: 'source',
}
results = {k: 0 for k in names.keys()}
results.update(stats)
stat_line = ", ".join(
[dbt.utils.pluralize(ct, names.get(t)) for t, ct in results.items()])
logger.info("Found {}".format(stat_line))
def _add_prepended_cte(prepended_ctes, new_cte):
for cte in prepended_ctes:
if cte.id == new_cte.id:
cte.sql = new_cte.sql
return
prepended_ctes.append(new_cte)
def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
for new_cte in new_prepended_ctes:
_add_prepended_cte(prepended_ctes, new_cte)
def prepend_ctes(model, manifest):
model, _, manifest = recursively_prepend_ctes(model, manifest)
return (model, manifest)
def recursively_prepend_ctes(model, manifest):
if model.extra_ctes_injected:
return (model, model.extra_ctes, manifest)
if dbt.flags.STRICT_MODE:
assert isinstance(model, tuple(COMPILED_TYPES.values())), \
'Bad model type: {}'.format(type(model))
prepended_ctes = []
for cte in model.extra_ctes:
cte_id = cte.id
cte_to_add = manifest.nodes.get(cte_id)
cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
cte_to_add, manifest)
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.name)
sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
_add_prepended_cte(prepended_ctes, InjectedCTE(id=cte_id, sql=sql))
model.prepend_ctes(prepended_ctes)
manifest.update_node(model)
return (model, prepended_ctes, manifest)
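# Hedged illustration (assumption: this comment block is not part of the original file).
# For an ephemeral model named `stg_orders`, the logic above injects its compiled SQL as a
# CTE named with the '__dbt__CTE__' prefix, so a dependent model ends up compiling to
# roughly:
#
#     with  __dbt__CTE__stg_orders as (
#         ...compiled SQL of the ephemeral model...
#     )
#     select ... from __dbt__CTE__stg_orders
#
# The final `with` clause assembly is handled by the node's own prepend_ctes(), which is
# called just above.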
class Compiler:
def __init__(self, config):
self.config = config
def initialize(self):
dbt.clients.system.make_directory(self.config.target_path)
dbt.clients.system.make_directory(self.config.modules_path)
def compile_node(self, node, manifest, extra_context=None):
if extra_context is None:
extra_context = {}
logger.debug("Compiling {}".format(node.unique_id))
data = node.to_dict()
data.update({
'compiled': False,
'compiled_sql': None,
'extra_ctes_injected': False,
'extra_ctes': [],
'injected_sql': None,
})
compiled_node = _compiled_type_for(node).from_dict(data)
context = dbt.context.runtime.generate(
compiled_node, self.config, manifest)
context.update(extra_context)
compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
node.raw_sql,
context,
node)
compiled_node.compiled = True
injected_node, _ = prepend_ctes(compiled_node, manifest)
should_wrap = {NodeType.Test, NodeType.Operation}
if injected_node.resource_type in should_wrap:
# data tests get wrapped in count(*)
# TODO : move this somewhere more reasonable
if 'data' in injected_node.tags and \
is_type(injected_node, NodeType.Test):
injected_node.wrapped_sql = (
"select count(*) as errors "
"from (\n{test_sql}\n) sbq").format(
test_sql=injected_node.injected_sql)
else:
# don't wrap schema tests or analyses.
injected_node.wrapped_sql = injected_node.injected_sql
elif is_type(injected_node, NodeType.Snapshot):
# unfortunately we do everything automagically for
# snapshots. in the future it'd be nice to generate
# the SQL at the parser level.
pass
elif(is_type(injected_node, NodeType.Model) and
get_materialization(injected_node) == 'ephemeral'):
pass
else:
injected_node.wrapped_sql = None
return injected_node
def write_graph_file(self, linker, manifest):
filename = graph_file_name
graph_path = os.path.join(self.config.target_path, filename)
if dbt.flags.WRITE_JSON:
linker.write_graph(graph_path, manifest)
def link_node(self, linker, node, manifest):
linker.add_node(node.unique_id)
for dependency in node.depends_on_nodes:
if manifest.nodes.get(dependency):
linker.dependency(
node.unique_id,
(manifest.nodes.get(dependency).unique_id))
else:
dbt.exceptions.dependency_not_found(node, dependency)
def link_graph(self, linker, manifest):
for node in manifest.nodes.values():
self.link_node(linker, node, manifest)
cycle = linker.find_cycles()
if cycle:
raise RuntimeError("Found a cycle: {}".format(cycle))
def compile(self, manifest, write=True):
linker = Linker()
self.link_graph(linker, manifest)
stats = defaultdict(int)
for node_name, node in itertools.chain(
manifest.nodes.items(),
manifest.macros.items()):
stats[node.resource_type] += 1
if write:
self.write_graph_file(linker, manifest)
print_compile_stats(stats)
return linker
def compile_manifest(config, manifest, write=True):
compiler = Compiler(config)
compiler.initialize()
return compiler.compile(manifest, write=write)
def _is_writable(node):
if not node.injected_sql:
return False
if dbt.utils.is_type(node, NodeType.Snapshot):
return False
return True
def compile_node(adapter, config, node, manifest, extra_context, write=True):
compiler = Compiler(config)
node = compiler.compile_node(node, manifest, extra_context)
node = _inject_runtime_config(adapter, node, extra_context)
if write and _is_writable(node):
logger.debug('Writing injected SQL for node "{}"'.format(
node.unique_id))
written_path = dbt.writer.write_node(
node,
config.target_path,
'compiled',
node.injected_sql)
node.build_path = written_path
return node
def _inject_runtime_config(adapter, node, extra_context):
wrapped_sql = node.wrapped_sql
context = _node_context(adapter, node)
context.update(extra_context)
sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)
node.wrapped_sql = sql
return node
def _node_context(adapter, node):
return {
"run_started_at": dbt.tracking.active_user.run_started_at,
"invocation_id": dbt.tracking.active_user.invocation_id,
}
|
the-stack_0_15496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: View components for Python SDK sample.
"""
import wx
import wx.lib.agw.labelbook as LB
from wx.lib.agw.fmresources import INB_FIT_LABELTEXT
from wx.lib.agw.fmresources import INB_LEFT
from wx.lib.agw.fmresources import INB_NO_RESIZE
from view.panel_detection import DetectionPanel
from view.panel_subscription import SubscriptionPanel
from view.panel_find_similar import FindSimilarPanel
from view.panel_group import GroupPanel
from view.panel_identification import IdentificationPanel
from view.panel_verification import VerificationPanel
TITLE = u"Microsoft Cognitive Services Face Samples"
class MyLabelBook(LB.LabelBook):
"""LabelBook part in Main Frame."""
def __init__(self, parent):
agw_style = INB_LEFT | INB_FIT_LABELTEXT | INB_NO_RESIZE
super(MyLabelBook, self).__init__(parent, agwStyle=agw_style)
subscription_panel = SubscriptionPanel(self)
subscription_text = u"Subscription Key Management"
self.AddPage(subscription_panel, subscription_text, True)
self.AddPage(wx.Panel(self), u"Select a scenario:")
self.EnableTab(1, False)
self.AddPage(DetectionPanel(self), u" - Face Detection")
self.AddPage(FindSimilarPanel(self), u" - Face Find Similar")
self.AddPage(GroupPanel(self), u" - Face Grouping")
self.AddPage(IdentificationPanel(self), u" - Face Identification")
self.AddPage(VerificationPanel(self), u" - Face Verification")
class MyTitle(wx.Panel):
"""Title part in Main Frame."""
def __init__(self, parent):
super(MyTitle, self).__init__(parent)
self.SetBackgroundColour('#00b294')
self.SetMinSize((-1, 80))
sizer = wx.BoxSizer()
sizer.AddStretchSpacer()
family = wx.FONTFAMILY_DEFAULT
style = wx.FONTSTYLE_NORMAL
weight = wx.FONTWEIGHT_NORMAL
font = wx.Font(20, family, style, weight)
self.text = wx.StaticText(self, label=TITLE, style=wx.ALIGN_CENTER)
self.text.SetFont(font)
sizer.Add(self.text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.AddStretchSpacer()
self.SetSizer(sizer)
class MyFrame(wx.Frame):
"""Main Frame."""
def __init__(self, parent):
super(MyFrame, self).__init__(parent, title=TITLE, size=(1280, 768))
icon_path = 'Assets/Microsoft-logo_rgb_c-gray.png'
self.SetIcon(wx.Icon(icon_path))
sizer = wx.BoxSizer(wx.VERTICAL)
self.title = MyTitle(self)
sizer.Add(self.title, flag=wx.EXPAND)
self.book = MyLabelBook(self)
sizer.Add(self.book, 1, flag=wx.EXPAND)
status_text = (
'Microsoft will receive the images you upload and may use them to '
'improve Face API and related services. By submitting an image, '
'you confirm you have consent from everyone in it.'
)
self.status = wx.StatusBar(self)
self.status.SetStatusText(status_text)
sizer.Add(self.status, flag=wx.EXPAND)
self.SetSizer(sizer)
self.Layout()
class MyApp(wx.App):
"""The whole app."""
def OnInit(self):
"""Show main frame."""
frame = MyFrame(None)
frame.Show()
return True
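# Hedged launcher sketch (assumption: this block is not part of the original file; the
# sample is presumably started from a separate entry script). It shows the conventional
# wxPython startup using the MyApp class defined above.
if __name__ == '__main__':
    app = MyApp(False)   # False: do not redirect stdout/stderr to a wx window
    app.MainLoop()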
|
the-stack_0_15497 | from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from faker import Factory
from app_dir.factories import OrderFactory
faker = Factory.create()
class CreateOrderRevolut(TestCase):
def setUp(self):
self.order = OrderFactory()
self.client = APIClient()
self.namespace = 'revolut_integration_api'
self.body = {
'amount': faker.amount(),
'capture_mode': faker.capture_mode(),
'merchant_order_ext_ref': faker.merchant_order_ext_ref(),
'email': faker.email(),
'currency': faker.currency(),
}
self.create_url = reverse(self.namespace + ':create-order')
def test_create_order(self):
response = self.client.post(self.create_url, self.body, format='json')
self.assertEqual(201, response.status_code) |
the-stack_0_15498 | import numpy as np
import time, sys, math
from collections import deque
import sounddevice as sd
from src.utils import *
class Stream_Reader:
"""
    The Stream_Reader continuously reads data from a selected sound source using sounddevice.
    Arguments:
        device: int or None:    Select which audio stream to read.
        rate: float or None:    Sample rate to use. Defaults to something supported.
        updates_per_second: int: How often to record new data.
"""
def __init__(self,
device = None,
rate = None,
updates_per_second = 1000,
FFT_window_size = None,
verbose = False):
print("Available audio devices:")
device_dict = sd.query_devices()
print(device_dict)
try:
sd.check_input_settings(device=device, channels=1, dtype=np.float32, extra_settings=None, samplerate=rate)
except:
print("Input sound settings for device %s and samplerate %s Hz not supported, using defaults..." %(str(device), str(rate)))
rate = None
device = None
self.rate = rate
if rate is not None:
sd.default.samplerate = rate
self.device = device
if device is not None:
sd.default.device = device
self.verbose = verbose
self.data_buffer = None
# This part is a bit hacky, need better solution for this:
# Determine what the optimal buffer shape is by streaming some test audio
self.optimal_data_lengths = []
with sd.InputStream(samplerate=self.rate,
blocksize=0,
device=self.device,
channels=1,
dtype=np.float32,
latency='low',
callback=self.test_stream_read):
time.sleep(0.2)
self.update_window_n_frames = max(self.optimal_data_lengths)
del self.optimal_data_lengths
#Alternative:
#self.update_window_n_frames = round_up_to_even(44100 / updates_per_second)
self.stream = sd.InputStream(
samplerate=self.rate,
blocksize=self.update_window_n_frames,
device=2, # TODO: this needs tweak based on the actual devices on PC
channels=1,
dtype=np.float32,
latency='low',
extra_settings=None,
callback=self.non_blocking_stream_read)
self.rate = self.stream.samplerate
self.device = self.stream.device
self.updates_per_second = self.rate / self.update_window_n_frames
self.info = ''
self.data_capture_delays = deque(maxlen=20)
self.new_data = False
if self.verbose:
self.data_capture_delays = deque(maxlen=20)
self.num_data_captures = 0
self.device_latency = device_dict[self.device]['default_low_input_latency']
print("\n##################################################################################################")
print("\nDefaulted to using first working mic, Running on mic %s with properties:" %str(self.device))
print(device_dict[self.device])
print('Which has a latency of %.2f ms' %(1000*self.device_latency))
print("\n##################################################################################################")
print('Recording audio at %d Hz\nUsing (non-overlapping) data-windows of %d samples (updating at %.2ffps)'
%(self.rate, self.update_window_n_frames, self.updates_per_second))
def non_blocking_stream_read(self, indata, frames, time_info, status):
if self.verbose:
start = time.time()
if status:
print(status)
if self.data_buffer is not None:
self.data_buffer.append_data(indata[:,0])
self.new_data = True
if self.verbose:
self.num_data_captures += 1
self.data_capture_delays.append(time.time() - start)
return
def test_stream_read(self, indata, frames, time_info, status):
'''
Dummy function to determine what blocksize the stream is using
'''
self.optimal_data_lengths.append(len(indata[:,0]))
return
def stream_start(self, data_windows_to_buffer = None):
self.data_windows_to_buffer = data_windows_to_buffer
if data_windows_to_buffer is None:
self.data_windows_to_buffer = int(self.updates_per_second / 2) #By default, buffer 0.5 second of audio
else:
self.data_windows_to_buffer = data_windows_to_buffer
self.data_buffer = numpy_data_buffer(self.data_windows_to_buffer, self.update_window_n_frames)
print("\n--🎙 -- Starting live audio stream...\n")
self.stream.start()
self.stream_start_time = time.time()
def terminate(self):
print("👋 Sending stream termination command...")
self.stream.stop() |
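# Hedged usage sketch (assumption: this block is not part of the original module). It wires
# the reader together in the usual way: construct it, start the stream with a buffer of data
# windows, poll the `new_data` flag, and finally terminate. No method of the internal
# `numpy_data_buffer` helper is assumed here beyond what the class above already uses.
if __name__ == "__main__":
    reader = Stream_Reader(device=None, rate=None, verbose=True)
    reader.stream_start(data_windows_to_buffer=50)
    windows_seen = 0
    try:
        while windows_seen < 100:
            if reader.new_data:
                reader.new_data = False
                windows_seen += 1
            time.sleep(0.005)
    finally:
        reader.terminate()
    print("Captured %d data windows." % windows_seen)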
the-stack_0_15500 | import _plotly_utils.basevalidators
class YperiodalignmentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="yperiodalignment", parent_name="funnel", **kwargs):
super(YperiodalignmentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["start", "middle", "end"]),
**kwargs
)
|
the-stack_0_15501 | def evalRec(env, rec):
"""Has Damaging Predictions"""
if (rec.Severity > 2):
return True
# 2.a. Present in ClinVar Path, Likely Path, VUS (worst annotation).
clinvar_clinically_significant = (rec.Clinvar_Benign == False) \
and (rec.Clinvar_Trusted_Benign in {False, "No data"})
if (clinvar_clinically_significant):
return True
# Include Splice Altering variants
if (rec.splice_ai_dsmax > 0.2):
return True
if len(rec.Polyphen &
{"possibly_damaging", "probably_damaging"}) > 0:
return True
if (len(rec.Polyphen_2_HVAR) > 0 and
len(rec.Polyphen_2_HVAR - {"P", "D"}) == 0):
return True
if (len(rec.Polyphen_2_HDIV) > 0 and
len(rec.Polyphen_2_HDIV - {"P", "D"}) == 0):
return True
return len(rec.SIFT &
{"deleterious", "tolerated_low_confidence"}) > 0
|
the-stack_0_15502 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.storage.v1", manifest={"AvroSchema", "AvroRows",},
)
class AvroSchema(proto.Message):
r"""Avro schema.
Attributes:
schema (str):
Json serialized schema, as described at
https://avro.apache.org/docs/1.8.1/spec.html.
"""
schema = proto.Field(proto.STRING, number=1,)
class AvroRows(proto.Message):
r"""Avro rows.
Attributes:
serialized_binary_rows (bytes):
Binary serialized rows in a block.
row_count (int):
The count of rows in the returning block.
"""
serialized_binary_rows = proto.Field(proto.BYTES, number=1,)
row_count = proto.Field(proto.INT64, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_15503 | import numpy as np
def laplace_numpy(image):
"""Laplace operator in NumPy for 2D images."""
laplacian = (
image[:-2, 1:-1]
+ image[2:, 1:-1]
+ image[1:-1, :-2]
+ image[1:-1, 2:]
- 4 * image[1:-1, 1:-1]
)
thresh = np.abs(laplacian) > 0.05
return thresh
def laplace_loops(image):
"""Laplace operator for 2D images."""
h = image.shape[0]
w = image.shape[1]
laplacian = np.empty((h - 2, w - 2), np.uint8)
for i in range(1, h - 1):
for j in range(1, w - 1):
laplacian[i - 1, j - 1] = (
np.abs(
image[i - 1, j]
+ image[i + 1, j]
+ image[i, j - 1]
+ image[i, j + 1]
- 4 * image[i, j]
)
> 0.05
)
return laplacian
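# Hedged usage sketch (assumption: this block is not part of the original module). It only
# relies on NumPy and the two functions above, and checks that the vectorized and loop-based
# Laplacian edge detectors agree on a random grayscale image.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.random((64, 64))            # synthetic image with values in [0, 1)
    edges_vec = laplace_numpy(image)        # boolean mask from the vectorized version
    edges_loop = laplace_loops(image)       # uint8 mask from the explicit loops
    agreement = np.mean(edges_vec == edges_loop.astype(bool))
    print("shapes:", edges_vec.shape, edges_loop.shape, "agreement:", agreement)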
|
the-stack_0_15507 | import os.path
import numpy as np
import itertools
import Tools
import statsmodels.tsa.stattools
# Those patterns are used for tests and benchmarks.
# For tests, there is the need to add tests for saturation
def cartesian(*somelists):
r=[]
for element in itertools.product(*somelists):
r.append(element)
return(r)
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[result.size//2:]
def writeTests(config,format):
config.setOverwrite(False)
NBSAMPLES=128
inputsA=np.random.randn(NBSAMPLES)
inputsB=np.random.randn(NBSAMPLES)
inputsA = Tools.normalize(inputsA)
inputsB = Tools.normalize(inputsB)
if format==31:
# To avoid overflow. There is no saturation in CMSIS code for Q31 conv/corr
inputsA = inputsA / 16
inputsB = inputsB / 16
config.writeInput(1, inputsA,"InputsA")
config.writeInput(1, inputsB,"InputsB")
a = [1,2,3,Tools.loopnb(format,Tools.TAILONLY),
Tools.loopnb(format,Tools.BODYONLY),
Tools.loopnb(format,Tools.BODYANDTAIL)
]
a = list(np.unique(np.array(a)))
if format == 15:
nbs = [(14, 15), (14, 16), (14, 17), (14, 18), (14, 33), (15, 15),
(15, 16), (15, 17), (15, 18), (15, 33), (16, 15), (16, 16),
(16, 17), (16, 18), (16, 33), (17, 15), (17, 16), (17, 17),
(17, 18), (17, 33), (32, 15), (32, 16), (32, 17), (32, 18), (32, 33)]
elif format == 7 :
nbs = [(30, 31), (30, 32), (30, 33), (30, 34), (30, 49), (31, 31),
(31,32), (31, 33), (31, 34), (31, 49), (32, 31), (32, 32),
(32, 33), (32,34), (32, 49), (33, 31), (33, 32), (33, 33), (33, 34),
(33, 49), (48,31), (48, 32), (48, 33), (48, 34), (48, 49)]
else:
nbs = [(4, 1), (4, 2), (4, 3), (4, 8), (4, 11), (5, 1), (5, 2), (5, 3), (5, 8), (5, 11), (6, 1), (6, 2), (6, 3), (6, 8), (6, 11), (9, 1), (9, 2),
(9, 3), (9, 8), (9, 11), (10, 1), (10, 2), (10, 3), (10, 8), (10, 11), (11, 1), (11, 2), (11, 3), (11, 8), (11, 11), (12, 1), (12, 2),
(12, 3), (12, 8), (12, 11), (13, 1), (13, 2), (13, 3), (13, 8), (13, 11)]
nbTest = 1
for (na,nb) in nbs:
#print(na,nb)
ref = np.correlate(inputsA[0:na],inputsB[0:nb],"full")
if na > nb:
padding = na - nb
z = np.zeros(padding)
ref = np.concatenate((z,ref))
else:
padding = nb - na
z = np.zeros(padding)
ref = np.concatenate((ref,z))
config.writeReference(nbTest, ref)
nbTest = nbTest + 1
for (na,nb) in nbs:
#print(na,nb)
ref = np.convolve(inputsA[0:na],inputsB[0:nb],"full")
config.writeReference(nbTest, ref)
nbTest = nbTest + 1
# Levinson durbin tests
config.setOverwrite(True)
a = [Tools.loopnb(format,Tools.TAILONLY),
Tools.loopnb(format,Tools.BODYONLY),
Tools.loopnb(format,Tools.BODYANDTAIL),
]
a = list(np.unique(np.array(a)))
#a = [3]
# Errors of each levinson durbin test
err=[]
errTestID = nbTest
for na in a:
s = np.random.randn(na+1)
s = Tools.normalize(s)
phi = autocorr(s)
phi = Tools.normalize(phi)
config.writeInput(nbTest, phi,"InputPhi")
sigmav,arcoef,pacf,sigma,phi=statsmodels.tsa.stattools.levinson_durbin(phi,nlags=na,isacov=True)
err.append(sigmav)
config.writeReference(nbTest, arcoef)
nbTest = nbTest + 1
config.writeReference(errTestID, err,"LDErrors")
def generatePatterns():
PATTERNDIR = os.path.join("Patterns","DSP","Filtering","MISC","MISC")
PARAMDIR = os.path.join("Parameters","DSP","Filtering","MISC","MISC")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configf16=Tools.Config(PATTERNDIR,PARAMDIR,"f16")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7=Tools.Config(PATTERNDIR,PARAMDIR,"q7")
writeTests(configf32,0)
writeTests(configf16,16)
writeTests(configq31,31)
writeTests(configq15,15)
writeTests(configq7,7)
if __name__ == '__main__':
generatePatterns()
|
the-stack_0_15508 | #!/usr/bin/env python3
"""
Input: collaboration bipartite graph X-Y and weights on X.
Output: X' = downsampled set of nodes of X (from bipartite graph X-Y) such that each node connects to at most 10 nodes in Y
(e.g. the paper has at most 10 authors) and its weight is at least 5 (e.g. the number of citations is at least 5).
To ensure that the resulting bipartite graph X'-Y' is connected, we downsample X (with the above restrictions) by performing random walks on the X-graph
(e.g. random walks on the papers graph - restricted to papers that have at least 5 citations and at most 10 authors -
where two papers are connected if they have at least one author in common).
"""
import numpy as np
from scipy import sparse
import pandas as pd
import networkx as nx
from networkx.algorithms import bipartite as nxb
import scipy
from scipy import sparse
from scipy.sparse import coo_matrix
from random import shuffle
import time
def starting_node_random_walk(bipartite,weights_x, min_weight=100, max_dim=10 ):
"""
Sample random node in X (from bipartite graph X-Y) with the restriction that it does not connect to more
than "max_dim" nodes in Y and that its weight is more than "min_weight"
Parameters
----------
bipartite : scipy sparse matrix
bipartite collaboration graph X-Y
weights_x : ndarray
Array of size bipartite.shape[0], containing the weights on the node of X
min_weight : float
minimum weight of the sampled node
max_dim : int
maximum number of adjacent nodes in Y
Returns
-------
start : starting node of the random walk
"""
Al=bipartite.tolil()
rows=Al.rows
seeds_papers=[]
for j, el in enumerate(rows[np.where(weights_x>100)]):
if len(el)<max_dim:
#print('Paper {} has {} authors and {} citations'.format(np.where(weights_x>100)[0][j],len(el),weights_x[np.where(weights_x>100)][j]))
seeds_papers.append(np.where(weights_x>100)[0][j])
copy_seed=np.copy(seeds_papers)
shuffle(copy_seed)
start=copy_seed[0]
return int(start)
def subsample_node_x(adjaceny_graph_x,bipartite,weights_x, min_weight=5, max_dim=10,length_walk=80):
""""
Downsample set of nodes X' of X (from bipartite graph X-Y) such that each node connects to at most 10 nodes in Y
(eg the paper has at most 10 authors) and its weights are at least 5 (eg the number of citation is at least 5).
To ensure that the resulting bipartite graph X'-Y' is connected we downsampled X (with the above restrictions) by performing random walks on the X-graph.
(eg performing random walks on the papers graph -restricted to papers that have at least 5 citations and at most 10 authors-
where two papers are connected if they have at least one author in common)
Parameters
----------
adjaceny_graph_x : scipy sparse matrix
adjacency matrix of X (from the bipartite graph X-Y)
bipartite : scipy sparse matrix
bipartite collaboration graph X-Y
weights_x : ndarray
Array of size bipartite.shape[0], containing the weights on the node of X
min_weight : float
minimum weight of the sampled node, default 5
max_dim : int
        maximum number of adjacent nodes in Y, default 10
length_walk : int
length of random walk with the above restrictions
Returns
-------
p: array of the downsampled nodes in X = X'
"""
start= starting_node_random_walk(bipartite,weights_x, min_weight=min_weight, max_dim=max_dim )
Al=bipartite.tolil()
rows=Al.rows
G = nx.from_scipy_sparse_matrix(adjaceny_graph_x)
new_start=start
H=nx.algorithms.traversal.breadth_first_search.bfs_edges(G, new_start, reverse=False, depth_limit=1)
e=list(H)
B=nx.Graph()
B.add_edges_from(e)
nodes=np.array(B.nodes())
down_cit=weights_x[nodes]
p=nodes[np.where(down_cit>=min_weight)]
list_seeds=[new_start]
for iterations in range(0,length_walk):
seed_papers=[]
for j, el in enumerate(rows[nodes]):
if len(el)<max_dim and weights_x[nodes[j]]>=min_weight:
seed_papers.append(nodes[j])
c=list(set(seed_papers).difference(list_seeds))
if len(c)<=1:
break
new_start=c[np.argsort(weights_x[c])[-2]]
H1=nx.algorithms.traversal.breadth_first_search.bfs_edges(G, new_start, reverse=False, depth_limit=1)
e1=list(H1)
B=nx.Graph()
B.add_edges_from(e1)
nodes=np.array(B.nodes())
down_cit=weights_x[nodes]
p1=nodes[np.where(down_cit>=min_weight)]
final=np.concatenate((p,p1))
p=np.unique(final)
list_seeds.append(new_start)
return p
if __name__ == '__main__':
start = time.time()
def timeit(name):
print('wall time ({}): {:.0f}s'.format(name, time.time() - start))
adjacency_papers = sparse.load_npz('s2_2_bipartite_graph/papers_adjacency.npz')
adjacency = scipy.sparse.load_npz('s2_2_bipartite_graph/paper_author_biadjacency.npz')
papers = pd.read_csv('s2_2_bipartite_graph/papers.csv', index_col=0)
citations=np.array(papers['citations_2019'])
starting_node=starting_node_random_walk(adjacency,weights_x=citations, min_weight=100, max_dim=10 )
print("The starting node of the random walk has ID {}".format(starting_node))
downsample= subsample_node_x(adjacency_papers,adjacency,weights_x=citations, min_weight=5, max_dim=10,length_walk=80)
timeit('process')
np.save(f's2_3_collaboration_complex/{starting_node}_downsampled.npy', downsample)
timeit('total')
|
the-stack_0_15509 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_host_dump_transfer_details import UpdateHostDumpTransferDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateCurlTransferDetails(UpdateHostDumpTransferDetails):
"""
Optional properties for Curl-based dump transfer in source or target host.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateCurlTransferDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.database_migration.models.UpdateCurlTransferDetails.kind` attribute
of this class is ``CURL`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param kind:
The value to assign to the kind property of this UpdateCurlTransferDetails.
Allowed values for this property are: "CURL", "OCI_CLI"
:type kind: str
"""
self.swagger_types = {
'kind': 'str'
}
self.attribute_map = {
'kind': 'kind'
}
self._kind = None
self._kind = 'CURL'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
the-stack_0_15510 | from compiler import *
####################################################################################################################
# Each faction record contains the following fields:
# 1) Faction id: used for referencing factions in other files.
# The prefix fac_ is automatically added before each faction id.
# 2) Faction name.
# 3) Faction flags. See header_factions.py for a list of available flags
# 4) Faction coherence. Relation between members of this faction.
# 5) Relations. This is a list of relation records.
# Each relation record is a tuple that contains the following fields:
# 5.1) Faction. Which other faction this relation is referring to
# 5.2) Value: Relation value between the two factions.
# Values range between -1 and 1.
# 6) Ranks
# 7) Faction color (default is gray)
####################################################################################################################
default_kingdom_relations = [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.05),("mountain_bandits", -0.02),("forest_bandits", -0.02)]
factions = [
("no_faction","No Faction",0, 0.9, [], []),
("commoners","Commoners",0, 0.1,[("player_faction",0.1)], []),
("outlaws","Outlaws", max_player_rating(-30), 0.5,[("commoners",-0.6),("player_faction",-0.15)], [], 0x888888),
# Factions before this point are hardwired into the game end their order should not be changed.
("neutral","Neutral",0, 0.1,[("player_faction",0.0)], [],0xFFFFFF),
("innocents","Innocents", ff_always_hide_label, 0.5,[("outlaws",-0.05)], []),
("merchants","Merchants", ff_always_hide_label, 0.5,[("outlaws",-0.5),], []),
("dark_knights","{!}Dark Knights", 0, 0.5,[("innocents",-0.9),("player_faction",-0.4)], []),
("culture_1", "{!}culture_1", 0, 0.9, [], []),
("culture_2", "{!}culture_2", 0, 0.9, [], []),
("culture_3", "{!}culture_3", 0, 0.9, [], []),
("culture_4", "{!}culture_4", 0, 0.9, [], []),
("culture_5", "{!}culture_5", 0, 0.9, [], []),
("culture_6", "{!}culture_6", 0, 0.9, [], []),
# ("swadian_caravans","Swadian Caravans", 0, 0.5,[("outlaws",-0.8), ("dark_knights",-0.2)], []),
# ("vaegir_caravans","Vaegir Caravans", 0, 0.5,[("outlaws",-0.8), ("dark_knights",-0.2)], []),
("player_faction","Player Faction",0, 0.9, [], []),
("player_supporters_faction","Player's Supporters",0, 0.9, [("player_faction",1.00),("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xFF4433), #changed name so that can tell difference if shows up on map
("kingdom_1", "Kingdom of Swadia", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xEE7744),
("kingdom_2", "Kingdom of Vaegirs", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCCBB99),
("kingdom_3", "Khergit Khanate", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC99FF),
("kingdom_4", "Kingdom of Nords", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0x33DDDD),
("kingdom_5", "Kingdom of Rhodoks", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0x33DD33),
("kingdom_6", "Sarranid Sultanate", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xDDDD33),
## ("kingdom_1_rebels", "Swadian rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_2_rebels", "Vaegir rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_3_rebels", "Khergit rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_4_rebels", "Nord rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_5_rebels", "Rhodok rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
("kingdoms_end","{!}kingdoms_end", 0, 0,[], []),
("robber_knights", "{!}robber_knights", 0, 0.1, [], []),
("khergits","{!}Khergits", 0, 0.5,[("player_faction",0.0)], []),
("black_khergits","{!}Black Khergits", 0, 0.5,[("player_faction",-0.3),("kingdom_1",-0.02),("kingdom_2",-0.02)], []),
## ("rebel_peasants","Rebel Peasants", 0, 0.5,[("vaegirs",-0.5),("player_faction",0.0)], []),
("manhunters","Manhunters", 0, 0.5,[("outlaws",-0.6),("player_faction",0.1)], []),
("deserters","Deserters", 0, 0.5,[("manhunters",-0.6),("merchants",-0.5),("player_faction",-0.1)], [], 0x888888),
("mountain_bandits","Mountain Bandits", 0, 0.5,[("commoners",-0.2),("merchants",-0.5),("manhunters",-0.6),("player_faction",-0.15)], [], 0x888888),
("forest_bandits","Forest Bandits", 0, 0.5,[("commoners",-0.2),("merchants",-0.5),("manhunters",-0.6),("player_faction",-0.15)], [], 0x888888),
("undeads","{!}Undeads", max_player_rating(-30), 0.5,[("commoners",-0.7),("player_faction",-0.5)], []),
("slavers","{!}Slavers", 0, 0.1, [], []),
("peasant_rebels","{!}Peasant Rebels", 0, 1.0,[("noble_refugees",-1.0),("player_faction",-0.4)], []),
("noble_refugees","{!}Noble Refugees", 0, 0.5,[], []),
]
|
the-stack_0_15511 | import os
import shutil
import sys
import argparse
ext_music = [".mp3", ".flac", ".aac", ".wav", ".wma", ".ape", ".alac", ".m4a", ".m4b", ".m4p", ".ogg", ".aiff", ".aif"]
ext_artwork = [".jpg", ".png", ".bmp", ".gif", ".jpeg"]
ext_extras = [".m3u", ".m3u8", ".wpl", ".pls", ".asx", ".smi", ".sami", ".xspf", ".txt", ".cue", ".log"]
ext_both = ext_music + ext_artwork
lists = [ext_music, ext_artwork, ext_both]
def file_copy(destination_location, path, file, source_location):
# manual path joining is used because os.path.join returns the second argument when two absolute paths are joined
# e.g. /foo and /bar are joined as /bar instead of /foo/bar
# whereas this method creates /foo//bar which is then normalized by normpath to /foo/bar
target_path = os.path.normpath(destination_location + os.path.sep + path[len(source_location):])
target_file = os.path.join(target_path, file)
if not os.path.exists(target_path):
try:
os.makedirs(target_path)
except OSError:
print("Unable to create the appropriate folder for", file)
return 3
if not os.path.isfile(target_file):
try:
shutil.copy2(os.path.join(path, file), target_path)
return 1 # Returned when the file was not found and the copy was successful.
except shutil.Error:
print("Copy failed.")
return 2 # Returned when the file was not found, but the copy failed.
return 0 # Returned when the file already exists in destination_location.
def create_log(log_file, source_location, destination_location, operation, extras, sync):
operation_list = ["Music only.", "Artwork only.", "Music and Artwork."]
if not os.path.exists("Logs"):
try:
os.makedirs("Logs")
except OSError:
print("ERROR: Unable to create log folder.")
return
try:
with open(os.path.join("Logs", log_file), "w+", encoding="UTF-8") as log:
log.write("Source Location: " + source_location + "\n")
log.write("Destination Location: " + destination_location + "\n")
log.write("Chosen Operation: " + operation_list[operation] + "\n")
log.write("Extra files: " + str(extras) + "\n")
log.write("Folder-Sync: " + ("True" if sync == 2 else "False") + "\n\n")
except OSError:
print("ERROR: Unable to create log-folder or logfile.")
return
def update_log(file, backup_result, log_file):
with open(os.path.join("Logs", log_file), 'a', encoding="UTF-8") as log:
if backup_result == 1:
log.write(file + "\n")
elif backup_result == 2:
log.write("Unable to copy " + file + " because an error occurred.\n")
elif backup_result == 3:
log.write("Unable to create the appropriate folder for " + file + "\n")
def scan_and_backup(source_location, destination_location, operation, extras, sync, log, log_file=None):
if log:
if sync == 1:
with open(os.path.join("Logs", log_file), 'a', encoding="UTF-8") as log:
log.write("\nReverse way\n\n")
else:
from datetime import datetime
current_date = str(datetime.now())[:19]
log_file = "AutoBackup_" + current_date[:10] + '_' + current_date[11:] + ".log"
create_log(log_file, source_location, destination_location, operation, extras, sync)
for path, _, files in os.walk(source_location):
for file in files:
name, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext in lists[int(operation)] or extras and ext in ext_extras:
if ext not in ext_artwork or name[:8] != "AlbumArt" and name != "Thumbnail" and name != "Folder":
backup_result = file_copy(destination_location, path, file, source_location)
if log and backup_result > 0:
update_log(file, backup_result, log_file)
if sync == 2:
scan_and_backup(destination_location, source_location, operation, extras, 1, log, log_file)
def argument_validity():
parser = argparse.ArgumentParser()
parser.add_argument("source", help="location containing the files to be backed up")
parser.add_argument("destination", help="location where the files will be stored")
parser.add_argument("action", metavar="action", choices=["m", "music", "a", "artwork", "c", "complete"],
help="m or music: audio files only, a or artwork: image files only, c or complete: both audio"
" and image files")
parser.add_argument("-e", "--extras", action="store_true", dest="extras_flag", help="back-up extra files")
parser.add_argument("-s", "--sync", action="store_const", const=2, default=0, dest="sync_flag",
help="synchronize source and destination folder")
parser.add_argument("-l", "--log", action="store_true", dest="log_flag", help="save a log file")
parser.add_argument("-v", "--version", action="version", version="%(prog)s 1.3.0")
args = parser.parse_args()
source_location = os.path.abspath(args.source)
if not os.path.exists(source_location):
print(source_location, "does not exist.")
sys.exit()
destination_location = os.path.abspath(args.destination)
if args.action == "m" or args.action == "music":
action = 0
elif args.action == "a" or args.action == "artwork":
action = 1
else:
action = 2
return source_location, destination_location, action, args.extras_flag, args.sync_flag, args.log_flag
if __name__ == "__main__":
source, destination, selection, extras_flag, sync_flag, log_flag = argument_validity()
scan_and_backup(source, destination, selection, extras_flag, sync_flag, log_flag)
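# Hedged usage note (assumption: this comment block is not part of the original script, and
# the script name and paths below are placeholders). A typical invocation that backs up both
# audio and artwork, copies extra files, syncs both ways and keeps a log:
#
#     python autobackup.py /music/library /mnt/usb/backup complete --extras --sync --log
#
# `action` accepts m/music, a/artwork or c/complete, as defined in argument_validity().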
|
the-stack_0_15515 | import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so.so'))
def query_ball_point(radius, nsample, xyz1, xyz2):
'''
Input:
radius: float32, ball search radius
nsample: int32, number of points selected in each ball region
xyz1: (batch_size, ndataset, 3) float32 array, input points
xyz2: (batch_size, npoint, 3) float32 array, query points
Output:
idx: (batch_size, npoint, nsample) int32 array, indices to input points
pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
'''
#return grouping_module.query_ball_point(radius, nsample, xyz1, xyz2)
return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample)
ops.NoGradient('QueryBallPoint')
def select_top_k(k, dist):
'''
Input:
k: int32, number of k SMALLEST elements selected
dist: (b,m,n) float32 array, distance matrix, m query points, n dataset points
Output:
idx: (b,m,n) int32 array, first k in n are indices to the top k
dist_out: (b,m,n) float32 array, first k in n are the top k
'''
return grouping_module.selection_sort(dist, k)
ops.NoGradient('SelectionSort')
def group_point(points, idx):
'''
Input:
points: (batch_size, ndataset, channel) float32 array, points to sample from
idx: (batch_size, npoint, nsample) int32 array, indices to points
Output:
out: (batch_size, npoint, nsample, channel) float32 array, values sampled from points
'''
return grouping_module.group_point(points, idx)
@tf.RegisterGradient('GroupPoint')
def _group_point_grad(op, grad_out):
points = op.inputs[0]
idx = op.inputs[1]
return [grouping_module.group_point_grad(points, idx, grad_out), None]
def knn_point(k, xyz1, xyz2):
'''
Input:
k: int32, number of k in k-nn search
xyz1: (batch_size, ndataset, c) float32 array, input points
xyz2: (batch_size, npoint, c) float32 array, query points
Output:
val: (batch_size, npoint, k) float32 array, L2 distances
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
b = xyz1.get_shape()[0].value
n = xyz1.get_shape()[1].value
c = xyz1.get_shape()[2].value
m = xyz2.get_shape()[1].value
print(b, n, c, m)
print(xyz1, (b,1,n,c))
xyz1 = tf.tile(tf.reshape(xyz1, (b,1,n,c)), [1,m,1,1])
xyz2 = tf.tile(tf.reshape(xyz2, (b,m,1,c)), [1,1,n,1])
dist = tf.reduce_sum((xyz1-xyz2)**2, -1)
print(dist, k)
outi, out = select_top_k(k, dist)
idx = tf.slice(outi, [0,0,0], [-1,-1,k])
val = tf.slice(out, [0,0,0], [-1,-1,k])
print(idx, val)
#val, idx = tf.nn.top_k(-dist, k=k) # ONLY SUPPORT CPU
return(val, idx)
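# Illustrative NumPy reference for knn_point (not part of the original op
# library): the same pairwise squared-L2 distance + k-smallest selection,
# handy for sanity-checking the compiled op on small inputs.
def knn_point_numpy_reference(k, xyz1, xyz2):
    import numpy as np
    # (b, m, 1, c) - (b, 1, n, c) -> (b, m, n, c), then sum over channels
    dist = np.sum((xyz2[:, :, None, :] - xyz1[:, None, :, :]) ** 2, axis=-1)
    idx = np.argsort(dist, axis=-1)[:, :, :k]      # (b, m, k) indices into xyz1
    val = np.take_along_axis(dist, idx, axis=-1)   # (b, m, k) squared distances
    return val, idx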
if __name__=='__main__':
knn=True
import numpy as np
import time
np.random.seed(100)
pts = np.random.random((32,512,64)).astype('float32')
tmp1 = np.random.random((32,512,3)).astype('float32')
tmp2 = np.random.random((32,128,3)).astype('float32')
with tf.device('/gpu:1'):
points = tf.constant(pts)
xyz1 = tf.constant(tmp1)
xyz2 = tf.constant(tmp2)
radius = 0.1
nsample = 64
if knn:
_, idx = knn_point(nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
else:
idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
#grouped_points_grad = tf.ones_like(grouped_points)
#points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
with tf.Session('') as sess:
now = time.time()
for _ in range(100):
ret = sess.run(grouped_points)
print(time.time() - now)
print(ret.shape, ret.dtype)
print(ret)
|
the-stack_0_15516 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import warnings
from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel
@require_tf
class TFMarianModelTester:
config_cls = MarianConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFMarianModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append the new tokens to next_input_ids and next_attention_mask
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else ()
all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = True
def setUp(self):
self.model_tester = TFMarianModelTester(self)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
model_class = self.all_generative_model_classes[0]
input_ids = {
"decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
"input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
}
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pre-trained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass
def test_mixed_precision(self):
# TODO JP: Make Marian float16 compliant
pass
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
# Here we build the word embedding weights if they do not exist yet,
# and then retry getting the attribute once they are built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class AbstractMarianIntegrationTest(unittest.TestCase):
maxDiff = 1000 # show more chars for failing integration tests
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self) -> MarianTokenizer:
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
warnings.simplefilter("error")
model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
assert isinstance(model, TFMarianMTModel)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer.prepare_seq2seq_batch(
src_texts=self.src_text, **tokenizer_kwargs, return_tensors="tf"
)
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128
)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_MT_EN(AbstractMarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks if pad_token_id logits not set to LARGE_NEGATIVE."""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_zh(AbstractMarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_en_zh(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@slow
def test_pipeline(self):
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf")
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
|
the-stack_0_15517 | from __future__ import annotations
from typing import TYPE_CHECKING
from dearpygui import core as dpgcore
from dearpygui_obj import _register_item_type
from dearpygui_obj.data import DrawPos, DrawPropertyPos, DrawPropertyColorRGBA
from dearpygui_obj.wrapper.widget import Widget, ItemWidget
from dearpygui_obj.wrapper.drawing import DrawCommand, DrawProperty
if TYPE_CHECKING:
from typing import Any, Optional, Tuple, Sequence
from dearpygui_obj.data import Pos2D, ColorRGBA
@_register_item_type('mvAppItemType::Drawing')
class DrawingCanvas(Widget, ItemWidget):
"""A widget that displays the result of drawing commands."""
def __init__(self, size: Tuple[int, int] = (300, 300), *, name_id: str = None, **config):
super().__init__(size=size, name_id=name_id, **config)
def _setup_add_widget(self, dpg_args) -> None:
dpgcore.add_drawing(self.id, **dpg_args)
def clear(self) -> None:
"""Clears the drawing.
Warning:
Any :class:`.DrawCommand` objects created using this canvas must not be used after this
method is called.
This includes reading or writing to any properties of :class:`DrawCommand` objects.
"""
dpgcore.clear_drawing(self.id)
def get_mouse_pos(self) -> Optional[Tuple[int, int]]:
"""Get the mouse position within the drawing, or ``None`` if the drawing is not hovered."""
if not self.is_hovered():
return None
return dpgcore.get_drawing_mouse_pos()
def draw_line(self, p1: Pos2D, p2: Pos2D, color: ColorRGBA, thickness: int) -> DrawLine:
"""See :class:`.DrawLine`"""
return DrawLine(self, p1, p2, color, thickness)
def draw_rectangle(self, pmin: Pos2D, pmax: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawRectangle:
"""See :class:`.DrawRectangle` for keyword arguments."""
return DrawRectangle(self, pmin, pmax, color, **kwargs)
def draw_circle(self, center: Pos2D, radius: float, color: ColorRGBA, **kwargs: Any) -> DrawCircle:
"""See :class:`.DrawCircle` for keyword arguments."""
return DrawCircle(self, center, radius, color, **kwargs)
def draw_text(self, pos: Pos2D, text: str, **kwargs) -> DrawText:
"""See :class:`.DrawText` for keyword arguments."""
return DrawText(self, pos, text, **kwargs)
def draw_arrow(self, p1: Pos2D, p2: Pos2D, color: ColorRGBA, thickness: int, arrow_size: int) -> DrawArrow:
"""See :class:`.DrawArrow` for keyword arguments."""
return DrawArrow(self, p1, p2, color, thickness, arrow_size)
def draw_polyline(self, points: Sequence[Pos2D], color: ColorRGBA, **kwargs: Any) -> DrawPolyLine:
"""See :class:`.DrawPolyLine` for keyword arguments."""
return DrawPolyLine(self, points, color, **kwargs)
def draw_triangle(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawTriangle:
"""See :class:`.DrawTriangle` for keyword arguments."""
return DrawTriangle(self, p1, p2, p3, color, **kwargs)
def draw_quad(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, p4: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawQuad:
"""See :class:`.DrawQuod` for keyword arguments."""
return DrawQuad(self, p1, p2, p3, p4, color, **kwargs)
def draw_polygon(self, points: Sequence[Pos2D], color: ColorRGBA, **kwargs) -> DrawPolygon:
"""See :class:`.DrawPolygon` for keyword arguments."""
return DrawPolygon(self, points, color, **kwargs)
def draw_bezier_curve(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, p4: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawBezierCurve:
"""See :class:`.DrawBezierCurve` for keyword arguments."""
return DrawBezierCurve(self, p1, p2, p3, p4, color, **kwargs)
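# Usage sketch (illustrative only, not part of this module). It assumes the
# dearpygui_obj top-level helpers `Window` and `start_gui` and plain RGBA
# tuples for colors; the real API may expect ColorRGBA instances instead.
#
#   from dearpygui_obj import start_gui
#   from dearpygui_obj.window import Window
#
#   with Window('Canvas demo'):
#       canvas = DrawingCanvas(size=(320, 240))
#   canvas.draw_line((10, 10), (200, 150), (255, 0, 0, 255), thickness=2)
#   canvas.draw_circle((160, 120), 40, (0, 255, 0, 255), segments=32,
#                      thickness=1, fill=(0, 64, 0, 255))
#   start_gui()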
class DrawLine(DrawCommand):
"""Draws a line."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: int = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_line(self.canvas.id, tag=self.id, **draw_args)
class DrawRectangle(DrawCommand):
"""Draws a rectangle."""
pmin: Pos2D = DrawPropertyPos()
pmax: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
rounding: float = DrawProperty()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_rectangle(self.canvas.id, tag=self.id, **draw_args)
class DrawCircle(DrawCommand):
"""Draws a circle."""
center: Pos2D = DrawPropertyPos()
radius: float = DrawProperty()
color: ColorRGBA = DrawPropertyColorRGBA()
segments: int = DrawProperty()
thickness: float = DrawProperty()
fill: ColorRGBA = DrawPropertyColorRGBA()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_circle(self.canvas.id, tag=self.id, **draw_args)
class DrawText(DrawCommand):
"""Draws text."""
pos: Pos2D = DrawPropertyPos()
text: str = DrawProperty()
color: ColorRGBA = DrawPropertyColorRGBA()
font_size: int = DrawProperty(key='size')
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_text(self.canvas.id, tag=self.id, **draw_args)
class DrawArrow(DrawCommand):
"""Draw a line with an arrowhead."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: int = DrawProperty()
arrow_size: int = DrawProperty(key='size')
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_arrow(self.canvas.id, tag=self.id, **draw_args)
class DrawPolyLine(DrawCommand):
"""Draws connected lines."""
@DrawProperty()
def points(self) -> Sequence[Pos2D]:
return [ DrawPos(*p) for p in self.get_config()['points'] ]
@points.getconfig
def points(self, value: Sequence[Pos2D]):
return { 'points' : [ list(p) for p in value ] }
color: ColorRGBA = DrawPropertyColorRGBA()
closed: bool = DrawProperty()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_polyline(self.canvas.id, tag=self.id, **draw_args)
class DrawTriangle(DrawCommand):
"""Draws a triangle."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_triangle(self.canvas.id, tag=self.id, **draw_args)
class DrawQuad(DrawCommand):
"""Draws a quadrilateral."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
p4: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_quad(self.canvas.id, tag=self.id, **draw_args)
class DrawPolygon(DrawCommand):
"""Draws a polygon."""
@DrawProperty()
def points(self) -> Sequence[Pos2D]:
return [ DrawPos(*p) for p in self.get_config()['points'] ]
@points.getconfig
def points(self, value: Sequence[Pos2D]):
return { 'points' : [ list(p) for p in value ] }
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_polygon(self.canvas.id, tag=self.id, **draw_args)
class DrawBezierCurve(DrawCommand):
"""Draws a bezier curve."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
p4: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
segments: int = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_bezier_curve(self.canvas.id, tag=self.id, **draw_args)
## class DrawImage TODO
__all__ = [
'DrawingCanvas',
'DrawLine',
'DrawRectangle',
'DrawCircle',
'DrawText',
'DrawArrow',
'DrawPolyLine',
'DrawTriangle',
'DrawQuad',
'DrawPolygon',
'DrawBezierCurve',
] |
the-stack_0_15518 | from .testtools import virtuese as vs
from .testtools import pickyinvestor
import datetime
import numpy as np
import pandas as pd
import os
from .models import TradeCalendar
from .models import Position
def getDateIDs():
"""Get a dictionary mapping date to id in the database.
"""
tradeID = {}
tradeDays = TradeCalendar.objects.all()
for tradeDay in tradeDays:
tradeID[tradeDay.trade_date.strftime("%Y-%m-%d")] = tradeDay.id
return tradeID
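# Example of the mapping returned by getDateIDs (values are illustrative and
# depend on the rows stored in TradeCalendar): {"2020-03-02": 1, "2020-03-03": 2, ...}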
def getIndex(information):
"""
Adjust order so that indexes is a list such that:
indexes[] are the index for information to be storaged correctly.
information[indexes[0]] - book
information[indexes[1]] - ts_code
information[indexes[2]] - trade_date
information[indexes[3]] - position
information[indexes[4]] - value
information[indexes[5]] - wavg_cost
information[indexes[6]] - return
information[indexes[7]] - pct_return
"""
indexes = []
indexes.append(information.index("book"))
indexes.append(information.index("ts_code"))
indexes.append(information.index("trade_date"))
indexes.append(information.index("position"))
indexes.append(information.index("value"))
indexes.append(information.index("wavg_cost"))
indexes.append(information.index("return"))
indexes.append(information.index("pct_return"))
return indexes
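# Worked example (hypothetical header order): for a header row such as
# ["ts_code", "book", "trade_date", "value", "position", "wavg_cost",
#  "return", "pct_return"], getIndex returns [1, 0, 2, 4, 3, 5, 6, 7], so a
# data row indexed as row[indexes[0]] always yields the book column,
# row[indexes[1]] the ts_code, and so on.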
def simulate(params):
response = {}
try:
startDate = params['startDate']
endDate = params['endDate']
stockPool = params['stockPool']
strategy = params['strategy']
testval = [startDate,endDate,stockPool,strategy]
startDate = '20200301'
endDate = '20200603'
stockPool = []
strategy = ""
backtest = vs.VirtualSE()
backtest.setRunningInterval(startDate, endDate)
backtest.setBacktestingPriceEngine("backward")
# backtest.setStockPool(stockPool)
backtest.setBrokerDB("aTest.db")
backtest.setMarketExpressDB("externalDB.db")
strategy = pickyinvestor.PickyInvestor()
backtest.setTradingStrategy(strategy)
backtest.execute()
transactionDataFile = backtest.getTransactionData()
tradeID = getDateIDs()
skipFirstLine = True
for line in transactionDataFile:
if skipFirstLine:
skipFirstLine = False
indexes = getIndex(line.split()[0].split(","))
Position.objects.all().delete()
statements = []
continue
information = line.split()[0].split(",")
value = information[indexes[4]] if information[indexes[4]]!="" else 0
return_field = information[indexes[6]] if information[indexes[6]]!="" else 0
pct_return = information[indexes[7]] if information[indexes[7]]!="" else 0
statements.append(Position(
book=information[indexes[0]],
ts_code=information[indexes[1]],
trade_day_id=tradeID[information[indexes[2]]],
position=information[indexes[3]],
value=value,
wavg_cost=information[indexes[5]],
return_field=return_field,
pct_return=pct_return,
))
Position.objects.bulk_create(statements)
transactionDataFile.close()
backtest.clear()
return testval
except Exception as e:
print(str(e))
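# Illustrative call (hypothetical view/params wiring; the values shown are
# placeholders):
#   simulate({"startDate": "20200301", "endDate": "20200603",
#             "stockPool": [], "strategy": ""})
# Note that the current implementation overrides these values with the
# hard-coded defaults above before running the backtest.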
|