filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_17540 | # -*- coding: utf-8 -*-
__all__ = ["BlockedQuadPotential", "WindowedDiagAdapt", "WindowedFullAdapt"]
import numpy as np
from pymc3.step_methods.hmc.quadpotential import (
QuadPotential,
_WeightedVariance,
)
from scipy.linalg import LinAlgError, cholesky, solve_triangular
from .estimator import _WeightedCovariance
class BlockedQuadPotential(QuadPotential):
def __init__(self, n, groups, dtype="float64"):
self.dtype = dtype
self.n = int(n)
self.groups = groups
self.ordering = None
self.vmap = None
def set_ordering(self, ordering):
self.ordering = ordering
self.vmap = []
inds = np.arange(self.n)
for group in self.groups:
self.vmap.append(
np.concatenate(
[inds[self.ordering[v.name].slc] for v in group.variables]
)
)
def reset(self):
for group in self.groups:
group.potential.reset()
def velocity(self, x, out=None):
if out is None:
out = np.zeros_like(x)
for inds, group in zip(self.vmap, self.groups):
out[inds] = group.potential.velocity(x[inds])
return out
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * np.dot(x, velocity)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return self.energy(x, v_out)
def random(self):
out = np.empty(self.n)
for inds, group in zip(self.vmap, self.groups):
out[inds] = group.potential.random()
return out
def update(self, sample, grad, tune):
if not tune:
return
for inds, group in zip(self.vmap, self.groups):
group.potential.update(sample[inds], grad[inds], tune)
def raise_ok(self, vmap):
for group in self.groups:
group.potential.raise_ok(vmap)
class WindowedDiagAdapt(QuadPotential):
def __init__(
self,
ndim,
update_steps=None,
recompute_interval=1,
regularization_steps=0,
regularization_variance=1e-8,
dtype="float64",
):
self.dtype = dtype
self._ndim = int(ndim)
if update_steps is not None:
self._update_steps = np.atleast_1d(update_steps).astype(int)
else:
self._update_steps = np.array([], dtype=int)
self._recompute_interval = int(recompute_interval)
self._regularization_steps = int(regularization_steps)
self._regularization_variance = float(regularization_variance)
self.reset()
def reset(self):
self._n_samples = 0
self.new_variance()
self.update_factors()
self._foreground = self.new_estimator()
self._background = self.new_estimator()
def update(self, sample, grad, tune):
if not tune:
return
self._n_samples += 1
# If we're in warmup or cooldown, we shouldn't update the variance
if (
self._n_samples <= self._update_steps[0]
or self._n_samples > self._update_steps[-1]
):
return
# Add the sample to the estimators
self._foreground.add_sample(sample, weight=1)
self._background.add_sample(sample, weight=1)
# During the first slow window, never update the variance estimate
if self._n_samples < self._update_steps[1]:
return
# If this is one of the update steps, update the estimators
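        # (At each boundary in `update_steps` the background estimator, which has
        # only seen draws from the window that just ended, is promoted to the
        # foreground, and a fresh background estimator is started. This is the
        # usual windowed foreground/background mass-matrix adaptation scheme,
        # similar in spirit to Stan's warmup windows.)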
if self._n_samples in self._update_steps:
self._foreground = self._background
self._background = self.new_estimator()
self.update_var()
# Update the variance every `recompute_interval` steps
elif (
self._recompute_interval
and self._n_samples % self._recompute_interval == 0
):
self.update_var()
def set_var(self, var):
self._var = var
self.update_factors()
def update_var(self):
self._foreground.current_variance(out=self._var)
if self._regularization_steps > 0:
N = self._foreground.n_samples
n = self._regularization_steps
self._var *= N / (N + n)
self._var[self._diag_inds] += (
self._regularization_variance * n / (N + n)
)
self.update_factors()
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * x.dot(velocity)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
#
# The following methods should be overloaded by subclasses
#
def new_estimator(self):
return _WeightedVariance(self._ndim, dtype=self.dtype)
def new_variance(self):
self._var = np.ones(self._ndim, dtype=self.dtype)
self._diag_inds = np.arange(self._ndim)
def update_factors(self):
self._inv_sd = 1.0 / np.sqrt(self._var)
def velocity(self, x, out=None):
return np.multiply(self._var, x, out=out)
def random(self):
vals = np.random.normal(size=self._ndim).astype(self.dtype)
return self._inv_sd * vals
def raise_ok(self, vmap):
if np.any(~np.isfinite(self._inv_sd)):
raise ValueError("non-finite inverse variances found")
class WindowedFullAdapt(WindowedDiagAdapt):
def new_estimator(self):
return _WeightedCovariance(self._ndim, dtype=self.dtype)
def new_variance(self):
self._var = np.eye(self._ndim, dtype=self.dtype)
self._diag_inds = np.diag_indices(self._ndim)
def update_factors(self):
try:
self._chol = cholesky(self._var, lower=True)
except (LinAlgError, ValueError) as error:
self._chol_error = error
else:
self._chol_error = None
def velocity(self, x, out=None):
return np.dot(self._var, x, out=out)
def random(self):
vals = np.random.normal(size=self._ndim).astype(self.dtype)
return solve_triangular(self._chol.T, vals, overwrite_b=True)
def raise_ok(self, vmap):
if self._chol_error is not None:
raise ValueError("{0}".format(self._chol_error))
|
the-stack_0_17541 | from django.contrib.auth import login
from rest_framework import serializers, status
from rest_framework.views import APIView
from rest_framework.exceptions import ValidationError
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from common.response import create_response, create_response_dict
from your_project.users.selectors import user_get_login_data, user_get_by_email
from your_project.users.services import user_create
from ..services import user_create_access_token
example_responses = {
"201": openapi.Response(
description="Successful creation of User",
examples={
"application/json": create_response_dict(
data={
"id": 1,
"email": "[email protected]",
"access": "exxddnjsjkdkdcdkdkcdkcdncdkndkk...",
},
status=status.HTTP_201_CREATED,
)
},
),
"400": openapi.Response(
description="User already exising!",
examples={
"application/json": {
"errors": [{"message": "User already existing!", "code": "invalid"}],
"data": None,
"statusCode": 400,
}
},
),
}
class InputSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField()
class Meta:
ref_name = "UserSignUpInputSerializer"
class UserSignUp(APIView):
@swagger_auto_schema(
security=[],
request_body=InputSerializer,
responses=example_responses,
)
def post(self, request):
serializer = InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
request_data = serializer.validated_data
user = user_get_by_email(email=request_data["email"])
if user is None:
user = user_create(
email=request_data["email"], password=request_data["password"]
)
login(request=request, user=user)
return create_response(
data={
**user_get_login_data(user=user),
**user_create_access_token(user=user),
},
status=status.HTTP_201_CREATED,
)
else:
raise ValidationError(detail="User already existing!")
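# Illustrative flow (not part of the original module): a client POSTs
# {"email": ..., "password": ...} to whatever route is wired to UserSignUp;
# a new account yields a 201 response shaped like example_responses["201"]
# (login data plus an access token), while a repeated email hits the
# ValidationError above and produces the 400 payload shown in
# example_responses["400"].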
|
the-stack_0_17542 | import hashlib
import random
import json
import os
import os.path
import redleader.util as util
class Resource(object):
def __init__(self, context, cf_params):
super(Resource, self).__init__()
self._context = context
self._dependencies = []
self._cf_params = cf_params
self._user_id = None
self._multiplicity_uid = None
self._generated_id = None
self._generated_resources = None
def get_dependencies(self):
return self._dependencies
def is_static(self):
"""
Static resources only generate cloud formation templates
when they don't already exist. Example uses: S3 buckets, SQS queues
"""
return False
def add_dependency(self, dep):
"""
Resource dependencies will be included in the cloud formation
`DependsOn` attribute to ensure correct creation order.
Dependencies must be added to the cluster or generated
as subresources of included resources.
"""
self._dependencies.append(dep)
def get_id(self):
"""
        Each resource needs a reproducible UID that represents its state and multiplicity
State: If a key parameter to a resource changes, it's a different resource
Multiplicity: We need to be able to differentiate identical resources. e.g) t2.micro Instance #2 vs #3
Solution:
* Utilize _get_multiplicity to track # of identical resources produced
* Utilize _idempotent_params() to get a subset of a resource's
cloud formation template output, and hash it.
Implications:
* get_id() cannot be used inside of _cloud_formation_template()
** Instead, we'll output the placeholder {resource_id}
"""
if self._user_id is not None:
return self._user_id
if self._generated_id is None:
class_name = str(self.__class__.__name__).replace("Resource", "")
param_hash = self._param_hash()
if self._multiplicity_uid is None:
self._multiplicity_uid = Resource._get_multiplicity(class_name + param_hash)
self._generated_id = "RL%sN%sP%s" % (class_name,
self._multiplicity_uid,
param_hash)
if self._context.pretty_names():
h = hashlib.md5()
h.update(self._generated_id.encode('utf-8'))
ints = []
for x in range(2):
ints.append(int(h.hexdigest()[x * 8:(x+ 1) * 8], 16))
d = self._context.get_dict()
pretty_words = ""
for i in ints:
word = d[i % len(d)].lower().replace("'", "")
pretty_words += word[0].upper() + word[1:].lower()
self._generated_id = "%s%s%s" % (class_name, self._multiplicity_uid, pretty_words)
return self._generated_id
def _id_placeholder(self):
"""
Placeholder for use in _cloud_formation_template()
"""
return "{resource_id}"
def _param_hash(self):
key_params = self._idempotent_params()
template = self._cloud_formation_template()
extracted = {}
for k in key_params:
extracted[k] = template['Properties'][k]
h = hashlib.md5()
extracted_json = json.dumps(extracted, sort_keys=True)
h.update(str(extracted_json).encode('utf-8'))
return str(h.hexdigest()[0:10])
@classmethod
def _get_multiplicity(cls, uid):
if(not hasattr(cls, "_multiplicity_count")):
cls._multiplicity_count = {}
if uid in cls._multiplicity_count:
cls._multiplicity_count[uid] += 1
else:
cls._multiplicity_count[uid] = 1
return cls._multiplicity_count[uid]
@classmethod
def reset_multiplicity(cls):
cls._multiplicity_count = {}
def _idempotent_params(self):
"""
Returns the list of cloud formation parameters that must be the same
in order for two RedLeader resources to refer to the same deployed resource.
By default we assume that all parameters must be the same.
Example: we might change an EC2 instance's security group, but want the
RedLeader resource to refer to the same deployed server.
"""
template = self._cloud_formation_template()
return sorted(template['Properties'].keys())
def iam_service_policies(self):
"""
Return a list of objects usable by IAMRoleResource to generate
an IAM role representing access to this resource and its sub resources
"""
policies = []
for resource in self.generate_sub_resources():
policies += resource.iam_service_policies()
policies.append(self._iam_service_policy())
return policies
def _iam_service_policy(self):
raise NotImplementedError
def generate_sub_resources(self):
if self._generated_resources is None:
self._generated_resources = self._generate_sub_resources()
return self._generated_resources
def _generate_sub_resources(self):
"""
Generate any sub resources, if necessary
"""
return []
@staticmethod
def cf_ref(resource):
if resource is None:
return {}
return {"Ref": resource.get_id()}
@staticmethod
def cf_attr(resource, attr):
return {"Fn::GetAtt": [ resource.get_id(), attr ]}
@staticmethod
def replaceValues(obj, replaceMap):
if isinstance(obj, dict):
for key in obj:
if isinstance(obj[key], str):
obj[key] = util.multireplace(obj[key], replaceMap)
else:
obj[key] = Resource.replaceValues(obj[key], replaceMap)
if isinstance(obj, list):
new = []
for elem in obj:
if isinstance(elem, str) and elem in replaceMap:
new.append(replaceMap[elem])
else:
new.append(Resource.replaceValues(elem, replaceMap))
return new
return obj
def cloud_formation_template(self):
"""
Get the cloud formation template for this resource
"""
if(self.is_static() and self.resource_exists()):
# Don't create templates for static resources that exist
return None
cf_template = self._cloud_formation_template()
if cf_template is None:
return None
for param in self._cf_params:
cf_template['Properties'][param] = self._cf_params[param]
replaceMap = {"{resource_id}": self.get_id()}
for param in cf_template['Properties']:
cf_template['Properties'] = Resource.replaceValues(cf_template['Properties'], replaceMap)
cf_template["DependsOn"] = []
for dependency in self.get_dependencies():
if(not dependency.is_static() or not dependency.resource_exists()):
cf_template["DependsOn"].append(dependency.get_id())
cf_template["DependsOn"] = sorted(cf_template["DependsOn"])
if self.is_static():
# Don't delete static resources on cluster deletion
cf_template["DeletionPolicy"] = "Retain"
return cf_template
def find_deployed_resources(self):
"""
Finds already deployed resources that match this resource's configuration
"""
raise NotImplementedError
class CustomUserResource(Resource):
"""
CustomUserResource allows a cluster to provision and depend upon
    resources that aren't yet implemented programmatically
"""
def __init__(self, context, template):
        super(CustomUserResource, self).__init__(context, {})  # base Resource requires context and cf_params; no extra params apply here
self._template = template
def cloud_formation_template(self):
"""
Get the cloud formation template for this resource
"""
return self._template
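# --- Illustrative sketch (not part of the original package) ---
# Demonstrates how Resource.get_id() composes an ID of the form
# "RL<Class>N<multiplicity>P<param hash>" from a subclass's idempotent
# cloud-formation parameters. The context stub and the queue resource below
# are hypothetical stand-ins for RedLeader's real context and resources.
class _DemoContext(object):
    def pretty_names(self):
        return False

class _DemoQueueResource(Resource):
    def _cloud_formation_template(self):
        return {"Type": "AWS::SQS::Queue",
                "Properties": {"QueueName": "demo-queue"}}

if __name__ == "__main__":
    first = _DemoQueueResource(_DemoContext(), cf_params={})
    second = _DemoQueueResource(_DemoContext(), cf_params={})
    # Same parameters -> same hash suffix; the N component distinguishes copies.
    print(first.get_id())   # e.g. "RL_DemoQueueN1P<10-char hash>"
    print(second.get_id())  # e.g. "RL_DemoQueueN2P<10-char hash>"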
|
the-stack_0_17543 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-HU-astro/ampel/contrib/hu/util/ned.py
# License: BSD-3-Clause
# Author: valery brinnel <[email protected]>
# Date: 14.09.2021
# Last Modified Date: 14.09.2021
# Last Modified By: valery brinnel <[email protected]>
from typing import Tuple
from ampel.protocol.LoggerProtocol import LoggerProtocol
def check_ned_res(
cat_res: dict,
logger: LoggerProtocol,
spectroscopic: bool = False,
    z_range: None | tuple[float, float] = None
) -> bool:
if not cat_res.get('z'):
logger.info("No redshift found in NED result")
return True
if spectroscopic and cat_res.get('n_spectra', 0) == 0 and cat_res["zflag"] != "SPEC":
logger.info("Not a spectroscopic redshift")
return True
if z_range and (cat_res['z'] < z_range[0] or cat_res['z'] > z_range[1]):
logger.info("Redshift exceeds allowed values")
return True
return False
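# --- Illustrative usage sketch (not part of the original module) ---
# check_ned_res() returns True when the NED match should be *rejected*, so a
# caller typically skips the candidate on a True return. The catalog dict
# below is a made-up example payload.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger("ned-demo")
    demo_match = {"z": 0.05, "n_spectra": 1, "zflag": "SPEC"}
    # Passes all cuts -> not rejected (prints False)
    print(check_ned_res(demo_match, demo_logger, spectroscopic=True, z_range=(0.0, 0.1)))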
|
the-stack_0_17547 | import numpy as np
from math import sqrt, exp
from planet_finder.grid import Grid
class Kriging:
def __init__(self, heat_map):
self.nugget = 0
self.range = 8 #1/3 of range
self.sill = 12
self.sv_matrix = None
self.lag_matrix = None
self.heat_map = heat_map
self.pp = None
self.ppsv = None
self.weights = None
self.points = []
self.pp_z = 0
self.z_matrix = None
self.pp_error = 0
self.pX = 0
self.pY = 0
def update_heat_map(self, heat_map):
self.heat_map = heat_map
def get_points(self):
for y0 in range(self.heat_map.height):
for x0 in range(self.heat_map.width):
if self.heat_map.cells[x0][y0] >= 1:
self.points.append([x0, y0])
def calculate_lag_matrix(self):
self.lag_matrix = np.zeros((len(self.points), len(self.points)), dtype=float)
row = 0
column = 0
for p0 in self.points:
for p1 in self.points:
lag = sqrt(pow(p0[0] - p1[0], 2) + pow(p0[1] - p1[1], 2))
self.lag_matrix[row][column] = lag
column += 1
row += 1
column = 0
def calculate_sv_matrix(self):
sv = lambda t: self.nugget + self.sill*(1 - exp(-t/self.range)) if t != 0 else 0
self.sv_matrix = np.array([[sv(h) for h in row] for row in self.lag_matrix])
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
num_rows = len(self.points) + 3
        num_columns = len(self.points) + 3
count = 0
for point in self.points:
self.sv_matrix[num_rows-1][count] = point[1]
self.sv_matrix[num_rows-2][count] = point[0]
self.sv_matrix[num_rows-3][count] = 1
            self.sv_matrix[count][num_columns-1] = point[1]
            self.sv_matrix[count][num_columns-2] = point[0]
            self.sv_matrix[count][num_columns-3] = 1
count += 1
def calculate_prediction_point(self, pX, pY):
pp_lag = lambda t: sqrt(pow(t[0] - pX, 2) + pow(t[1] - pY, 2))
self.pp = np.array([pp_lag(row) for row in self.points])
self.pX = pX
self.pY = pY
def calculate_sv_pp(self):
# ppsv = lambda t: self.sill*(1 - exp(-t/self.range)) if t < self.range and t != 0 else 0
ppsv = lambda t: self.nugget + self.sill*(1 - exp(-t/self.range)) if t != 0 else 0
self.ppsv = np.array([ppsv(h) for h in self.pp])
self.ppsv = np.r_[self.ppsv, np.ones(3)]
rows = len(self.ppsv)
self.ppsv[rows - 2] = self.pX
self.ppsv[rows - 1] = self.pY
def calculate_weights(self):
try:
temp = np.linalg.inv(self.sv_matrix)
self.weights = np.dot(temp, self.ppsv)
self.pp_error = np.dot(self.ppsv, self.weights)
self.weights = np.delete(self.weights, -1, 0)
self.weights = np.delete(self.weights, -1, 0)
self.weights = np.delete(self.weights, -1, 0)
return True
except Exception as err:
print("Error")
print(err)
return False
def calculate_z(self):
z = lambda t: self.heat_map.cells[t[0]][t[1]]
self.z_matrix = np.array([z(p) for p in self.points])
self.pp_z = np.inner(self.z_matrix, self.weights)
def setup(self):
self.get_points()
if len(self.points) < 3:
return False
else:
self.calculate_lag_matrix()
self.calculate_sv_matrix()
if np.linalg.det(self.sv_matrix) == 0:
return False
else:
return True
def get_estimate(self, x, y):
self.calculate_prediction_point(x, y)
self.calculate_sv_pp()
if self.calculate_weights():
self.calculate_z()
return [self.pp_z, self.pp_error]
else:
return []
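# Note added for clarity: the weights above come from an exponential semivariogram
# model, gamma(h) = nugget + sill * (1 - exp(-h / a)), where `self.range` stores
# a = (practical range) / 3 (see the "#1/3 of range" comment in __init__), so the
# model reaches ~95% of the sill at the practical range. For example, with
# nugget=0, sill=12, a=8: gamma(8) = 12*(1 - e**-1) ~= 7.6 and gamma(24) ~= 11.4.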
if __name__ == "__main__":
np.set_printoptions(linewidth=300, precision=1)
heat_map = Grid(16, 16)
heat_map.init_bomb(3, 3, 10)
heat_map.cells[3][3] = 0
# heat_map.cells[0][0] = 1
# heat_map.cells[1][0] = 2
# heat_map.cells[2][0] = 4
# heat_map.cells[0][1] = 5
# heat_map.cells[0][2] = 6
# heat_map.cells[2][2] = 27
for x in range(16, 32):
for y in range(16, 32):
heat_map = Grid((x), (y))
bombX = int(heat_map.width/2)
bombY = int(heat_map.height/2)
heat_map.init_bomb(bombX, bombY)
heat_map.cells[bombX][bombY] = 0
k = Kriging(heat_map)
k.setup()
result = k.get_estimate(bombX, bombY )
print("Estimate for (%2d,%2d)" % (x, y), str("%4.1f" % result[0]), str("%4.1f" % result[1]), heat_map.cells[bombX][bombY], ' Error ' + str("%.1f" % (result[0] - 10)))
|
the-stack_0_17548 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
reqs = [line.strip() for line in open('requirements/deploy.txt')]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='esmvalwps',
version='1.0.1',
description='WPS processes for ESMValTool',
long_description=README + '\n\n' + CHANGES,
classifiers=classifiers,
author='Birdhouse',
author_email='',
url='http://www.esmvaltool.org/',
license="Apache License v2.0",
keywords='wps pywps conda birdhouse esmvaltool',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='esmvalwps',
install_requires=reqs,
entry_points={
'console_scripts': []
},
)
|
the-stack_0_17549 | """Fixture used in type-related test cases.
It contains class TypeInfos and Type objects.
"""
from typing import List, Optional, Tuple
from mypy.semanal_shared import set_callable_name
from mypy.types import (
Type, AnyType, NoneType, Instance, CallableType, TypeVarType, TypeType,
UninhabitedType, TypeOfAny, TypeAliasType, UnionType, LiteralType,
TypeVarLikeType
)
from mypy.nodes import (
TypeInfo, ClassDef, FuncDef, Block, ARG_POS, ARG_OPT, ARG_STAR, SymbolTable,
COVARIANT, TypeAlias, SymbolTableNode, MDEF,
)
class TypeFixture:
"""Helper class that is used as a fixture in type-related unit tests.
The members are initialized to contain various type-related values.
"""
def __init__(self, variance: int = COVARIANT) -> None:
# The 'object' class
self.oi = self.make_type_info('builtins.object') # class object
self.o = Instance(self.oi, []) # object
# Type variables (these are effectively global)
def make_type_var(name: str, id: int, values: List[Type], upper_bound: Type,
variance: int) -> TypeVarType:
return TypeVarType(name, name, id, values, upper_bound, variance)
self.t = make_type_var('T', 1, [], self.o, variance) # T`1 (type variable)
self.tf = make_type_var('T', -1, [], self.o, variance) # T`-1 (type variable)
self.tf2 = make_type_var('T', -2, [], self.o, variance) # T`-2 (type variable)
self.s = make_type_var('S', 2, [], self.o, variance) # S`2 (type variable)
self.s1 = make_type_var('S', 1, [], self.o, variance) # S`1 (type variable)
self.sf = make_type_var('S', -2, [], self.o, variance) # S`-2 (type variable)
self.sf1 = make_type_var('S', -1, [], self.o, variance) # S`-1 (type variable)
# Simple types
self.anyt = AnyType(TypeOfAny.special_form)
self.nonet = NoneType()
self.uninhabited = UninhabitedType()
# Abstract class TypeInfos
# class F
self.fi = self.make_type_info('F', is_abstract=True)
# class F2
self.f2i = self.make_type_info('F2', is_abstract=True)
# class F3(F)
self.f3i = self.make_type_info('F3', is_abstract=True, mro=[self.fi])
# Class TypeInfos
self.std_tuplei = self.make_type_info('builtins.tuple',
mro=[self.oi],
typevars=['T'],
variances=[COVARIANT]) # class tuple
self.type_typei = self.make_type_info('builtins.type') # class type
self.bool_type_info = self.make_type_info('builtins.bool')
self.functioni = self.make_type_info('builtins.function') # function TODO
self.ai = self.make_type_info('A', mro=[self.oi]) # class A
self.bi = self.make_type_info('B', mro=[self.ai, self.oi]) # class B(A)
self.ci = self.make_type_info('C', mro=[self.ai, self.oi]) # class C(A)
self.di = self.make_type_info('D', mro=[self.oi]) # class D
# class E(F)
self.ei = self.make_type_info('E', mro=[self.fi, self.oi])
# class E2(F2, F)
self.e2i = self.make_type_info('E2', mro=[self.f2i, self.fi, self.oi])
# class E3(F, F2)
self.e3i = self.make_type_info('E3', mro=[self.fi, self.f2i, self.oi])
# Generic class TypeInfos
# G[T]
self.gi = self.make_type_info('G', mro=[self.oi],
typevars=['T'],
variances=[variance])
# G2[T]
self.g2i = self.make_type_info('G2', mro=[self.oi],
typevars=['T'],
variances=[variance])
# H[S, T]
self.hi = self.make_type_info('H', mro=[self.oi],
typevars=['S', 'T'],
variances=[variance, variance])
# GS[T, S] <: G[S]
self.gsi = self.make_type_info('GS', mro=[self.gi, self.oi],
typevars=['T', 'S'],
variances=[variance, variance],
bases=[Instance(self.gi, [self.s])])
# GS2[S] <: G[S]
self.gs2i = self.make_type_info('GS2', mro=[self.gi, self.oi],
typevars=['S'],
variances=[variance],
bases=[Instance(self.gi, [self.s1])])
# list[T]
self.std_listi = self.make_type_info('builtins.list', mro=[self.oi],
typevars=['T'],
variances=[variance])
# Instance types
self.std_tuple = Instance(self.std_tuplei, [self.anyt]) # tuple
self.type_type = Instance(self.type_typei, []) # type
self.function = Instance(self.functioni, []) # function TODO
self.a = Instance(self.ai, []) # A
self.b = Instance(self.bi, []) # B
self.c = Instance(self.ci, []) # C
self.d = Instance(self.di, []) # D
self.e = Instance(self.ei, []) # E
self.e2 = Instance(self.e2i, []) # E2
self.e3 = Instance(self.e3i, []) # E3
self.f = Instance(self.fi, []) # F
self.f2 = Instance(self.f2i, []) # F2
self.f3 = Instance(self.f3i, []) # F3
# Generic instance types
self.ga = Instance(self.gi, [self.a]) # G[A]
self.gb = Instance(self.gi, [self.b]) # G[B]
self.gd = Instance(self.gi, [self.d]) # G[D]
self.go = Instance(self.gi, [self.o]) # G[object]
self.gt = Instance(self.gi, [self.t]) # G[T`1]
self.gtf = Instance(self.gi, [self.tf]) # G[T`-1]
self.gtf2 = Instance(self.gi, [self.tf2]) # G[T`-2]
self.gs = Instance(self.gi, [self.s]) # G[S]
self.gdyn = Instance(self.gi, [self.anyt]) # G[Any]
self.gn = Instance(self.gi, [NoneType()]) # G[None]
self.g2a = Instance(self.g2i, [self.a]) # G2[A]
self.gsaa = Instance(self.gsi, [self.a, self.a]) # GS[A, A]
self.gsab = Instance(self.gsi, [self.a, self.b]) # GS[A, B]
self.gsba = Instance(self.gsi, [self.b, self.a]) # GS[B, A]
self.gs2a = Instance(self.gs2i, [self.a]) # GS2[A]
self.gs2b = Instance(self.gs2i, [self.b]) # GS2[B]
self.gs2d = Instance(self.gs2i, [self.d]) # GS2[D]
self.hab = Instance(self.hi, [self.a, self.b]) # H[A, B]
self.haa = Instance(self.hi, [self.a, self.a]) # H[A, A]
self.hbb = Instance(self.hi, [self.b, self.b]) # H[B, B]
self.hts = Instance(self.hi, [self.t, self.s]) # H[T, S]
self.had = Instance(self.hi, [self.a, self.d]) # H[A, D]
self.hao = Instance(self.hi, [self.a, self.o]) # H[A, object]
self.lsta = Instance(self.std_listi, [self.a]) # List[A]
self.lstb = Instance(self.std_listi, [self.b]) # List[B]
self.lit1 = LiteralType(1, self.a)
self.lit2 = LiteralType(2, self.a)
self.lit3 = LiteralType("foo", self.d)
self.lit1_inst = Instance(self.ai, [], last_known_value=self.lit1)
self.lit2_inst = Instance(self.ai, [], last_known_value=self.lit2)
self.lit3_inst = Instance(self.di, [], last_known_value=self.lit3)
self.type_a = TypeType.make_normalized(self.a)
self.type_b = TypeType.make_normalized(self.b)
self.type_c = TypeType.make_normalized(self.c)
self.type_d = TypeType.make_normalized(self.d)
self.type_t = TypeType.make_normalized(self.t)
self.type_any = TypeType.make_normalized(self.anyt)
self._add_bool_dunder(self.bool_type_info)
self._add_bool_dunder(self.ai)
def _add_bool_dunder(self, type_info: TypeInfo) -> None:
signature = CallableType([], [], [], Instance(self.bool_type_info, []), self.function)
bool_func = FuncDef('__bool__', [], Block([]))
bool_func.type = set_callable_name(signature, bool_func)
type_info.names[bool_func.name] = SymbolTableNode(MDEF, bool_func)
# Helper methods
def callable(self, *a: Type) -> CallableType:
"""callable(a1, ..., an, r) constructs a callable with argument types
a1, ... an and return type r.
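        For example, callable(self.a, self.b, self.o) builds a CallableType
        equivalent to Callable[[A, B], object].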
"""
return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
[None] * (len(a) - 1), a[-1], self.function)
def callable_type(self, *a: Type) -> CallableType:
"""callable_type(a1, ..., an, r) constructs a callable with
argument types a1, ... an and return type r, and which
represents a type.
"""
return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
[None] * (len(a) - 1), a[-1], self.type_type)
def callable_default(self, min_args: int, *a: Type) -> CallableType:
"""callable_default(min_args, a1, ..., an, r) constructs a
callable with argument types a1, ... an and return type r,
with min_args mandatory fixed arguments.
"""
n = len(a) - 1
return CallableType(list(a[:-1]),
[ARG_POS] * min_args + [ARG_OPT] * (n - min_args),
[None] * n,
a[-1], self.function)
def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
"""callable_var_arg(min_args, a1, ..., an, r) constructs a callable
with argument types a1, ... *an and return type r.
"""
n = len(a) - 1
return CallableType(list(a[:-1]),
[ARG_POS] * min_args +
[ARG_OPT] * (n - 1 - min_args) +
[ARG_STAR], [None] * n,
a[-1], self.function)
def make_type_info(self, name: str,
module_name: Optional[str] = None,
is_abstract: bool = False,
mro: Optional[List[TypeInfo]] = None,
bases: Optional[List[Instance]] = None,
typevars: Optional[List[str]] = None,
variances: Optional[List[int]] = None) -> TypeInfo:
"""Make a TypeInfo suitable for use in unit tests."""
class_def = ClassDef(name, Block([]), None, [])
class_def.fullname = name
if module_name is None:
if '.' in name:
module_name = name.rsplit('.', 1)[0]
else:
module_name = '__main__'
if typevars:
v: List[TypeVarLikeType] = []
for id, n in enumerate(typevars, 1):
if variances:
variance = variances[id - 1]
else:
variance = COVARIANT
v.append(TypeVarType(n, n, id, [], self.o, variance=variance))
class_def.type_vars = v
info = TypeInfo(SymbolTable(), class_def, module_name)
if mro is None:
mro = []
if name != 'builtins.object':
mro.append(self.oi)
info.mro = [info] + mro
if bases is None:
if mro:
# By default, assume that there is a single non-generic base.
bases = [Instance(mro[0], [])]
else:
bases = []
info.bases = bases
return info
def def_alias_1(self, base: Instance) -> Tuple[TypeAliasType, Type]:
A = TypeAliasType(None, [])
target = Instance(self.std_tuplei,
[UnionType([base, A])]) # A = Tuple[Union[base, A], ...]
AN = TypeAlias(target, '__main__.A', -1, -1)
A.alias = AN
return A, target
def def_alias_2(self, base: Instance) -> Tuple[TypeAliasType, Type]:
A = TypeAliasType(None, [])
target = UnionType([base,
Instance(self.std_tuplei, [A])]) # A = Union[base, Tuple[A, ...]]
AN = TypeAlias(target, '__main__.A', -1, -1)
A.alias = AN
return A, target
def non_rec_alias(self, target: Type) -> TypeAliasType:
AN = TypeAlias(target, '__main__.A', -1, -1)
return TypeAliasType(AN, [])
class InterfaceTypeFixture(TypeFixture):
"""Extension of TypeFixture that contains additional generic
interface types."""
def __init__(self) -> None:
super().__init__()
# GF[T]
self.gfi = self.make_type_info('GF', typevars=['T'], is_abstract=True)
# M1 <: GF[A]
self.m1i = self.make_type_info('M1',
is_abstract=True,
mro=[self.gfi, self.oi],
bases=[Instance(self.gfi, [self.a])])
self.gfa = Instance(self.gfi, [self.a]) # GF[A]
self.gfb = Instance(self.gfi, [self.b]) # GF[B]
self.m1 = Instance(self.m1i, []) # M1
|
the-stack_0_17551 | import json
import logging
from django.conf import settings
from azure.storage.file import FileService
from azure.common import AzureMissingResourceHttpError
from azure.storage.file.models import File as AzureFile, Directory as AzureDirectory
from azure.storage.blob import BlockBlobService
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.db.models.signals import post_save
logger = logging.getLogger('django')
class TaxonomyTerms(models.Model):
taxonomy_id = models.CharField(max_length=255, unique=True)
terms_json = models.TextField()
@receiver(post_save, sender=TaxonomyTerms)
def update_taxonomy_terms_on_blobstore(sender, instance, **kwargs):
try:
data = json.loads(instance.terms_json)
terms_with_vocab = get_terms_from_terms_json(data)
vocabs = get_vocabs_from_terms_json(data)
content = dict()
content['vocabs'] = vocabs
content['terms'] = terms_with_vocab
blobPath = f'taxonomy/{instance.taxonomy_id}.json'
blob_service = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME, account_key=settings.AZURE_ACCOUNT_KEY)
blob_service.create_blob_from_text(settings.AZURE_CONTAINER, blobPath, to_json(content))
logger.info('Successfully wrote taxonomy json to BlobStore %s', blobPath)
except Exception as e:
logger.info('Could not build taxonomy json and send to BlobStore %s', e)
def get_terms_from_terms_json(data):
terms = dict()
level = 1
for obj in data:
if obj.get('type') == 'vocabulary':
vocab_code = obj.get('code')
children = obj.get('children', None)
if children:
child_terms = get_terms_from_children(children, vocab_code, '', level)
terms.update(child_terms)
return terms
def get_terms_from_children(children, vocab_code, index_path, level):
terms = dict()
for obj in children:
if obj.get('type') == 'term':
index_path_for_level = obj.get('code') if index_path == '' else "%s|%s" % (index_path, obj.get('code'))
terms[obj.get('code')] = { "label": obj.get('label'), "vocabCode": vocab_code, "indexPath": index_path_for_level, "level": level }
children = obj.get('children', None)
if children:
next_level = level + 1
child_terms = get_terms_from_children(children, vocab_code, index_path_for_level, next_level)
terms.update(child_terms)
return terms
def get_vocabs_from_terms_json(data):
vocabs = dict()
for obj in data:
if obj.get('type') == 'vocabulary':
vocab_code = obj.get('code')
vocab_label = obj.get('label')
vocabs[vocab_code] = vocab_label
return vocabs
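# Illustrative example of the helpers above: terms_json such as
#   [{"type": "vocabulary", "code": "topics", "label": "Topics", "children": [
#       {"type": "term", "code": "health", "label": "Health", "children": [
#           {"type": "term", "code": "nutrition", "label": "Nutrition"}]}]}]
# flattens to terms {"health": {"label": "Health", "vocabCode": "topics",
# "indexPath": "health", "level": 1}, "nutrition": {..., "indexPath":
# "health|nutrition", "level": 2}} and vocabs {"topics": "Topics"}.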
def to_json(data):
return json.dumps(data)
class TaxonomyMixin(models.Model):
taxonomy_json = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class PageTaxonomyPermissionsMixin(models.Model):
global_permission = models.CharField(max_length=100, null=True, blank=True, default='public')
inherit_permission = models.CharField(max_length=100, null=True, blank=True)
permissions_json = models.TextField(null=True, blank=True)
permissions_json_formatted = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class ModelTaxonomyPermissionsMixin(models.Model):
permissions_json = models.TextField(null=True, blank=True)
permissions_json_formatted = models.TextField(null=True, blank=True)
class Meta:
abstract = True
# @receiver(pre_save, sender=PageTaxonomyPermissionsMixin)
def format_permissions_json(sender, instance, **kwargs):
    permissions_json_formatted = {}
for group_key, groups in (json.loads(instance.permissions_json)).items():
permissions_json_formatted[group_key] = []
for action_key, vocs in groups.items():
permissions_json_formatted[group_key].extend(['{0}.{1}'.format(action_key, voc) for voc in vocs])
instance.permissions_json_formatted = permissions_json_formatted
print(permissions_json_formatted)
return instance
|
the-stack_0_17555 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import OrderedDict
from logging import INFO
from typing import Union
import pytest
import torch
import torch.nn.utils.prune as pytorch_prune
from torch import nn
from torch.nn import Sequential
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, ModelPruning
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class TestModel(BoringModel):
test_step = None
def __init__(self):
super().__init__()
self.layer = Sequential(
OrderedDict([
("mlp_1", nn.Linear(32, 32)),
("mlp_2", nn.Linear(32, 32, bias=False)),
("mlp_3", nn.Linear(32, 2)),
])
)
def training_step(self, batch, batch_idx):
self.log("test", -batch_idx)
return super().training_step(batch, batch_idx)
class TestPruningMethod(pytorch_prune.BasePruningMethod):
PRUNING_TYPE = "unstructured"
def compute_mask(self, _, default_mask):
mask = default_mask.clone()
# Prune every other entry in a tensor
mask.view(-1)[::2] = 0
return mask
@classmethod
def apply(cls, module, name, amount):
return super(TestPruningMethod, cls).apply(module, name, amount=amount)
def train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=False,
pruning_fn="l1_unstructured",
use_lottery_ticket_hypothesis=False,
accelerator=None,
gpus=None,
num_processes=1,
):
model = TestModel()
# Weights are random. None is 0
assert torch.all(model.layer.mlp_2.weight != 0)
pruning_kwargs = {
"pruning_fn": pruning_fn,
"amount": 0.3,
"use_global_unstructured": use_global_unstructured,
"use_lottery_ticket_hypothesis": use_lottery_ticket_hypothesis,
"verbose": 1,
}
if parameters_to_prune:
pruning_kwargs["parameters_to_prune"] = [(model.layer.mlp_1, "weight"), (model.layer.mlp_2, "weight")]
else:
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["parameter_names"] = ["weight"]
else:
pruning_kwargs["parameter_names"] = ["weight", "bias"]
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["pruning_dim"] = 0
if pruning_fn == "ln_structured":
pruning_kwargs["pruning_norm"] = 1
# Misconfiguration checks
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured") and use_global_unstructured:
with pytest.raises(MisconfigurationException, match="is supported with `use_global_unstructured=True`"):
ModelPruning(**pruning_kwargs)
return
if ModelPruning._is_pruning_method(pruning_fn) and not use_global_unstructured:
with pytest.raises(MisconfigurationException, match="currently only supported with"):
ModelPruning(**pruning_kwargs)
return
pruning = ModelPruning(**pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=10,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
callbacks=pruning,
)
trainer.fit(model)
trainer.test(model)
if not accelerator:
# Check some have been pruned
assert torch.any(model.layer.mlp_2.weight == 0)
def test_pruning_misconfiguration():
with pytest.raises(MisconfigurationException, match=r"chocolate isn't in \('weight', 'bias'\)"):
ModelPruning(pruning_fn="l1_unstructured", parameter_names=["chocolate"])
with pytest.raises(MisconfigurationException, match=r"expected to be a str in \["):
ModelPruning(pruning_fn={}) # noqa
with pytest.raises(MisconfigurationException, match="should be provided"):
ModelPruning(pruning_fn="random_structured")
with pytest.raises(MisconfigurationException, match=r"must be any of \(0, 1, 2\)"):
ModelPruning(pruning_fn="l1_unstructured", verbose=3)
with pytest.raises(MisconfigurationException, match="requesting `ln_structured` pruning, the `pruning_norm`"):
ModelPruning(pruning_fn="ln_structured", pruning_dim=0)
@pytest.mark.parametrize("parameters_to_prune", [False, True])
@pytest.mark.parametrize("use_global_unstructured", [False, True])
@pytest.mark.parametrize(
"pruning_fn", ["l1_unstructured", "random_unstructured", "ln_structured", "random_structured", TestPruningMethod]
)
@pytest.mark.parametrize("use_lottery_ticket_hypothesis", [False, True])
def test_pruning_callback(
tmpdir, use_global_unstructured: bool, parameters_to_prune: bool,
pruning_fn: Union[str, pytorch_prune.BasePruningMethod], use_lottery_ticket_hypothesis: bool
):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=parameters_to_prune,
use_global_unstructured=use_global_unstructured,
pruning_fn=pruning_fn,
use_lottery_ticket_hypothesis=use_lottery_ticket_hypothesis,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_0(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=False,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_1(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=True,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_2(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=True,
use_global_unstructured=False,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_3(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=True,
use_global_unstructured=True,
accelerator="ddp",
gpus=2,
)
@RunIf(min_gpus=2, skip_windows=True)
def test_pruning_callback_ddp_spawn(tmpdir):
train_with_pruning_callback(tmpdir, use_global_unstructured=True, accelerator="ddp_spawn", gpus=2)
@RunIf(skip_windows=True)
def test_pruning_callback_ddp_cpu(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("resample_parameters", (False, True))
def test_pruning_lth_callable(tmpdir, resample_parameters: bool):
model = TestModel()
class ModelPruningTestCallback(ModelPruning):
lth_calls = 0
def apply_lottery_ticket_hypothesis(self):
super().apply_lottery_ticket_hypothesis()
self.lth_calls += 1
for d in self._original_layers.values():
copy, names = d["data"], d["names"]
for i, name in names:
curr, curr_name = self._parameters_to_prune[i]
assert name == curr_name
actual, expected = getattr(curr, name).data, getattr(copy, name).data
allclose = torch.allclose(actual, expected)
assert not allclose if self._resample_parameters else allclose
pruning = ModelPruningTestCallback(
"l1_unstructured", use_lottery_ticket_hypothesis=lambda e: bool(e % 2), resample_parameters=resample_parameters
)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=5,
callbacks=pruning,
)
trainer.fit(model)
assert pruning.lth_calls == trainer.max_epochs // 2
@pytest.mark.parametrize("make_pruning_permanent", (False, True))
def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool):
model = TestModel()
pruning_kwargs = {
'parameters_to_prune': [(model.layer.mlp_1, "weight"), (model.layer.mlp_3, "weight")],
'verbose': 2,
"make_pruning_permanent": make_pruning_permanent
}
p1 = ModelPruning("l1_unstructured", amount=0.5, apply_pruning=lambda e: not e % 2, **pruning_kwargs)
p2 = ModelPruning("random_unstructured", amount=0.25, apply_pruning=lambda e: e % 2, **pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=3,
callbacks=[p1, p2],
)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
filepath = str(tmpdir / "foo.ckpt")
trainer.save_checkpoint(filepath)
model.load_from_checkpoint(filepath, strict=False)
has_pruning = hasattr(model.layer.mlp_1, "weight_orig")
assert not has_pruning if make_pruning_permanent else has_pruning
@pytest.mark.parametrize("on_train_epoch_end", (False, True))
def test_permanent_when_model_is_saved_multiple_times(tmpdir, caplog, on_train_epoch_end):
"""
When a model is saved multiple times and make_permanent=True, we need to
make sure a copy is pruned and not the trained model if we want to continue
with the same pruning buffers.
"""
class TestPruning(ModelPruning):
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
super().on_save_checkpoint(trainer, pl_module, checkpoint)
if not on_train_epoch_end:
# these checks only work if pruning on `validation_epoch_end`
# because `on_save_checkpoint` is called before `on_train_epoch_end`
assert "layer.mlp_3.weight_orig" not in checkpoint["state_dict"]
assert hasattr(pl_module.layer.mlp_3, "weight_orig")
model = TestModel()
pruning_callback = TestPruning(
"random_unstructured",
parameters_to_prune=[(model.layer.mlp_3, "weight")],
verbose=1,
make_pruning_permanent=True,
prune_on_train_epoch_end=on_train_epoch_end,
)
ckpt_callback = ModelCheckpoint(monitor="test", save_top_k=2, save_last=True)
trainer = Trainer(callbacks=[pruning_callback, ckpt_callback], max_epochs=3, progress_bar_refresh_rate=0)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
# removed on_train_end
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.kth_best_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.last_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
|
the-stack_0_17556 |
# `$ python3 simple_ast.py --help` for more information
# MIT License
#
# Copyright (c) 2020 John Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import json
import re
def main():
parser = argparse.ArgumentParser(description='Generate a simple abstract syntax tree from the given files', epilog="""
Parsing rules
This parser uses three values:
bounds A dictionary of start and end tokens. If the program finds a start
token it will push a new array on the stack and continue. When it
finds the corresponding end token the program will pop the array off
the stack and continue.
extra An array of tokens that don't push or pop when found (unless they're
in the bounds).
strip An array of tokens that will be removed from the output.
Example rules:
{
"bounds": { "(": ")" },
"extra": [ "-", "+", "*", "/", "%" ],
"strip": [ "\n", " " ]
}
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', nargs='+', help='Files to be parsed')
parser.add_argument('--output', default='-', help='Location to save the AST')
parser.add_argument('--rules', help='A JSON file containing the parsing rules')
args = parser.parse_args()
rules = {}
if args.rules:
with open(args.rules, 'r') as f:
rules = json.load(f)
if 'bounds' not in rules:
rules['bounds'] = {}
if 'extra' not in rules:
rules['extra'] = ['\n']
if 'strip' not in rules:
rules['strip'] = []
if args.rules:
with open(args.rules, "w") as file:
file.write(json.dumps(rules, sort_keys=True, indent=2))
ast = {}
for input_path in args.input:
with open(input_path, 'r') as file:
text = file.read()
ast[input_path] = generate_ast(text, bounds=rules['bounds'], extra=rules['extra']+rules['strip'], strip=rules['strip'])
if len(ast) == 1:
ast = list(ast.values())[0]
outputContent = json.dumps(ast, sort_keys=True, indent=2)
if args.output != '-':
with open(args.output, "w") as file:
file.write(outputContent)
else:
print(outputContent)
def generate_ast(text, bounds={}, extra=['\n'], strip=['\n']):
boundingTokenRegex = '|'.join(map(lambda s: "("+re.escape(s)+")", sorted(list(bounds.keys()) + list(bounds.values()) + extra,reverse=True)))
tokens = re.compile(boundingTokenRegex).split(text)
stack = [[]]
for token in tokens:
if token is None or len(token) == 0:
continue
if token in bounds:
frame = []
stack[-1].append(frame)
stack.append(frame)
if token not in strip:
stack[-1].append(token)
if len(stack) > 1 and isinstance(stack[-1][0], str) and stack[-1][0] in bounds and token == bounds[stack[-1][0]]:
stack.pop()
return stack[0]
if __name__ == "__main__":
main()
|
the-stack_0_17557 | # -*- coding: utf-8 -*-
import pytest
import binascii
import time
import sys
sys.path.extend(["../"])
from bbc1.core import bbclib, bbc_app
from bbc1.core.message_key_types import KeyType
from testutils import prepare, get_core_client, start_core_thread, make_client, domain_setup_utility
LOGLEVEL = 'debug'
#LOGLEVEL = 'none'
core_num = 5
client_num = core_num * 2
cores = None
clients = None
domain_id = bbclib.get_new_id("testdomain")
asset_group_id = bbclib.get_new_id("asset_group_1")
transactions = [None for i in range(client_num)]
msg_processor = [None for i in range(client_num)]
class MessageProcessor(bbc_app.Callback):
def __init__(self, index=0):
super(MessageProcessor, self).__init__(self)
self.idx = index
def proc_cmd_sign_request(self, dat):
self.logger.debug("[%i] Recv SIGN_REQUEST from %s" % (self.idx, binascii.b2a_hex(dat[KeyType.source_user_id])))
txobj, fmt_type = bbclib.deserialize(dat[KeyType.transaction_data])
objs = dict()
for txid, txdata in dat[KeyType.transactions].items():
txo, fmt_type = bbclib.deserialize(txdata)
objs[txid] = txo
for i, reference in enumerate(txobj.references):
event = objs[reference.transaction_id].events[reference.event_index_in_ref]
if clients[self.idx]['user_id'] in event.mandatory_approvers:
signature = txobj.sign(keypair=clients[self.idx]['keypair'])
clients[self.idx]['app'].sendback_signature(asset_group_id, dat[KeyType.source_user_id],
txobj.transaction_id, i, signature)
return
class TestBBcAppClient(object):
def test_00_setup(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
print("domain_id =", binascii.b2a_hex(domain_id))
global msg_processor
prepare(core_num=core_num, client_num=client_num, loglevel=LOGLEVEL)
for i in range(core_num):
start_core_thread(index=i, core_port_increment=i, p2p_port_increment=i)
time.sleep(0.1)
domain_setup_utility(i, domain_id) # system administrator
time.sleep(1)
for i in range(core_num):
msg_processor[i*2] = MessageProcessor(index=i*2)
make_client(index=i*2, core_port_increment=i, callback=msg_processor[i*2])
msg_processor[i * 2 + 1] = MessageProcessor(index=i*2+1)
make_client(index=i * 2 + 1, core_port_increment=i, callback=msg_processor[i * 2 + 1])
time.sleep(1)
global cores, clients
cores, clients = get_core_client()
def test_10_setup_network(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
ret = clients[0]['app'].get_domain_neighborlist(domain_id=domain_id)
assert ret
dat = msg_processor[0].synchronize()
print("[0] nodeinfo=", dat[0])
node_id, ipv4, ipv6, port, domain0 = dat[0]
for i in range(1, core_num):
clients[i*2]['app'].send_domain_ping(domain_id, ipv4, ipv6, port)
print("*** wait 5 seconds ***")
time.sleep(5)
for i in range(core_num):
print(cores[i].networking.domains[domain_id]['neighbor'].show_list())
assert len(cores[i].networking.domains[domain_id]['neighbor'].nodeinfo_list) == core_num - 1
def test_11_register(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
global clients
for cl in clients:
ret = cl['app'].register_to_core()
assert ret
time.sleep(1)
for i in range(4):
assert clients[i]['app'].request_insert_completion_notification(asset_group_id)
time.sleep(2)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
if i in [0, 1]: # core0 and core1 have forwarding entry for core1 and core0, respectively.
assert num == 1
else:
assert num == 2
def test_12_cancel_notification(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
clients[0]['app'].cancel_insert_completion_notification(asset_group_id)
clients[2]['app'].cancel_insert_completion_notification(asset_group_id)
time.sleep(1)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
if i in [0, 1]: # core0 and core1 have forwarding entry for core1 and core0, respectively.
assert num == 1
else:
assert num == 2
def test_13_cancel_notification(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
clients[1]['app'].cancel_insert_completion_notification(asset_group_id)
time.sleep(1)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
if i == 1: # core1 has no forwarding entry because all clients in core0 canceled multicast forwarding
assert asset_group_id not in fe
else:
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
assert num == 1
if __name__ == '__main__':
pytest.main()
|
the-stack_0_17558 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineExtension(Resource):
"""Describes a Virtual Machine Extension.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param force_update_tag: How the extension handler should be forced to
update even if the extension configuration has not changed.
:type force_update_tag: str
:param publisher: The name of the extension handler publisher.
:type publisher: str
:param virtual_machine_extension_type: The type of the extension handler.
:type virtual_machine_extension_type: str
:param type_handler_version: The type version of the extension handler.
:type type_handler_version: str
:param auto_upgrade_minor_version: Whether the extension handler should be
automatically upgraded across minor versions.
:type auto_upgrade_minor_version: bool
:param settings: Json formatted public settings for the extension.
:type settings: object
:param protected_settings: Json formatted protected settings for the
extension.
:type protected_settings: object
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param instance_view: The virtual machine extension instance view.
:type instance_view: :class:`VirtualMachineExtensionInstanceView
<azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineExtensionInstanceView>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'publisher': {'key': 'properties.publisher', 'type': 'str'},
'virtual_machine_extension_type': {'key': 'properties.type', 'type': 'str'},
'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': 'object'},
'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
}
def __init__(self, location, tags=None, force_update_tag=None, publisher=None, virtual_machine_extension_type=None, type_handler_version=None, auto_upgrade_minor_version=None, settings=None, protected_settings=None, instance_view=None):
super(VirtualMachineExtension, self).__init__(location=location, tags=tags)
self.force_update_tag = force_update_tag
self.publisher = publisher
self.virtual_machine_extension_type = virtual_machine_extension_type
self.type_handler_version = type_handler_version
self.auto_upgrade_minor_version = auto_upgrade_minor_version
self.settings = settings
self.protected_settings = protected_settings
self.provisioning_state = None
self.instance_view = instance_view
|
the-stack_0_17559 | import os
import platform
import pytest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.integration.toolchains.apple.test_xcodetoolchain import _get_filename
from conans.test.utils.tools import TestClient
_expected_dep_xconfig = [
"HEADER_SEARCH_PATHS = $(inherited) $(HEADER_SEARCH_PATHS_{name}_{name})",
"GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(GCC_PREPROCESSOR_DEFINITIONS_{name}_{name})",
"OTHER_CFLAGS = $(inherited) $(OTHER_CFLAGS_{name}_{name})",
"OTHER_CPLUSPLUSFLAGS = $(inherited) $(OTHER_CPLUSPLUSFLAGS_{name}_{name})",
"FRAMEWORK_SEARCH_PATHS = $(inherited) $(FRAMEWORK_SEARCH_PATHS_{name}_{name})",
"LIBRARY_SEARCH_PATHS = $(inherited) $(LIBRARY_SEARCH_PATHS_{name}_{name})",
"OTHER_LDFLAGS = $(inherited) $(OTHER_LDFLAGS_{name}_{name})",
]
_expected_conf_xconfig = [
"HEADER_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"GCC_PREPROCESSOR_DEFINITIONS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_CFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_CPLUSPLUSFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"FRAMEWORK_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"LIBRARY_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_LDFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = "
]
def expected_files(current_folder, configuration, architecture, sdk, sdk_version):
files = []
name = _get_filename(configuration, architecture, sdk, sdk_version)
deps = ["hello", "goodbye"]
files.extend(
[os.path.join(current_folder, "conan_{dep}_{dep}{name}.xcconfig".format(dep=dep, name=name)) for dep in deps])
files.append(os.path.join(current_folder, "conandeps.xcconfig"))
return files
def check_contents(client, deps, configuration, architecture, sdk, sdk_version):
for dep_name in deps:
dep_xconfig = client.load("conan_{dep}_{dep}.xcconfig".format(dep=dep_name))
conf_name = "conan_{}_{}{}.xcconfig".format(dep_name, dep_name,
_get_filename(configuration, architecture, sdk, sdk_version))
assert '#include "{}"'.format(conf_name) in dep_xconfig
for var in _expected_dep_xconfig:
line = var.format(name=dep_name)
assert line in dep_xconfig
conan_conf = client.load(conf_name)
for var in _expected_conf_xconfig:
assert var.format(name=dep_name, configuration=configuration, architecture=architecture,
sdk=sdk, sdk_version=sdk_version) in conan_conf
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
def test_generator_files():
client = TestClient()
client.save({"hello.py": GenConanfile().with_settings("os", "arch", "compiler", "build_type")
.with_package_info(cpp_info={"libs": ["hello"],
"frameworks": ['framework_hello']},
env_info={})})
client.run("export hello.py hello/0.1@")
client.save({"goodbye.py": GenConanfile().with_settings("os", "arch", "compiler", "build_type")
.with_package_info(cpp_info={"libs": ["goodbye"],
"frameworks": ['framework_goodbye']},
env_info={})})
client.run("export goodbye.py goodbye/0.1@")
client.save({"conanfile.txt": "[requires]\nhello/0.1\ngoodbye/0.1\n"}, clean_first=True)
for build_type in ["Release", "Debug"]:
client.run("install . -g XcodeDeps -s build_type={} -s arch=x86_64 -s os.sdk=macosx -s os.sdk_version=12.1 --build missing".format(build_type))
for config_file in expected_files(client.current_folder, build_type, "x86_64", "macosx", "12.1"):
assert os.path.isfile(config_file)
conandeps = client.load("conandeps.xcconfig")
assert '#include "conan_hello.xcconfig"' in conandeps
assert '#include "conan_goodbye.xcconfig"' in conandeps
conan_config = client.load("conan_config.xcconfig")
assert '#include "conandeps.xcconfig"' in conan_config
check_contents(client, ["hello", "goodbye"], build_type, "x86_64", "macosx", "12.1")
|
the-stack_0_17560 | import mock
import os
import shutil
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from core import models
from core.datatools import ansible, tasks
class Ansible(TestCase):
def setUp(self):
var = models.Variable.objects.create(
name='Test_name',
value='Test_var'
)
empty_host_group = models.HostGroup.objects.create(
name="Empty test host_group",
)
host_group = models.HostGroup.objects.create(
name='Test host_group',
)
host_group.vars.add(var)
host = models.Host.objects.create(
name='Test host',
address='192.168.59.44',
)
host.groups.add(host_group)
host.vars.add(var)
other_host = models.Host.objects.create(
name='Test №2 host',
address='192.168.128.20',
)
other_host.vars.add(var)
ansible_user = models.AnsibleUser.objects.create(
name='Serega'
)
self.user = User.objects.create(
username='Serega',
password='passwd'
)
task_template = models.TaskTemplate.objects.create(
name='qwer',
playbook='/home/',
)
task = models.Task.objects.create(
playbook='/home/',
template=task_template,
user=self.user,
ansible_user=ansible_user,
)
task.host_groups.add(host_group)
task.hosts.add(host)
task.hosts.add(other_host)
task.vars.add(var)
task2 = models.Task.objects.create(
playbook="/home2/",
template=task_template,
user=self.user,
ansible_user=ansible_user,
)
# task2.host_groups.add(empty_host_group)
@mock.patch('core.datatools.ansible.create_inventory')
def test_make_command(self, create_inventory_mock):
test_path_inventory = '/tmp/test/inventory'
create_inventory_mock.return_value = test_path_inventory
self.assertEqual(models.Task.objects.get(playbook='/home/').get_ansible_command(),
'/usr/bin/ansible-playbook -i ' + test_path_inventory +
' -u Serega -e "Test_name=Test_var " -v /home/')
@mock.patch('core.datatools.ansible.tempfile.mkdtemp')
def test_create_inventory(self, tempfile_mock):
test_path_tempfile = '/tmp/test'
tempfile_mock.return_value = test_path_tempfile
if os.path.exists(test_path_tempfile):
shutil.rmtree(test_path_tempfile)
os.mkdir(test_path_tempfile)
self.assertRaises(Exception, ansible.create_inventory, models.Task.objects.get(playbook='/home2/'))
shutil.rmtree(test_path_tempfile)
def test_inventory_file_path(self):
self.assertEqual(ansible.get_inventory_file_path('qwerty 12345 test some 55'), 'test')
class Tasks(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd'
)
def test_check_progress_tasks_not_pid(self):
models.Task.objects.create(
playbook='/home/',
status='in_progress',
user=self.user,
pid=99999999,
)
task_manager = tasks.TaskManager()
task_manager.check_in_progress_tasks()
self.assertEqual(len(models.TaskLog.objects.filter(message='Task with pid 99999999 is not running')), 1)
@mock.patch('django.db.connection')
def test_start_waiting_task(self, connection):
connection.return_value = True
an_user = models.AnsibleUser.objects.create(
name='Test',
)
models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
task_manager = tasks.TaskManager()
task_manager.start_waiting_tasks()
self.assertIn('Start task with pid', models.TaskLog.objects.get().message)
self.assertEqual(models.Task.objects.get().status, 'in_progress')
@override_settings(ANSIBLE_WORK_DIR='/tmp/')
@mock.patch('django.db.connection')
def test_run_task_invalid(self, connection):
connection.return_value = True
an_user = models.AnsibleUser.objects.create(
name='Test',
)
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
host = models.Host.objects.create(
name='Test host',
address='192.168.59.44',
)
task.hosts.add(host)
task_manager = tasks.TaskManager()
task_manager.run_task(task.id)
self.assertIn('Command: ', models.TaskLog.objects.get(id=1).message)
self.assertIn('Working directory: ', models.TaskLog.objects.get(id=2).message)
self.assertIn('Failed with status code ', models.TaskLog.objects.all().last().message)
@mock.patch('asyncio.get_event_loop')
@mock.patch('django.db.connection')
def test_run_task_exception(self, connection, p):
connection.return_value = True
p.return_value = 0
an_user = models.AnsibleUser.objects.create(
name='Test',
)
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
task_manager = tasks.TaskManager()
task_manager.run_task(task.id)
self.assertIn('Progress error', models.TaskLog.objects.all().last().message)
def test_stop_task(self):
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
)
task_manager = tasks.TaskManager()
task_manager.stop_task(task)
self.assertEqual(models.TaskLog.objects.get().message, 'Task stopped')
self.assertEqual(models.Task.objects.get().status, 'stopped')
|
the-stack_0_17561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 The Kubeflow Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
args = parser.parse_args()
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
ref_file = open(os.path.join(rootdir, "build/boilerplate/boilerplate.txt"))
ref = ref_file.read().splitlines()
ref_file.close()
refs = {}
for extension in ["sh", "go", "py"]:
refs[extension] = copy.copy(ref)
prefix = ""
if extension == "go":
prefix = "//"
else:
prefix = "#"
for i in range(len(refs[extension])):
if len(refs[extension][i]) != 0:
p = prefix + " "
else:
p = prefix
refs[extension][i] = p + refs[extension][i]
return refs
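# Note added for clarity: get_refs() yields one boilerplate template per
# supported extension with the comment prefix already applied, roughly
#   {"sh": ["# Copyright ...", ...],
#    "go": ["// Copyright ...", ...],
#    "py": ["# Copyright ...", ...]}
# (the exact text comes from build/boilerplate/boilerplate.txt).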
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except: # noqa: E722
return False
data = f.read()
f.close()
extension = file_extension(filename)
ref = refs[extension]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh" or extension == "py":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
    # Replace any year matched by the date regex (2014-2020) with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'vendor', 'third_party', '_gopath', '_output', '.git']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(rootdir):
# don't visit certain dirs. This is just a performance improvement as we
# would prune these later in normalize_files(). But doing it cuts down the
# amount of filesystem walking we do and cuts down the size of the file
# list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
extension = file_extension(pathname)
if extension in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the
# real thing
regexs["year"] = re.compile('YEAR')
    # dates can be any year from 2014 through 2020, company holder names can be anything
regexs["date"] = re.compile('(2014|2015|2016|2017|2018|2019|2020)')
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n",
re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_17562 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
This file provides logic to download and build a version of the sprites
video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2].
#### References:
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential Autoencoder.
In _International Conference on Machine Learning_, 2018.
https://arxiv.org/abs/1803.02991
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import random
import zipfile
from absl import flags
from six.moves import urllib
import tensorflow as tf
__all__ = ["SpritesDataset"]
flags.DEFINE_string(
"data_dir",
default=os.path.join(
os.getenv("TEST_TMPDIR", "/tmp"),
os.path.join("disentangled_vae", "data")),
help="Directory where the dataset is stored.")
DATA_SPRITES_URL = "https://github.com/jrconway3/Universal-LPC-spritesheet/archive/master.zip"
DATA_SPRITES_DIR = "Universal-LPC-spritesheet-master"
WIDTH = 832
HEIGHT = 1344
FRAME_SIZE = 64
CHANNELS = 4
SKIN_COLORS = [
os.path.join("body", "male", "light.png"),
os.path.join("body", "male", "tanned2.png"),
os.path.join("body", "male", "darkelf.png"),
os.path.join("body", "male", "darkelf2.png"),
os.path.join("body", "male", "dark.png"),
os.path.join("body", "male", "dark2.png")
]
HAIRSTYLES = [
os.path.join("hair", "male", "messy2", "green2.png"),
os.path.join("hair", "male", "ponytail", "blue2.png"),
os.path.join("hair", "male", "messy1", "light-blonde.png"),
os.path.join("hair", "male", "parted", "white.png"),
os.path.join("hair", "male", "plain", "ruby-red.png"),
os.path.join("hair", "male", "jewfro", "purple.png")
]
TOPS = [
os.path.join(
"torso", "shirts", "longsleeve", "male", "maroon_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "teal_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "white_longsleeve.png"),
os.path.join("torso", "plate", "chest_male.png"),
os.path.join("torso", "leather", "chest_male.png"),
os.path.join("formal_male_no_th-sh", "shirt.png")
]
PANTS = [
os.path.join("legs", "pants", "male", "white_pants_male.png"),
os.path.join("legs", "armor", "male", "golden_greaves_male.png"),
os.path.join("legs", "pants", "male", "red_pants_male.png"),
os.path.join("legs", "armor", "male", "metal_pants_male.png"),
os.path.join("legs", "pants", "male", "teal_pants_male.png"),
os.path.join("formal_male_no_th-sh", "pants.png")
]
Action = namedtuple("Action", ["name", "start_row", "frames"])
ACTIONS = [
Action("walk", 8, 9),
Action("spellcast", 0, 7),
Action("slash", 12, 6)
]
Direction = namedtuple("Direction", ["name", "row_offset"])
DIRECTIONS = [
Direction("west", 1),
Direction("south", 2),
Direction("east", 3),
]
FLAGS = flags.FLAGS
def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im
def join_seq(seq):
"""Joins a sequence side-by-side into a single image."""
return tf.concat(tf.unstack(seq), 1)
def download_sprites():
"""Downloads the sprites data and returns the saved filepath."""
filepath = os.path.join(FLAGS.data_dir, DATA_SPRITES_DIR)
if not tf.io.gfile.exists(filepath):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
zip_name = "{}.zip".format(filepath)
urllib.request.urlretrieve(DATA_SPRITES_URL, zip_name)
with zipfile.ZipFile(zip_name, "r") as zip_file:
zip_file.extractall(FLAGS.data_dir)
tf.io.gfile.remove(zip_name)
return filepath
def create_character(skin, hair, top, pants):
"""Creates a character sprite from a set of attribute sprites."""
dtype = skin.dtype
hair_mask = tf.cast(hair[..., -1:] <= 0, dtype)
top_mask = tf.cast(top[..., -1:] <= 0, dtype)
pants_mask = tf.cast(pants[..., -1:] <= 0, dtype)
char = (skin * hair_mask) + hair
char = (char * top_mask) + top
char = (char * pants_mask) + pants
return char
def create_seq(character, action_metadata, direction, length=8, start=0):
"""Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
start: Index of possible frames at which to start the sequence.
Returns:
A sequence tensor.
"""
sprite_start = (action_metadata[0]+direction) * FRAME_SIZE
sprite_end = (action_metadata[0]+direction+1) * FRAME_SIZE
sprite_line = character[sprite_start:sprite_end, ...]
# Extract 64x64 patches that are side-by-side in the sprite, and limit
# to the actual number of frames for the given action.
frames = tf.stack(tf.split(sprite_line, 13, axis=1)) # 13 is a hack
frames = frames[0:action_metadata[1]]
# Extract a slice of the desired length.
# NOTE: Length could be longer than the number of frames, so tile as needed.
frames = tf.roll(frames, shift=-start, axis=0)
frames = tf.tile(frames, [2, 1, 1, 1]) # 2 is a hack
frames = frames[:length]
frames = tf.cast(frames, dtype=tf.float32)
frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])
return frames
def create_random_seq(character, action_metadata, direction, length=8):
"""Creates a random sequence."""
start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
return create_seq(character, action_metadata, direction, length, start)
def create_sprites_dataset(characters, actions, directions, channels=3,
length=8, shuffle=False, fake_data=False):
"""Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
"""
if fake_data:
dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])
else:
basedir = download_sprites()
action_names = [action.name for action in actions]
action_metadata = [(action.start_row, action.frames) for action in actions]
direction_rows = [direction.row_offset for direction in directions]
chars = tf.data.Dataset.from_tensor_slices(characters)
act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()
acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()
dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()
if shuffle:
chars = chars.shuffle(len(characters))
dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))
skin_table = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))
hair_table = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))
top_table = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))
pants_table = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))
action_table = tf.contrib.lookup.index_table_from_tensor(sorted(action_names))
def process_example(attrs, act_name, act_metadata, dir_row_offset):
"""Processes a dataset row."""
skin_name = attrs[0]
hair_name = attrs[1]
top_name = attrs[2]
pants_name = attrs[3]
if fake_data:
char = dummy_image
else:
skin = read_image(basedir + os.sep + skin_name)
hair = read_image(basedir + os.sep + hair_name)
top = read_image(basedir + os.sep + top_name)
pants = read_image(basedir + os.sep + pants_name)
char = create_character(skin, hair, top, pants)
if shuffle:
seq = create_random_seq(char, act_metadata, dir_row_offset, length)
else:
seq = create_seq(char, act_metadata, dir_row_offset, length)
seq = seq[..., :channels] # limit output channels
skin_idx = skin_table.lookup(skin_name)
hair_idx = hair_table.lookup(hair_name)
top_idx = top_table.lookup(top_name)
pants_idx = pants_table.lookup(pants_name)
act_idx = action_table.lookup(act_name)
return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx,
skin_name, hair_name, top_name, pants_name, act_name)
dataset = dataset.map(process_example)
return dataset
class SpritesDataset(object):
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
This file provides logic to download and build a version of the
sprites video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2]. The dataset contains
sprites (graphics files used to generate animated sequences) of human
characters wearing a variety of clothing, and performing a variety of
actions. The paper limits the dataset used for training to four
attribute categories (skin color, hairstyles, tops, and pants), each
of which are limited to include six variants. Thus, there are
6^4 = 1296 possible animated characters in this dataset. The
characters are shuffled and deterministically split such that 1000
characters are used for the training set, and 296 are used for the
testing set. The numbers are consistent with the paper, but the exact
split is impossible to match given the currently available paper
details. The actions are limited to three categories (walking,
casting spells, and slashing), each with three viewing angles.
Sequences of length T=8 frames are generated depicting a given
character performing a given action, starting at a random frame in the
sequence.
Attributes:
train: Training dataset with 1000 characters each performing an
action.
test: Testing dataset with 296 characters each performing an action.
#### References:
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential
Autoencoder. In _International Conference on Machine Learning_,
2018. https://arxiv.org/abs/1803.02991
"""
def __init__(self, channels=3, shuffle_train=True, fake_data=False):
"""Creates the SpritesDataset and stores train and test datasets.
The datasets yield (seq, skin label index, hair label index, top
label index, pants label index, action label index, skin label name,
    hair label name, top label name, pants label name, action label
name) tuples.
Args:
channels: Number of image channels to yield.
shuffle_train: Boolean for whether or not to shuffle the training
set.
fake_data: Boolean for whether or not to yield synthetic data.
Raises:
ValueError: If the number of training or testing examples is
        incorrect, or if there is overlap between the two datasets.
"""
super(SpritesDataset, self).__init__()
self.frame_size = FRAME_SIZE
self.channels = channels
self.length = 8
num_train = 1000
num_test = 296
characters = [(skin, hair, top, pants)
for skin in sorted(SKIN_COLORS)
for hair in sorted(HAIRSTYLES)
for top in sorted(TOPS)
for pants in sorted(PANTS)]
random.seed(42)
random.shuffle(characters)
train_chars = characters[:num_train]
test_chars = characters[num_train:]
num_train_actual = len(set(train_chars))
num_test_actual = len(set(test_chars))
num_train_test_overlap = len(set(train_chars) & set(test_chars))
if num_train_actual != num_train:
raise ValueError(
"Unexpected number of training examples: {}.".format(
num_train_actual))
if num_test_actual != num_test:
raise ValueError(
"Unexpected number of testing examples: {}.".format(
num_test_actual))
if num_train_test_overlap > 0: # pylint: disable=g-explicit-length-test
raise ValueError(
"Overlap between train and test datasets detected: {}.".format(
num_train_test_overlap))
self.train = create_sprites_dataset(
train_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=shuffle_train, fake_data=fake_data)
self.test = create_sprites_dataset(
test_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=False, fake_data=fake_data)
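# Illustrative usage sketch (not part of the original module): build the
# dataset with synthetic frames so nothing is downloaded. `train` and `test`
# are plain TF1-era tf.data.Dataset objects, so consuming them still requires
# the usual lookup-table/iterator initializers.
#
#   sprites = SpritesDataset(channels=3, fake_data=True)
#   train_batches = sprites.train.batch(16)
#   test_batches = sprites.test.batch(16)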
|
the-stack_0_17563 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")
datamodule = ObjectDetectionData.from_coco(
train_folder="data/coco128/images/train2017/",
train_ann_file="data/coco128/annotations/instances_train2017.json",
val_split=0.1,
image_size=128,
)
# 2. Build the task
model = ObjectDetector(head="efficientdet", backbone="d0", num_classes=datamodule.num_classes, image_size=128)
# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
# 4. Detect objects in a few images!
predictions = model.predict(
[
"data/coco128/images/train2017/000000000625.jpg",
"data/coco128/images/train2017/000000000626.jpg",
"data/coco128/images/train2017/000000000629.jpg",
]
)
print(predictions)
# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
|
the-stack_0_17567 | from pathlib import Path
import subprocess
data_dir = Path('out_amif_4_200_19')
vv_tif = sorted(data_dir.rglob('*_VV.mli.filt.tif'))
vh_tif = sorted(data_dir.rglob('*_VH.mli.filt.tif'))
for i in range(len(vv_tif)):
out_tif = data_dir / str(vv_tif[i].name).replace('_VV.mli', '_RGB.mli')
print(i, out_tif)
cmd = (f'gdal_merge.py '
f'-init "0 0 0" '
f'-separate '
f'-co COMPRESS=LZW '
f'-o {out_tif} {vv_tif[i]} {vh_tif[i]}')
subprocess.call(cmd, shell=True)
|
the-stack_0_17568 | """
Copyright (c) 2020, VRAI Labs and/or its affiliates. All rights reserved.
This software is licensed under the Apache License, Version 2.0 (the
"License") as published by the Apache Software Foundation.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from os import environ
from supertokens_fastapi.exceptions import raise_general_exception
class ProcessState:
__instance = None
def __init__(self):
self.service_called = False
@staticmethod
def __get_instance():
if ProcessState.__instance is None:
ProcessState.__instance = ProcessState()
return ProcessState.__instance
@staticmethod
def update_service_called(b):
instance = ProcessState.__get_instance()
instance.service_called = b
@staticmethod
def get_service_called():
return ProcessState.__get_instance().service_called
@staticmethod
def reset():
if ('SUPERTOKENS_ENV' not in environ) or (
environ['SUPERTOKENS_ENV'] != 'testing'):
raise_general_exception(
'calling testing function in non testing env')
ProcessState.__instance = None
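# Illustrative usage sketch (not part of the original module): the singleton
# state is only touched through the static methods above.
#
#   ProcessState.update_service_called(True)
#   assert ProcessState.get_service_called()
#   # reset() is intentionally restricted to SUPERTOKENS_ENV == 'testing'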
|
the-stack_0_17571 | #!/usr/bin/env python
#
# Fermatum - lightweight IoP client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses functions from TLSLite (public domain)
#
# TLSLite Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
"""Pure-Python RSA implementation."""
from __future__ import print_function
import os
import math
import base64
import binascii
import hashlib
from pem import *
def SHA1(x):
return hashlib.sha1(x).digest()
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
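# Worked example (added for clarity): the two converters are inverses for
# big-endian byte strings, e.g.
#   bytesToNumber(bytearray(b'\x01\x02')) == 258
#   numberToByteArray(258) == bytearray(b'\x01\x02')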
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    if (bytearray(mpi)[4] & 0x80) != 0: #Make sure this is a positive number
        raise AssertionError()
    b = bytearray(mpi[4:])
    return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
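# Worked examples (added for clarity):
#   numBits(255) == 8, numBits(256) == 9
#   numBytes(255) == 1, numBytes(256) == 2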
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
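# Worked example (added for clarity): invMod(7, 40) == 23 since 7*23 == 161
# == 4*40 + 1; invMod(6, 40) == 0 because gcd(6, 40) != 1, so no inverse exists.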
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the 168 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
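# Worked example (added for clarity): isPrime(97) is True (97 appears in the
# sieve), while isPrime(91) is False because trial division finds 91 % 7 == 0.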
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
class RSAKey(object):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
return self.d != 0
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self):
return False
def generate(bits):
        key = RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
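# Illustrative usage sketch (not part of the original module). The key size
# and messages are arbitrary demo values; pure-Python key generation is slow,
# so keep the bit length small when experimenting.
if __name__ == "__main__":
    demo_key = RSAKey.generate(512)
    signature = demo_key.hashAndSign(b"hello world")          # PKCS1-SHA1 sign
    assert demo_key.hashAndVerify(signature, b"hello world")
    ciphertext = demo_key.encrypt(bytearray(b"secret"))       # PKCS1 encrypt
    assert demo_key.decrypt(ciphertext) == bytearray(b"secret")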
|
the-stack_0_17572 | import networkx as nx
from django.db import connection
from api.models import Person
def build_social_graph(user):
query = """
with face as (
select photo_id, person_id, name
from api_face join api_person on api_person.id = person_id
where person_label_is_inferred = false
)
select f1.name, f2.name
from face f1 join face f2 using (photo_id)
where f1.person_id != f2.person_id
group by f1.name, f2.name
"""
G = nx.Graph()
with connection.cursor() as cursor:
cursor.execute(query)
links = cursor.fetchall()
if len(links) == 0:
return {"nodes": [], "links": []}
for link in links:
G.add_edge(link[0], link[1])
pos = nx.spring_layout(G, k=1 / 2, scale=1000, iterations=20)
return {
"nodes": [{"id": node, "x": pos[0], "y": pos[1]} for node, pos in pos.items()],
"links": [{"source": pair[0], "target": pair[1]} for pair in G.edges()],
}
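# Usage note (added for clarity): the returned dict is ready to feed a
# force-directed graph on the frontend, e.g.
#   {"nodes": [{"id": "Alice", "x": 12.3, "y": -45.6}, ...],
#    "links": [{"source": "Alice", "target": "Bob"}, ...]}
# The person names and coordinates here are placeholder examples.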
def build_ego_graph(person_id):
G = nx.Graph()
person = Person.objects.prefetch_related("faces__photo__faces__person").filter(
id=person_id
)[0]
for this_person_face in person.faces.all():
for other_person_face in this_person_face.photo.faces.all():
G.add_edge(person.name, other_person_face.person.name)
nodes = [{"id": node} for node in G.nodes()]
links = [{"source": pair[0], "target": pair[1]} for pair in G.edges()]
res = {"nodes": nodes, "links": links}
return res
|
the-stack_0_17573 | from django.conf import settings
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
import olympia.core.logger
from olympia.amo.models import FakeEmail
log = olympia.core.logger.getLogger('z.amo.mail')
class DevEmailBackend(BaseEmailBackend):
"""Log emails in the database, send allowed addresses for real though.
Used for development environments when we don't want to send out
real emails. This gets swapped in as the email backend when
`settings.SEND_REAL_EMAIL` is disabled.
BUT even if `settings.SEND_REAL_EMAIL` is disabled, if the targeted
email address is in the `settings.EMAIL_QA_ALLOW_LIST` list,
the email will be sent.
"""
def send_messages(self, messages):
"""Save a `FakeEmail` object viewable within the admin.
If one of the target email addresses is in
        `settings.EMAIL_QA_ALLOW_LIST`, it sends a real email message.
"""
log.debug('Sending dev mail messages.')
qa_messages = []
for msg in messages:
FakeEmail.objects.create(message=msg.message().as_string())
qa_emails = set(msg.to).intersection(settings.EMAIL_QA_ALLOW_LIST)
if qa_emails:
if len(msg.to) != len(qa_emails):
# We need to replace the recipients with the QA
# emails only prior to send the message for real.
# We don't want to send real emails to people if
# they happen to also be in the recipients together
# with white-listed emails
msg.to = list(qa_emails)
qa_messages.append(msg)
if qa_messages:
log.debug('Sending real mail messages to QA.')
connection = mail.get_connection()
connection.send_messages(qa_messages)
return len(messages)
def view_all(self):
"""Useful for displaying messages in admin panel."""
return (FakeEmail.objects.values_list('message', flat=True)
.order_by('-created'))
def clear(self):
return FakeEmail.objects.all().delete()
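# Illustrative settings sketch (values are assumptions, not part of this
# module): the backend is driven by the two settings referenced above.
#   SEND_REAL_EMAIL = False                    # swaps this backend in
#   EMAIL_QA_ALLOW_LIST = ['qa@example.com']   # these addresses still get real mail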
|
the-stack_0_17577 | from __future__ import annotations
import os
import signal
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
import pexpect
from cleo.terminal import Terminal
from shellingham import ShellDetectionFailure
from shellingham import detect_shell
from poetry.utils._compat import WINDOWS
if TYPE_CHECKING:
from poetry.utils.env import VirtualEnv
class Shell:
"""
Represents the current shell.
"""
_shell = None
def __init__(self, name: str, path: str) -> None:
self._name = name
self._path = path
@property
def name(self) -> str:
return self._name
@property
def path(self) -> str:
return self._path
@classmethod
def get(cls) -> Shell:
"""
Retrieve the current shell.
"""
if cls._shell is not None:
return cls._shell
try:
name, path = detect_shell(os.getpid())
except (RuntimeError, ShellDetectionFailure):
shell = None
if os.name == "posix":
shell = os.environ.get("SHELL")
elif os.name == "nt":
shell = os.environ.get("COMSPEC")
if not shell:
raise RuntimeError("Unable to detect the current shell.")
name, path = Path(shell).stem, shell
cls._shell = cls(name, path)
return cls._shell
def activate(self, env: VirtualEnv) -> int | None:
activate_script = self._get_activate_script()
bin_dir = "Scripts" if WINDOWS else "bin"
activate_path = env.path / bin_dir / activate_script
# mypy requires using sys.platform instead of WINDOWS constant
# in if statements to properly type check on Windows
if sys.platform == "win32":
if self._name in ("powershell", "pwsh"):
args = ["-NoExit", "-File", str(activate_path)]
else:
# /K will execute the bat file and
# keep the cmd process from terminating
args = ["/K", str(activate_path)]
completed_proc = subprocess.run([self.path, *args])
return completed_proc.returncode
import shlex
terminal = Terminal()
with env.temp_environ():
c = pexpect.spawn(
self._path, ["-i"], dimensions=(terminal.height, terminal.width)
)
if self._name == "zsh":
c.setecho(False)
c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}")
def resize(sig: Any, data: Any) -> None:
terminal = Terminal()
c.setwinsize(terminal.height, terminal.width)
signal.signal(signal.SIGWINCH, resize)
# Interact with the new shell.
c.interact(escape_character=None)
c.close()
sys.exit(c.exitstatus)
def _get_activate_script(self) -> str:
if self._name == "fish":
suffix = ".fish"
elif self._name in ("csh", "tcsh"):
suffix = ".csh"
elif self._name in ("powershell", "pwsh"):
suffix = ".ps1"
elif self._name == "cmd":
suffix = ".bat"
else:
suffix = ""
return "activate" + suffix
def _get_source_command(self) -> str:
if self._name in ("fish", "csh", "tcsh"):
return "source"
return "."
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self._name}", "{self._path}")'
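# Illustrative usage sketch (not part of the original module): `env` is
# assumed to be a poetry.utils.env.VirtualEnv obtained elsewhere.
#   shell = Shell.get()   # detect the invoking shell, or fall back to $SHELL / %COMSPEC%
#   shell.activate(env)   # spawn that shell with the virtualenv activated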
|
the-stack_0_17580 | # author: WatchDogOblivion
# description: TODO
# WatchDogs Blind SQL Request Response Service
# pylint: disable=R0904
import time
import copy
from typing import Callable # pylint: disable=unused-import
from collections import OrderedDict
from multiprocessing.pool import ThreadPool
from pathos.multiprocessing import ProcessingPool
from watchdogs.base.models import AllArgs # pylint: disable=unused-import
from watchdogs.utils import GeneralUtility
from watchdogs.web.models import BlindSQLRRHelper # pylint: disable=unused-import
from watchdogs.web.models.Requests import Request # pylint: disable=unused-import
from watchdogs.web.parsers import BlindSQLArgs
from watchdogs.web.services import RequestResponseService
from watchdogs.utils.Constants import (COUNT, LENGTH, NAME, VALUE, VERSION)
from watchdogs.web.webutils.BlindSQLRRQueries import CIPH, ODPH, ORPH, RIPH, BlindSQLRRQueries
KEY_MAP = OrderedDict([(VERSION, '@@version'), (NAME, 'database()')])
OPERAND_LIMIT = 10000
ERROR = -999
ERROR_STRING = '-999'
EXCEED_MSG = "Exceeded maximum operand limit."
class BlindSQLRRService(RequestResponseService):
@staticmethod
def resetEndpoint(request, originalEndpoint):
    # type: (Request, str) -> Request
requestInfo = request.getRequestInfo()
requestInfo.setEndpoint(originalEndpoint)
request.setRequestInfo(requestInfo)
return request
@staticmethod
def updateEndpoint(request, query):
# type: (Request, str) -> Request
requestInfo = request.getRequestInfo()
requestInfo.setEndpoint(requestInfo.getEndpoint() + query)
request.setRequestInfo(requestInfo)
return request
@staticmethod
def getMultiprocessingArgs(allArgs, request, helper, argumentSize, startIndex=0):
# type: (AllArgs, Request, BlindSQLRRHelper, int, int) -> tuple
allArgsArray = []
requestArray = []
helperArray = []
index = 0
while index < argumentSize:
allArgsArray.append(allArgs)
requestArray.append(request)
helperArray.append(helper)
index += 1
return (allArgsArray, requestArray, helperArray, range(startIndex, argumentSize))
@staticmethod
def multithread(method, allArgs, request, helper, processes):
    # type: (Callable, AllArgs, Request, BlindSQLRRHelper, int) -> list
jobs = []
results = []
pool = ThreadPool(processes=processes)
for index in range(processes):
jobs.append(pool.apply_async(method, (allArgs, request, copy.deepcopy(helper), index)))
for job in jobs:
try:
results.append(job.get())
except Exception as e:
results.append(e)
return results
def multiprocess(self, method, allArgs, request, helper, argumentSize, startIndex=0):
    # type: (Callable, AllArgs, Request, BlindSQLRRHelper, int, int) -> list
blindSQLArgs = allArgs.getArgs(BlindSQLArgs)
pool = ProcessingPool(blindSQLArgs.processPoolSize)
args = self.getMultiprocessingArgs(allArgs, request, helper, argumentSize, startIndex)
return pool.map(method, *args)
def getDbCharacterInteger(self, allArgs, request, helper, index):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> int
helper.setCharacterIndex(index)
helper.setQueryOperand(0)
integerValue = self.operandBinarySearch(allArgs, request, helper)
return integerValue
def getDatabaseValue(self, allArgs, request, helper, dbValueLength):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> str
dbCharacterIntegers = self.multiprocess(self.getDbCharacterInteger, allArgs, request, helper,
dbValueLength)
if (ERROR in dbCharacterIntegers):
return ERROR_STRING
nullByteCount = 0
databaseValue = ""
for characterInteger in dbCharacterIntegers:
if (characterInteger == 0):
nullByteCount += 1
characterValue = chr(characterInteger)
databaseValue += characterValue
if (nullByteCount > 0):
remainingCharacterIntegers = self.multiprocess(self.getDbCharacterInteger, allArgs, request, helper,
nullByteCount + dbValueLength, dbValueLength)
for characterInteger in remainingCharacterIntegers:
characterValue = chr(characterInteger)
databaseValue += characterValue
return databaseValue
def getRowValue(self, allArgs, request, helper, index):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> str
helper.setRowIndex(index)
helper.setQueryOperand(0)
helper.setIsRowCheck(True)
BlindSQLRRQueries.setQuery(LENGTH, allArgs, helper)
valueLength = self.operandBinarySearch(allArgs, request, helper)
if (valueLength == ERROR):
return ERROR_STRING
helper.setIsRowCharacterCheck(True)
BlindSQLRRQueries.setQuery(VALUE, allArgs, helper)
return self.getDatabaseValue(allArgs, request, helper, valueLength)
def getInvalidResponseLength(self, allArgs, request):
# type: (AllArgs, Request) -> int
blindSQLArgs = allArgs.getArgs(BlindSQLArgs)
query = "{0}{1}AND{1}1=2{2}"
query = query.format(blindSQLArgs.terminator, blindSQLArgs.wordDelimiter, blindSQLArgs.commentOut)
query = GeneralUtility.urlEncode(query)
response = self.sendRequest(allArgs, self.updateEndpoint(request, query))
return int(self.getFinalResponse(response).getResponseLength())
def operatorOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
self.resetEndpoint(request, helper.getOriginalEndpoint())
query = helper.getQuery()
replaced = query.replace(ORPH, helper.getQueryOperator())
replaced = replaced.replace(ODPH, str(helper.getQueryOperand()))
if (helper.isCharacterCheck() or helper.isRowCharacterCheck()):
replaced = replaced.replace(CIPH, str(helper.getCharacterIndex()))
if (helper.isRowCheck() or helper.isRowCharacterCheck()):
replaced = replaced.replace(RIPH, str(helper.getRowIndex()))
query = GeneralUtility.urlEncode(replaced)
response = self.sendRequest(allArgs, self.updateEndpoint(request, query))
responseLength = self.getFinalResponse(response).getResponseLength()
if (int(responseLength) == helper.getInvalidResponseLength()):
return False
return True
def equalsOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
helper.setQueryOperator("=")
return self.operatorOperand(allArgs, request, helper)
def isLessThanOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
helper.setQueryOperator("<")
return self.operatorOperand(allArgs, request, helper)
def operandBinarySearch(self, allArgs, request, helper):
#type:(AllArgs, Request, BlindSQLRRHelper)->int
operand = helper.getQueryOperand()
if (self.equalsOperand(allArgs, request, helper)):
return operand
index = 0
while (True):
if (helper.getQueryOperand() > OPERAND_LIMIT):
return ERROR
helper.setQueryOperand(2**index + operand)
if (self.isLessThanOperand(allArgs, request, helper)):
helper.setQueryOperand(2**(index - 1) + operand)
return self.operandBinarySearch(allArgs, request, helper)
index += 1
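    # Added for clarity: operandBinarySearch() finds the hidden integer with an
    # exponential probe followed by recursion. For a hidden value of 37 starting
    # from operand 0 it probes 1, 2, 4, ..., 64, detects 37 < 64, recurses from
    # 32, and repeats the doubling pattern (33, 34, 36, 40 -> 36; then 37, 38 -> 37)
    # until the equality check succeeds. ERROR is returned once a probe exceeds
    # OPERAND_LIMIT.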
def getKeyValue(self, key, allArgs, request, helper):
# type: (str, AllArgs, Request, BlindSQLRRHelper) -> str
if (key == VERSION):
helper.setQueryKey(KEY_MAP[VERSION])
elif (key == NAME):
helper.setQueryKey(KEY_MAP[NAME])
else:
helper.setQueryKey(key)
helper.setQueryOperand(0)
helper.setQuery(BlindSQLRRQueries.getLengthQuery(allArgs, helper))
valueLength = self.operandBinarySearch(allArgs, request, helper)
if (valueLength == ERROR):
return ERROR_STRING
helper.setIsCharacterCheck(True)
helper.setQuery(BlindSQLRRQueries.getValueQuery(allArgs, helper))
return self.getDatabaseValue(allArgs, request, helper, valueLength)
def setDataBaseVersion(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
if (not helper.getDatabaseVersion()):
databaseVersion = self.getKeyValue(VERSION, allArgs, request, helper)
if (databaseVersion == ERROR_STRING):
print(EXCEED_MSG)
helper.setDatabaseVersion(databaseVersion)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
def setCurrentDatabase(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
if (not helper.getDatabaseName()):
databaseName = self.getKeyValue(NAME, allArgs, request, helper)
if (databaseName == ERROR_STRING):
print(EXCEED_MSG)
helper.setDatabaseName(databaseName)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
def setDataList(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
helper.setQueryOperand(0)
BlindSQLRRQueries.setQuery(COUNT, allArgs, helper)
valueCount = self.operandBinarySearch(allArgs, request, helper)
if (valueCount < 1):
print("There were no entries in the database for your request")
helper.setDataList([])
results = self.multithread(self.getRowValue, allArgs, request, helper, valueCount)
if (ERROR_STRING in results):
print(EXCEED_MSG)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
helper.setDataList(results)
|
the-stack_0_17581 | import sys
import iris
from cosmic.util import load_module, filepath_regrid
def main(target_filename, models_settings, model, year, season):
input_dir = models_settings[model]['input_dir']
print(f'{model}, {year}, {season}')
input_filename = input_dir / f'{model}.highresSST-present.r1i1p1f1.{year}.{season}.asia_precip.nc'
output_filename = input_filename.parent / f'{input_filename.stem}.N1280.nc'
done_filename = (output_filename.parent / (output_filename.name + '.done'))
if done_filename.exists():
print(f'Skipping: {done_filename.name} exists')
return
regridded_cube = filepath_regrid(input_filename, target_filename)
iris.save(regridded_cube, str(output_filename), zlib=True)
done_filename.touch()
if __name__ == '__main__':
config = load_module(sys.argv[1])
config_key = sys.argv[2]
main(config.TARGET_FILENAME, config.MODELS, *config.SCRIPT_ARGS[config_key])
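# Example invocation (illustrative; the script, config module and key names below
# are placeholders for whatever the surrounding workflow defines):
#   python regrid_model.py my_regrid_settings.py some_model_year_season_key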
|
the-stack_0_17583 | """Evaluation metric implementations for audio synthesis tasks"""
import glob
import os
import warnings
from pathlib import Path
from typing import Any, Callable, Iterable, Optional, Type, Union
import numpy as np
import torch
from scipy.io import wavfile
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
class WavDataSet(Dataset):
"""Torch dataset for wavfile directories"""
def __init__(
self,
samples: str,
labels: Optional[Iterable[Any]] = None,
transform: Optional[Callable[[np.ndarray], Any]] = None,
train: bool = True,
dtype: Type = torch.FloatTensor,
):
"""
        Args:
            samples: Path to directory containing audio samples (wav files are supported)
            labels: Optional iterable of ground-truth labels, one per sample (required when train is False)
            transform: Optionally provide a preprocessing function to transform the audio data before predicting the
                class with the classifier
            train: If True the dataset yields audio only; if False it yields (audio, label) pairs
            dtype: Datatype to cast the loaded numpy array of audio data to (done before passing to transform function)
"""
self.files = glob.glob(os.path.join(Path(samples), "*.wav"))
self.labels = np.array(labels) if labels is not None else None
self.transform = transform if transform is not None else lambda audio: audio
self.train = train
self.dtype = dtype
if not train:
if labels is None:
raise ValueError("Cannot create test dataloader without labels")
            if self.labels.shape[0] != len(self.files):
                raise ValueError(
                    f"The number of labels provided does not match the number of samples, got {self.labels.shape[0]} labels"
                    f" and {len(self.files)} samples"
                )
def __len__(self):
return len(self.files)
def __getitem__(self, item):
file = self.files[item]
_, audio = wavfile.read(file)
audio = self.transform(torch.from_numpy(audio).to(dtype=self.dtype))
        return audio if self.train else (audio, self.labels[item])
def _check_cuda(cuda: bool):
# Make sure cuda is available if using cuda
if cuda and not torch.cuda.is_available():
raise EnvironmentError("CUDA set to true but no CUDA enabled device available")
# Warn if cuda is available and not using
if not cuda and torch.cuda.is_available():
warnings.warn("A CUDA enabled device is available, but cuda is not set to True")
def audio_inception_score(
classifier: Callable[..., np.ndarray],
samples: Union[str, Path],
transform: Optional[Callable[[np.ndarray], Any]] = None,
batch_size: int = 4,
splits: int = 10,
n_classes: int = 10,
shuffle: bool = True,
cuda: bool = True,
) -> np.ndarray:
"""Inception score implementation adapted for audio synthesis performance evaluation
Based on https://github.com/openai/improved-gan/blob/master/inception_score/model.py
From Improved Techniques for Training GANs (Goodfellow, 2016) https://arxiv.org/pdf/1606.03498.pdf
Args:
classifier: Classification model (in evaluation mode) which classifies an audio sample into <n_classes> by
computing confidence scores for each class, for each sample
samples: Path to directory containing audio samples (wav files are supported)
transform: Optionally provide a preprocessing function to transform the audio data before predicting the class
with the classifier
batch_size: Integer representing the number of samples to predict on in each iteration
splits: Integer representing the number of splits to chunk the predictions into, producing an inception score
for each chunk
n_classes: The number of classes predicted by the classification model
shuffle: Boolean flag, whether or not to shuffle the dataset
cuda: Boolean flag, whether or not to use a CUDA device for the classification model
Returns:
<splits> x 1 np.ndarray containing the computed inception score for each split
"""
_check_cuda(cuda)
dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor # CUDA type if on cuda
dataloader = DataLoader(
        WavDataSet(samples, None, transform, True, dtype), batch_size=batch_size, shuffle=shuffle, num_workers=0
)
# Must have >= 1 sample per split
n = len(dataloader.dataset)
if splits > n:
raise ValueError(f"Cannot compute inception score for {splits} splits from only {n} samples")
# Process classification predictions in batches
predictions = np.empty((n, n_classes), dtype=np.float64)
for i, batch in enumerate(dataloader):
preds = classifier(batch)
        preds = F.softmax(preds, dim=1).data.cpu().numpy()
predictions[i * batch_size : (i + 1) * batch_size] = preds
# Compute inception scores
scores = np.empty(splits, dtype=np.float64)
split_size = n // splits
for i in range(splits):
preds_split = predictions[i * split_size : (i + 1) * split_size]
kl = preds_split * (np.log(preds_split) - np.log(np.expand_dims(np.mean(preds_split, axis=0), axis=0)))
kl = np.exp(np.mean(np.sum(kl, axis=1)))
scores[i] = kl
return scores
def pitch_accuracy_entropy(
classifier: Callable[..., np.ndarray],
samples: str,
labels: np.ndarray,
transform: Optional[Callable[[np.ndarray], Any]] = None,
batch_size: int = 4,
shuffle: bool = True,
cuda: bool = True,
):
"""Implementation of pitch accuracy and pitch entropy as described in GANSynth: Adversarial Neural Audio Synthesis
(Engel, 2019) https://arxiv.org/abs/1902.08710
Args:
classifier: Classification model (in evaluation mode) which classifies an audio sample into <n_classes> by
computing confidence scores for each class, for each sample
samples: Path to directory containing audio samples (wav files are supported)
labels: Numpy array of integers representing the true label for each corresponding sample (index of label)
transform: Optionally provide a preprocessing function to transform the audio data before predicting the class
with the classifier
batch_size: Integer representing the number of samples to predict on in each iteration
shuffle: Boolean flag, whether or not to shuffle the dataset
cuda: Boolean flag, whether or not to use a CUDA device for the classification model
Returns:
        Tuple of (pitch accuracy, pitch entropy): the fraction of samples whose predicted class matches
        the true label, and the entropy of the predicted-class distribution
"""
_check_cuda(cuda)
dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor # CUDA type if on cuda
    dataloader = DataLoader(
        WavDataSet(samples, labels, transform, False, dtype), batch_size=batch_size, shuffle=shuffle, num_workers=0
    )
    # Collect predictions together with their ground-truth labels so that accuracy
    # is computed correctly even when the dataloader shuffles the samples
    predictions = np.empty(len(dataloader.dataset), dtype=np.int32)
    true_labels = np.empty(len(dataloader.dataset), dtype=np.int32)
    for i, (batch, batch_labels) in enumerate(dataloader):
        preds = classifier(batch)
        preds = torch.argmax(preds, dim=1).cpu().numpy()
        predictions[i * batch_size : (i + 1) * batch_size] = preds
        true_labels[i * batch_size : (i + 1) * batch_size] = batch_labels.cpu().numpy()
    probs = np.array([(predictions == i).mean() for i in range(labels.min(), labels.max() + 1)], dtype=np.float64)
    return (true_labels == predictions).mean(), -(probs @ np.log(probs))  # Compute accuracy and entropy of predictions
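# Illustrative usage (added sketch, not part of the original module). Assumes a
# hypothetical pretrained `pitch_classifier` nn.Module in eval mode, a directory
# of generated wav files at "generated/", and an integer label array `labels`:
#
#   scores = audio_inception_score(pitch_classifier, "generated/", batch_size=8,
#                                   splits=10, n_classes=10, cuda=False)
#   accuracy, entropy = pitch_accuracy_entropy(pitch_classifier, "generated/",
#                                              labels, batch_size=8, cuda=False)
#   print(scores.mean(), accuracy, entropy)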
|
the-stack_0_17585 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from gcp_devrel.testing import eventually_consistent
from gcp_devrel.testing.flaky import flaky
from google.cloud import pubsub_v1
import google.api_core.exceptions
import mock
import pytest
import subscriber
PROJECT = os.environ['GCLOUD_PROJECT']
TOPIC = 'subscription-test-topic'
SUBSCRIPTION = 'subscription-test-subscription'
SUBSCRIPTION_SYNC1 = 'subscription-test-subscription-sync1'
SUBSCRIPTION_SYNC2 = 'subscription-test-subscription-sync2'
ENDPOINT = 'https://{}.appspot.com/push'.format(PROJECT)
NEW_ENDPOINT = 'https://{}.appspot.com/push2'.format(PROJECT)
@pytest.fixture(scope='module')
def publisher_client():
yield pubsub_v1.PublisherClient()
@pytest.fixture(scope='module')
def topic(publisher_client):
topic_path = publisher_client.topic_path(PROJECT, TOPIC)
try:
publisher_client.delete_topic(topic_path)
except Exception:
pass
publisher_client.create_topic(topic_path)
yield topic_path
@pytest.fixture(scope='module')
def subscriber_client():
yield pubsub_v1.SubscriberClient()
@pytest.fixture
def subscription(subscriber_client, topic):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
try:
subscriber_client.create_subscription(subscription_path, topic=topic)
except google.api_core.exceptions.AlreadyExists:
pass
yield subscription_path
@pytest.fixture
def subscription_sync1(subscriber_client, topic):
subscription_sync_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION_SYNC1)
try:
subscriber_client.delete_subscription(subscription_sync_path)
except Exception:
pass
subscriber_client.create_subscription(subscription_sync_path, topic=topic)
yield subscription_sync_path
@pytest.fixture
def subscription_sync2(subscriber_client, topic):
subscription_sync_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION_SYNC2)
try:
subscriber_client.delete_subscription(subscription_sync_path)
except Exception:
pass
subscriber_client.create_subscription(subscription_sync_path, topic=topic)
yield subscription_sync_path
def test_list_in_topic(subscription, capsys):
@eventually_consistent.call
def _():
subscriber.list_subscriptions_in_topic(PROJECT, TOPIC)
out, _ = capsys.readouterr()
assert subscription in out
def test_list_in_project(subscription, capsys):
@eventually_consistent.call
def _():
subscriber.list_subscriptions_in_project(PROJECT)
out, _ = capsys.readouterr()
assert subscription in out
def test_create(subscriber_client):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
subscriber.create_subscription(PROJECT, TOPIC, SUBSCRIPTION)
@eventually_consistent.call
def _():
assert subscriber_client.get_subscription(subscription_path)
def test_create_push(subscriber_client):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
subscriber.create_push_subscription(PROJECT, TOPIC, SUBSCRIPTION, ENDPOINT)
@eventually_consistent.call
def _():
assert subscriber_client.get_subscription(subscription_path)
def test_delete(subscriber_client, subscription):
subscriber.delete_subscription(PROJECT, SUBSCRIPTION)
@eventually_consistent.call
def _():
with pytest.raises(Exception):
subscriber_client.get_subscription(subscription)
def test_update(subscriber_client, subscription, capsys):
subscriber.update_subscription(PROJECT, SUBSCRIPTION, NEW_ENDPOINT)
out, _ = capsys.readouterr()
assert 'Subscription updated' in out
def _publish_messages(publisher_client, topic):
for n in range(5):
data = u'Message {}'.format(n).encode('utf-8')
future = publisher_client.publish(
topic, data=data)
future.result()
def _publish_messages_with_custom_attributes(publisher_client, topic):
data = u'Test message'.encode('utf-8')
future = publisher_client.publish(topic, data=data, origin='python-sample')
future.result()
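# Test helper: the receive_* samples block indefinitely, sleeping 60 seconds at a
# time while listening. Patching time.sleep shortens that wait to 5 seconds and
# then raises a sentinel RuntimeError('sigil'), which the tests below catch with
# pytest.raises(..., match='sigil') to break out of the otherwise endless loop.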
def _make_sleep_patch():
real_sleep = time.sleep
def new_sleep(period):
if period == 60:
real_sleep(5)
raise RuntimeError('sigil')
else:
real_sleep(period)
return mock.patch('time.sleep', new=new_sleep)
@flaky
def test_receive(publisher_client, topic, subscription, capsys):
_publish_messages(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages(PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Listening' in out
assert subscription in out
assert 'Message 1' in out
def test_receive_synchronously(
publisher_client, topic, subscription_sync1, capsys):
_publish_messages(publisher_client, topic)
subscriber.synchronous_pull(PROJECT, SUBSCRIPTION_SYNC1)
out, _ = capsys.readouterr()
assert 'Done.' in out
def test_receive_synchronously_with_lease(
publisher_client, topic, subscription_sync2, capsys):
_publish_messages(publisher_client, topic)
subscriber.synchronous_pull_with_lease_management(
PROJECT, SUBSCRIPTION_SYNC2)
out, _ = capsys.readouterr()
assert 'Done.' in out
def test_receive_with_custom_attributes(
publisher_client, topic, subscription, capsys):
_publish_messages_with_custom_attributes(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages_with_custom_attributes(
PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Test message' in out
assert 'origin' in out
assert 'python-sample' in out
def test_receive_with_flow_control(
publisher_client, topic, subscription, capsys):
_publish_messages(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages_with_flow_control(
PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Listening' in out
assert subscription in out
assert 'Message 1' in out
|
the-stack_0_17586 | # -*- coding: utf-8 -*-
# @Time : 2021/8/6 15:01
# @Author : zc
# @Desc : Response entity for the "query current stock" (on-hand inventory) API
from chanjet_openapi_python_sdk.chanjet_response import ChanjetResponse
class QueryCurrentStockResponse(ChanjetResponse):
def __init__(self, data=None):
self.WarehouseName = ""
self.WarehouseCode = ""
self.InvLocationName = ""
self.InvLocationCode = ""
self.InventoryCode = ""
self.InventoryName = ""
self.InventoryClassCode = ""
self.InventoryClassName = ""
self.DefaultBarCode = ""
self.InvBarCode = ""
self.UnitName = ""
self.Specification = ""
self.Brand = ""
self.IsSingleUnit = ""
self.AvailableQuantity = ""
self.ExistingQuantity = ""
self.UnitName2 = ""
self.AvailableQuantity2 = ""
self.ExistingQuantity2 = ""
self.TS = ""
self.TotalCount = ""
self.SkuCode = ""
self.Batch = ""
self.ProductionDate = ""
self.ExpiryDate = ""
self.DynamicPropertyKeys = []
self.DynamicPropertyTitles = []
self.DynamicPropertyValues = []
if data:
self.__dict__ = data
def __str__(self):
return str(self.__dict__)
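# Illustrative usage (added sketch): a raw response dict from the Chanjet API can
# be passed straight to the constructor, which swaps the instance's attribute
# dict for the returned fields.
#
#   resp = QueryCurrentStockResponse(data={"InventoryCode": "A001", "ExistingQuantity": "12"})
#   print(resp.InventoryCode, resp.ExistingQuantity)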
|
the-stack_0_17587 | # If not stated otherwise in this file or this component's LICENSE file the
# following copyright and licenses apply:
#
# Copyright 2020 Sky UK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_utils
from os.path import basename
tests = [
test_utils.Test("Logging to file",
"filelogging",
["hello world 1","hello world 2","hello world 10"],
"Prints hello world 10 times, output should be contained in the logfile"),
test_utils.Test("No logging",
"nolog",
"",
"Starts a container without any logfile"),
]
def execute_test():
if test_utils.selected_platform == test_utils.Platforms.no_selection:
return test_utils.print_unsupported_platform(basename(__file__), test_utils.selected_platform)
with test_utils.dobby_daemon():
output_table = []
for test in tests:
result = test_container(test.container_id, test.expected_output)
output = test_utils.create_simple_test_output(test, result[0], result[1])
output_table.append(output)
test_utils.print_single_result(output)
return test_utils.count_print_results(output_table)
def test_container(container_id, expected_output):
"""Runs container and check if output contains expected output
Parameters:
container_id (string): name of container to run
expected_output (string): output that should be provided by containter
Returns:
(pass (bool), message (string)): Returns if expected output found and message
"""
test_utils.print_log("Running %s container test" % container_id, test_utils.Severity.debug)
with test_utils.untar_bundle(container_id) as bundle_path:
launch_result = test_utils.launch_container(container_id, bundle_path)
if launch_result:
return validate_output_file(container_id, expected_output)
return False, "Container did not launch successfully"
def validate_output_file(container_id, expected_output):
"""Helper function for finding if expected output is inside log of container
Parameters:
container_id (string): name of container to run
expected_output (string): output that should be provided by containter
Returns:
(pass (bool), message (string)): Returns if expected output found and message
"""
log = test_utils.get_container_log(container_id)
# If given a list of outputs to check, loop through and return false is one of them is not in the output
if isinstance(expected_output, list):
for text in expected_output:
if text.lower() not in log.lower():
return False, "Output file did not contain expected text"
return True, "Test passed"
# Otherwise we've been given a string, so just check that one string
if expected_output.lower() in log.lower():
return True, "Test passed"
else:
return False, "Output file did not contain expected text"
if __name__ == "__main__":
test_utils.parse_arguments(__file__, True)
execute_test()
|
the-stack_0_17589 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='configuration-agent',
version='0.1.1',
description='IoT configuration agent',
long_description=readme,
license='Intel Proprietary (see \'licenses\' directory)',
packages=find_packages(exclude=['*.*', 'mqttclient']),
include_package_data=True,
install_requires=['nose', 'packaging', 'future'],
test_suite='nose.collector',
tests_require=['nose'])
|
the-stack_0_17592 | import cv2
import numpy as np
import torch
def get_frames(filepath, max_frames=1e7, verbose=1000):
vidcap = cv2.VideoCapture(filepath)
success,image = vidcap.read()
count = 0
data = []
    while success and count < max_frames:
        # read the next frame; stop cleanly when the stream is exhausted
        success, image = vidcap.read()
        if not success:
            break
        data.append(image / 255)
        count += 1
        if verbose != -1 and count % verbose == 0:
            print("Loading video %s: %.2f%%" % (filepath, count * 100 / max_frames))
data = np.array(data)
data = torch.as_tensor(data)
return data.permute(0, 3, 1, 2)
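# Example (illustrative; assumes a local "clip.mp4"):
#   frames = get_frames("clip.mp4", max_frames=100, verbose=50)
#   print(frames.shape)  # -> torch.Size([N, 3, H, W]) with values scaled to [0, 1]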
def decompose(file_path, save_path, batch_size=64):
import os
vidcap = cv2.VideoCapture(file_path)
success,preimage = vidcap.read()
count = 0
fake_count = 0
while success:
# save frame as JPEG file
success = vidcap.grab()
if count%1==0 and count > 59950:
success,image = vidcap.read()
image = torch.from_numpy(np.transpose((image / 255), (2, 0, 1))).unsqueeze(0)
torch.save(image, os.path.join(save_path, 'frame' + str(fake_count)))
fake_count += 1
print(fake_count)
count += 1
|
the-stack_0_17593 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .web_api_connected_service_ref import WebApiConnectedServiceRef
class WebApiConnectedServiceDetails(WebApiConnectedServiceRef):
"""WebApiConnectedServiceDetails.
:param id:
:type id: str
:param url:
:type url: str
:param connected_service_meta_data: Meta data for service connection
:type connected_service_meta_data: :class:`WebApiConnectedService <core.v4_0.models.WebApiConnectedService>`
:param credentials_xml: Credential info
:type credentials_xml: str
:param end_point: Optional uri to connect directly to the service such as https://windows.azure.com
:type end_point: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'connected_service_meta_data': {'key': 'connectedServiceMetaData', 'type': 'WebApiConnectedService'},
'credentials_xml': {'key': 'credentialsXml', 'type': 'str'},
'end_point': {'key': 'endPoint', 'type': 'str'}
}
def __init__(self, id=None, url=None, connected_service_meta_data=None, credentials_xml=None, end_point=None):
super(WebApiConnectedServiceDetails, self).__init__(id=id, url=url)
self.connected_service_meta_data = connected_service_meta_data
self.credentials_xml = credentials_xml
self.end_point = end_point
|
the-stack_0_17596 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy as np
import torch
from reagent.models.mdn_rnn import MDNRNNMemoryPool, gmm_loss
from reagent.models.world_model import MemoryNetwork
from reagent.parameters import MDNRNNTrainerParameters
from reagent.reporting.world_model_reporter import WorldModelReporter
from reagent.test.world_model.simulated_world_model import SimulatedWorldModel
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
class TestMDNRNN(unittest.TestCase):
def test_gmm_loss(self):
# seq_len x batch_size x gaussian_size x feature_size
# 1 x 1 x 2 x 2
mus = torch.Tensor([[[[0.0, 0.0], [6.0, 6.0]]]])
sigmas = torch.Tensor([[[[2.0, 2.0], [2.0, 2.0]]]])
# seq_len x batch_size x gaussian_size
pi = torch.Tensor([[[0.5, 0.5]]])
logpi = torch.log(pi)
# seq_len x batch_size x feature_size
batch = torch.Tensor([[[3.0, 3.0]]])
gl = gmm_loss(batch, mus, sigmas, logpi)
# first component, first dimension
n11 = Normal(mus[0, 0, 0, 0], sigmas[0, 0, 0, 0])
# first component, second dimension
n12 = Normal(mus[0, 0, 0, 1], sigmas[0, 0, 0, 1])
p1 = (
pi[0, 0, 0]
* torch.exp(n11.log_prob(batch[0, 0, 0]))
* torch.exp(n12.log_prob(batch[0, 0, 1]))
)
# second component, first dimension
n21 = Normal(mus[0, 0, 1, 0], sigmas[0, 0, 1, 0])
# second component, second dimension
n22 = Normal(mus[0, 0, 1, 1], sigmas[0, 0, 1, 1])
p2 = (
pi[0, 0, 1]
* torch.exp(n21.log_prob(batch[0, 0, 0]))
* torch.exp(n22.log_prob(batch[0, 0, 1]))
)
logger.info(
"gmm loss={}, p1={}, p2={}, p1+p2={}, -log(p1+p2)={}".format(
gl, p1, p2, p1 + p2, -(torch.log(p1 + p2))
)
)
assert -(torch.log(p1 + p2)) == gl
def test_mdnrnn_simulate_world_cpu(self):
self._test_mdnrnn_simulate_world()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_mdnrnn_simulate_world_gpu(self):
self._test_mdnrnn_simulate_world(use_gpu=True)
def _test_mdnrnn_simulate_world(self, use_gpu=False):
num_epochs = 300
num_episodes = 400
batch_size = 200
action_dim = 2
seq_len = 5
state_dim = 2
simulated_num_gaussians = 2
mdrnn_num_gaussians = 2
simulated_num_hidden_layers = 1
simulated_num_hiddens = 3
mdnrnn_num_hidden_layers = 1
mdnrnn_num_hiddens = 10
adam_lr = 0.01
replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_episodes)
swm = SimulatedWorldModel(
action_dim=action_dim,
state_dim=state_dim,
num_gaussians=simulated_num_gaussians,
lstm_num_hidden_layers=simulated_num_hidden_layers,
lstm_num_hiddens=simulated_num_hiddens,
)
possible_actions = torch.eye(action_dim)
for _ in range(num_episodes):
cur_state_mem = torch.zeros((seq_len, state_dim))
next_state_mem = torch.zeros((seq_len, state_dim))
action_mem = torch.zeros((seq_len, action_dim))
reward_mem = torch.zeros(seq_len)
not_terminal_mem = torch.zeros(seq_len)
next_mus_mem = torch.zeros((seq_len, simulated_num_gaussians, state_dim))
swm.init_hidden(batch_size=1)
next_state = torch.randn((1, 1, state_dim))
for s in range(seq_len):
cur_state = next_state
action = possible_actions[np.random.randint(action_dim)].view(
1, 1, action_dim
)
next_mus, reward = swm(action, cur_state)
not_terminal = 1
if s == seq_len - 1:
not_terminal = 0
# randomly draw for next state
next_pi = torch.ones(simulated_num_gaussians) / simulated_num_gaussians
index = Categorical(next_pi).sample((1,)).long().item()
next_state = next_mus[0, 0, index].view(1, 1, state_dim)
cur_state_mem[s] = cur_state.detach()
action_mem[s] = action
reward_mem[s] = reward.detach()
not_terminal_mem[s] = not_terminal
next_state_mem[s] = next_state.detach()
next_mus_mem[s] = next_mus.detach()
replay_buffer.insert_into_memory(
cur_state_mem, action_mem, next_state_mem, reward_mem, not_terminal_mem
)
num_batch = num_episodes // batch_size
mdnrnn_params = MDNRNNTrainerParameters(
hidden_size=mdnrnn_num_hiddens,
num_hidden_layers=mdnrnn_num_hidden_layers,
minibatch_size=batch_size,
learning_rate=adam_lr,
num_gaussians=mdrnn_num_gaussians,
)
mdnrnn_net = MemoryNetwork(
state_dim=state_dim,
action_dim=action_dim,
num_hiddens=mdnrnn_params.hidden_size,
num_hidden_layers=mdnrnn_params.num_hidden_layers,
num_gaussians=mdnrnn_params.num_gaussians,
)
if use_gpu:
mdnrnn_net = mdnrnn_net.cuda()
trainer = MDNRNNTrainer(memory_network=mdnrnn_net, params=mdnrnn_params)
trainer.reporter = WorldModelReporter(1)
for e in range(num_epochs):
for i in range(num_batch):
training_batch = replay_buffer.sample_memories(
batch_size, use_gpu=use_gpu
)
trainer.train(training_batch)
trainer.reporter.finish_epoch()
report = trainer.reporter.publish().training_report.oss_world_model_report
loss = np.mean(report.loss)
bce = np.mean(report.bce)
gmm = np.mean(report.gmm)
mse = np.mean(report.mse)
logger.info(
f"{e}-th epoch: \n" f"loss={loss}, bce={bce}, gmm={gmm}, mse={mse}"
)
if loss < 0 and gmm < -3.0 and bce < 0.6 and mse < 0.2:
return
raise RuntimeError("losses not reduced significantly during training")
|
the-stack_0_17597 | """Plot word counts."""
import argparse
import pandas as pd
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Plot word counts")
parser.add_argument('infile', type=argparse.FileType('r'),
nargs='?', default='-',
help='Word count csv file name')
parser.add_argument('--xlim', type=float, nargs=2,
metavar=('XMIN', 'XMAX'),
default=None, help='X-axis limits')
parser.add_argument('--outfile', type=str,
default='plotcounts.png',
help='Output image file name')
args = parser.parse_args()
df = pd.read_csv(args.infile, header=None,
names=('word', 'word_frequency'))
df['rank'] = df['word_frequency'].rank(ascending=False,
method='max')
df['inverse_rank'] = 1 / df['rank']
ax = df.plot.scatter(x='word_frequency',
y='inverse_rank',
figsize=[12, 6],
grid=True,
xlim=args.xlim)
plt.savefig(args.outfile)
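# Example invocation (illustrative; assumes a headerless two-column word,count CSV
# produced by a companion word-counting script):
#   python plotcounts.py results/dracula.csv --outfile dracula.png --xlim 0 10000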
|
the-stack_0_17599 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
import parlai.core.build_data as build_data
import os
import json
VERSION = '1'
TRAIN_FILENAME = 'hotpot_train_v{}.1.json'.format(VERSION)
DEV_DISTRACTOR_FILENAME = 'hotpot_dev_distractor_v{}.json'.format(VERSION)
DEV_FULLWIKI_FILENAME = 'hotpot_dev_fullwiki_v{}.json'.format(VERSION)
RESOURCES = [
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_train_v1.1.json',
'hotpot_train_v1.1.json',
'26650cf50234ef5fb2e664ed70bbecdfd87815e6bffc257e068efea5cf7cd316',
zipped=False,
),
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_distractor_v1.json',
'hotpot_dev_distractor_v1.json',
'4e9ecb5c8d3b719f624d66b60f8d56bf227f03914f5f0753d6fa1b359d7104ea',
zipped=False,
),
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json',
'hotpot_dev_fullwiki_v1.json',
'2f1f3e594a3066a3084cc57950ca2713c24712adaad03af6ccce18d1846d5618',
zipped=False,
),
]
OUTPUT_FORMAT = 'text:{context_question}\t' 'labels:{answer}'
def _handle_data_point(data_point):
    context_question_txt = ""
for [title, sentences_list] in data_point['context']:
sentences = '\\n'.join(sentences_list)
context_question_txt += '{}\\n{}\\n\\n'.format(title, sentences)
context_question_txt += data_point['question']
output = OUTPUT_FORMAT.format(
context_question=context_question_txt, answer=data_point['answer']
)
output += '\t\tepisode_done:True\n'
return output
def make_parlai_format(outpath, dtype, data):
print('building parlai:' + dtype)
with PathManager.open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
for data_point in data:
fout.write(_handle_data_point(data_point))
def build(opt):
dpath = os.path.join(opt['datapath'], 'HotpotQA')
if not build_data.built(dpath, version_string=VERSION):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
with PathManager.open(os.path.join(dpath, TRAIN_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'train', data)
with PathManager.open(os.path.join(dpath, DEV_DISTRACTOR_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_distractor', data)
with PathManager.open(os.path.join(dpath, DEV_FULLWIKI_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_fullwiki', data)
# Mark the data as built.
build_data.mark_done(dpath, version_string=VERSION)
|
the-stack_0_17602 | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.interfaces.alexa.presentation.apl.component_state import ComponentState
class SetStateCommand(Command):
"""
The SetState command changes one of the component’s state settings. The SetState command can be used to change the checked, disabled, and focused states. The karaoke and pressed states may not be directly set; use the Select command or SpeakItem commands to change those states. Also, note that the focused state may only be set - it can’t be cleared.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value should be set.
:type component_id: (optional) str
:param state: The name of the state to set. Must be one of “checked”, “disabled”, and “focused”.
:type state: (optional) ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState
:param value: The value to set on the property
:type value: (optional) bool
"""
deserialized_types = {
'object_type': 'str',
'delay': 'int',
'description': 'str',
'when': 'bool',
'component_id': 'str',
'state': 'ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState',
'value': 'bool'
} # type: Dict
attribute_map = {
'object_type': 'type',
'delay': 'delay',
'description': 'description',
'when': 'when',
'component_id': 'componentId',
'state': 'state',
'value': 'value'
} # type: Dict
supports_multiple_types = False
def __init__(self, delay=None, description=None, when=None, component_id=None, state=None, value=None):
# type: (Union[int, str, None], Optional[str], Optional[bool], Optional[str], Optional[ComponentState], Union[bool, str, None]) -> None
"""The SetState command changes one of the component’s state settings. The SetState command can be used to change the checked, disabled, and focused states. The karaoke and pressed states may not be directly set; use the Select command or SpeakItem commands to change those states. Also, note that the focused state may only be set - it can’t be cleared.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value should be set.
:type component_id: (optional) str
:param state: The name of the state to set. Must be one of “checked”, “disabled”, and “focused”.
:type state: (optional) ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState
:param value: The value to set on the property
:type value: (optional) bool
"""
self.__discriminator_value = "SetState" # type: str
self.object_type = self.__discriminator_value
super(SetStateCommand, self).__init__(object_type=self.__discriminator_value, delay=delay, description=description, when=when)
self.component_id = component_id
self.state = state
self.value = value
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SetStateCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
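# Illustrative usage (added sketch): build and serialise a SetState command.
# The `state` argument takes a ComponentState enum member (the APL state names
# are "checked", "disabled" and "focused", per the docstring above); it is left
# out of this minimal example.
#
#   cmd = SetStateCommand(component_id="myCheckbox", value=True, delay=0)
#   print(cmd.to_dict())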
|
the-stack_0_17603 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START job_search_commute_search]
from google.cloud import talent
from google.cloud.talent import enums
import six
def search_jobs(project_id, tenant_id):
"""Search Jobs using commute distance"""
client = talent.JobServiceClient()
# project_id = 'Your Google Cloud Project ID'
# tenant_id = 'Your Tenant ID (using tenancy is optional)'
if isinstance(project_id, six.binary_type):
project_id = project_id.decode("utf-8")
if isinstance(tenant_id, six.binary_type):
tenant_id = tenant_id.decode("utf-8")
parent = client.tenant_path(project_id, tenant_id)
domain = "www.example.com"
session_id = "Hashed session identifier"
user_id = "Hashed user identifier"
request_metadata = {"domain": domain, "session_id": session_id, "user_id": user_id}
commute_method = enums.CommuteMethod.TRANSIT
seconds = 1800
travel_duration = {"seconds": seconds}
latitude = 37.422408
longitude = -122.084068
start_coordinates = {"latitude": latitude, "longitude": longitude}
commute_filter = {
"commute_method": commute_method,
"travel_duration": travel_duration,
"start_coordinates": start_coordinates,
}
job_query = {"commute_filter": commute_filter}
# Iterate over all results
results = []
for response_item in client.search_jobs(
parent, request_metadata, job_query=job_query
):
print("Job summary: {}".format(response_item.job_summary))
print("Job title snippet: {}".format(response_item.job_title_snippet))
job = response_item.job
results.append(job.name)
print("Job name: {}".format(job.name))
print("Job title: {}".format(job.title))
return results
# [END job_search_commute_search]
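# Illustrative invocation (project and tenant IDs below are placeholders):
#   search_jobs("my-gcp-project", "my-tenant-id")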
|
the-stack_0_17604 | import torch
from torch import nn
# Define model
class TermScorer(nn.Module):
def __init__(self, d_hidden=768, max_sentence_length=40, num_of_class=3):
super(TermScorer, self).__init__()
self.dropout = nn.Dropout(0.10)
self.hidden = nn.Sequential(
nn.Linear(d_hidden, 768),
nn.ReLU()
)
self.linear = nn.Linear(768, num_of_class)
def forward(self, x):
# input x (batch_size, num_of_span, d_hidden)
x = self.dropout(x)
h = self.hidden(x)
logits = self.linear(h)
return logits
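if __name__ == "__main__":
    # Minimal shape check (added sketch, not part of the original module):
    # score a random batch of span embeddings and confirm one logit per class.
    scorer = TermScorer(d_hidden=768, max_sentence_length=40, num_of_class=3)
    dummy_spans = torch.randn(2, 5, 768)  # (batch_size, num_of_span, d_hidden)
    print(scorer(dummy_spans).shape)  # expected: torch.Size([2, 5, 3])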
|
the-stack_0_17605 | #!/usr/bin/env python3
import glob
import os.path
from datetime import datetime
from typing import Generator, Optional
import filetype
from pysymphony import SymphonyClient
from ..common.data_class import Document, Location, SiteSurvey
from ..graphql.enum.image_entity import ImageEntity
from ..graphql.input.add_image import AddImageInput
from ..graphql.mutation.add_image import AddImageMutation
from ..graphql.mutation.delete_image import DeleteImageMutation
def _add_image(
client: SymphonyClient,
local_file_path: str,
entity_type: ImageEntity,
entity_id: str,
category: Optional[str] = None,
) -> None:
file_type = filetype.guess(local_file_path)
file_type = file_type.MIME if file_type is not None else ""
img_key = client.store_file(local_file_path, file_type, False)
file_size = os.path.getsize(local_file_path)
AddImageMutation.execute(
client,
AddImageInput(
entityType=entity_type,
entityId=entity_id,
imgKey=img_key,
fileName=os.path.basename(local_file_path),
fileSize=file_size,
modified=datetime.utcnow(),
contentType=file_type,
category=category,
),
)
def list_dir(directory_path: str) -> Generator[str, None, None]:
files = list(glob.glob(os.path.join(directory_path, "**/**"), recursive=True))
for file_path in set(files):
if os.path.isfile(file_path):
yield file_path
def add_file(
client: SymphonyClient,
local_file_path: str,
entity_type: str,
entity_id: str,
category: Optional[str] = None,
) -> None:
"""This function adds file to an entity of a given type.
Args:
local_file_path (str): local system path to the file
entity_type (str): one of existing options ["LOCATION", "WORK_ORDER", "SITE_SURVEY", "EQUIPMENT"]
entity_id (string): valid entity ID
category (Optional[string]): file category name
Raises:
FailedOperationException: on operation failure
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_file(
local_file_path="./document.pdf",
entity_type="LOCATION",
entity_id=location.id,
category="category_name",
)
```
"""
entity = {
"LOCATION": ImageEntity.LOCATION,
"WORK_ORDER": ImageEntity.WORK_ORDER,
"SITE_SURVEY": ImageEntity.SITE_SURVEY,
"EQUIPMENT": ImageEntity.EQUIPMENT,
}.get(entity_type, ImageEntity.LOCATION)
_add_image(client, local_file_path, entity, entity_id, category)
def add_files(
client: SymphonyClient,
local_directory_path: str,
entity_type: str,
entity_id: str,
category: Optional[str] = None,
) -> None:
"""This function adds all files located in folder to an entity of a given type.
Args:
local_directory_path (str): local system path to the directory
entity_type (str): one of existing options ["LOCATION", "WORK_ORDER", "SITE_SURVEY", "EQUIPMENT"]
entity_id (string): valid entity ID
category (Optional[string]): file category name
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_files(
local_directory_path="./documents_folder/",
entity_type="LOCATION",
entity_id=location.id,
category="category_name",
)
```
"""
for file in list_dir(local_directory_path):
add_file(client, file, entity_type, entity_id, category)
def add_location_image(
client: SymphonyClient, local_file_path: str, location: Location
) -> None:
"""This function adds image to existing location.
Args:
local_file_path (str): local system path to the file
location ( `pyinventory.common.data_class.Location` ): existing location object
Raises:
FailedOperationException: on operation failure
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_location_image(
local_file_path="./document.pdf",
location=location,
)
```
"""
_add_image(client, local_file_path, ImageEntity.LOCATION, location.id)
def add_site_survey_image(
client: SymphonyClient, local_file_path: str, id: str
) -> None:
"""This function adds image to existing site survey.
Args:
local_file_path (str): local system path to the file
id (str): site survey ID
Raises:
FailedOperationException: on operation failure
Example:
```
client.add_site_survey_image(
local_file_path="./document.pdf",
id="123456"
)
```
"""
_add_image(client, local_file_path, ImageEntity.SITE_SURVEY, id)
def _delete_image(
client: SymphonyClient, entity_type: ImageEntity, entity_id: str, image_id: str
) -> None:
DeleteImageMutation.execute(
client, entityType=entity_type, entityId=entity_id, id=image_id
)
def delete_site_survey_image(client: SymphonyClient, survey: SiteSurvey) -> None:
"""This function deletes image from existing site survey.
Args:
survey ( `pyinventory.common.data_class.SiteSurvey` ): site survey object
Raises:
FailedOperationException: on operation failure
Example:
```
client.delete_site_survey_image(survey=survey)
```
"""
source_file_key = survey.sourceFileKey
source_file_id = survey.sourceFileId
if source_file_key is not None:
client.delete_file(source_file_key, False)
if source_file_id is not None:
_delete_image(client, ImageEntity.SITE_SURVEY, survey.id, source_file_id)
def delete_document(client: SymphonyClient, document: Document) -> None:
"""This function deletes existing document.
Args:
document ( `pyinventory.common.data_class.Document` ): document object
Raises:
FailedOperationException: on operation failure
Example:
```
client.delete_document(document=document)
```
"""
_delete_image(client, document.parentEntity, document.parentId, document.id)
|
the-stack_0_17607 | from .base import APITestCase
from rest_framework import status
from tests.rest_app.models import (
RootModel, OneToOneModel, ForeignKeyModel, ExtraModel, UserManagedModel,
Parent, Child, ItemType, Item, SlugModel, SlugRefParent, ChoiceModel,
)
from django.contrib.auth.models import User
from django.conf import settings
class TemplateTestCase(APITestCase):
def setUp(self):
instance = RootModel.objects.create(
slug='instance',
description="Test"
)
for cls in OneToOneModel, ForeignKeyModel, ExtraModel:
cls.objects.create(
root=instance,
)
user = User.objects.create(username="testuser", is_superuser=True)
self.client.force_authenticate(user)
UserManagedModel.objects.create(id=1, user=user)
parent = Parent.objects.create(name="Test", pk=1)
parent.children.create(name="Test 1")
parent.children.create(name="Test 2")
itype = ItemType.objects.create(name="Test", pk=1)
itype.item_set.create(name="Test 1")
itype.item_set.create(name="Test 2")
slugref = SlugModel.objects.create(
code="test",
name="Test",
)
SlugRefParent.objects.create(
ref=slugref,
pk=1,
name="Test Slug Ref"
)
SlugRefParent.objects.create(
ref=SlugModel.objects.create(
code="other",
name="Other",
),
pk=2,
name="Test Another Ref",
)
ItemType.objects.create(
name="Inactive",
pk=2,
active=False
)
ChoiceModel.objects.create(
name="Test",
pk=1,
choice="two"
)
def assertHTMLEqual(self, expected_html, html, auto_replace=True):
if settings.WITH_NONROOT and auto_replace:
html = html.replace('/wqsite/', '/')
super().assertHTMLEqual(expected_html, html)
def check_html(self, url, expected_html):
response = self.client.get(url)
self.assertTrue(status.is_success(response.status_code), response.data)
html = response.content.decode('utf-8')
self.assertHTMLEqual(expected_html, html)
# Test url="" use case
def test_template_list_at_root(self):
self.check_html("/", """
<ul>
<li><a href="/instance">instance</a></li>
</ul>
""")
def test_template_detail_at_root(self):
instance = RootModel.objects.get(slug='instance')
self.check_html("/instance", """
<h1>instance</h1>
<p>Test</p>
<h3>OneToOneModel</h3>
<p>
<a href="/onetoonemodels/{onetoone_pk}">
onetoonemodel for instance
</a>
</p>
<h3>ExtraModels</h3>
<ul>
<li>
<a href="/extramodels/{extra_pk}">
extramodel for instance
</a>
</li>
</ul>
<p><a href="/instance/edit">Edit</a></p>
""".format(
onetoone_pk=instance.onetoonemodel.pk,
extra_pk=instance.extramodels.all()[0].pk,
))
def test_template_filter_by_parent(self):
childs = Parent.objects.get(pk=1).children.order_by('pk')
self.check_html('/parents/1/children', """
<p>2 Records</p>
<h3>Childs for <a href="/parents/1">Test</a></h3>
<ul>
<li><a href="/children/{c1_pk}">Test 1</a></li>
<li><a href="/children/{c2_pk}">Test 2</a></li>
</ul>
""".format(
c1_pk=childs[0].pk,
c2_pk=childs[1].pk,
))
items = ItemType.objects.get(pk=1).item_set.order_by('pk')
self.check_html('/itemtypes/1/items', """
<h3><a href="/itemtypes/1">Test</a> Items</h3>
<ul>
<li><a href="/items/{i1_pk}">Test 1</a></li>
<li><a href="/items/{i2_pk}">Test 2</a></li>
</ul>
""".format(
i1_pk=items[0].pk,
i2_pk=items[1].pk,
))
def test_template_detail_user_serializer(self):
self.check_html('/usermanagedmodels/1', """
<h1>Object #1</h1>
<p>Created by testuser</p>
<p></p>
""")
def test_template_custom_lookup(self):
self.check_html('/slugmodels/test', "<h1>Test</h1>")
def test_template_default_per_page(self):
parent = Parent.objects.get(pk=1)
parent.name = "Test 1"
parent.save()
for i in range(2, 101):
Parent.objects.create(
id=i,
name="Test %s" % i,
)
html = """
<p>100 Records</p>
<div>
<h3>Page 1 of 2</h3>
<a href="http://testserver/parents/?page=2">Next 50</a>
</div>
<ul>
"""
for i in range(1, 51):
html += """
<li><a href="/parents/{pk}">Test {pk}</a></li>
""".format(pk=i)
html += """
</ul>
"""
self.check_html("/parents/", html)
def test_template_custom_per_page(self):
for i in range(3, 102):
child = Child.objects.create(
name="Test %s" % i,
parent_id=1,
)
self.check_html("/children/?page=2", """
<p>101 Records</p>
<div>
<a href="http://testserver/children/">Prev 100</a>
<h3>Page 2 of 2</h3>
</div>
<ul>
<li><a href="/children/{pk}">Test 101</a></li>
</ul>
""".format(pk=child.pk))
def test_template_limit(self):
for i in range(3, 101):
child = Child.objects.create(
name="Test %s" % i,
parent_id=1,
)
html = """
<p>100 Records</p>
<div>
<h3>Page 1 of 10</h3>
<a href="http://testserver/children/?limit=10&page=2">Next 10</a>
</div>
<ul>
"""
for child in Child.objects.all()[:10]:
html += """
<li><a href="/children/{pk}">{label}</a></li>
""".format(pk=child.pk, label=child.name)
html += """
</ul>
"""
self.check_html("/children/?limit=10", html)
def test_template_context_processors(self):
response = self.client.get('/rest_context')
html = response.content.decode('utf-8')
token = html.split('value="')[1].split('"')[0]
self.assertTrue(len(token) >= 32)
if settings.WITH_NONROOT:
base_url = '/wqsite'
else:
base_url = ''
self.assertHTMLEqual("""
<p>{base_url}/rest_context</p>
<p>rest_context</p>
<p>{base_url}/</p>
<p>{base_url}/</p>
<p>0.0.0</p>
<p>
<input name="csrfmiddlewaretoken" type="hidden" value="{csrf}">
</p>
<p>rest_context</p>
<p>Can Edit Items</p>
""".format(csrf=token, base_url=base_url), html, auto_replace=False)
def test_template_page_config(self):
item = Item.objects.get(name="Test 1")
self.check_html('/items/%s' % item.pk, """
<h3>Test 1</h3>
<a href="/itemtypes/1">Test</a>
<a href="/items/{pk}/edit">Edit</a>
""".format(pk=item.pk))
def test_template_edit_fk(self):
item = Item.objects.get(name="Test 1")
self.check_html('/items/%s/edit' % item.pk, """
<form>
<input name="name" required value="Test 1">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1" selected>Test</option>
<option value="2">Inactive</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_edit_choice(self):
self.check_html('/choicemodels/1/edit', """
<form>
<input name="name" required value="Test">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two" checked>
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three">
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
def test_template_new_fk(self):
self.check_html('/children/new', """
<form>
<input name="name" required value="">
<select name="parent_id" required>
<option value="">Select one...</option>
<option value="1">Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_filtered(self):
self.check_html('/items/new', """
<form>
<input name="name" required value="">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1">Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_defaults(self):
self.check_html('/items/new?type_id=1', """
<form>
<input name="name" required value="">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1" selected>Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_slug(self):
self.check_html('/slugrefparents/new?ref_id=test', """
<form>
<input name="name" required value="">
<select name="ref_id" required>
<option value="">Select one...</option>
<option value="test" selected>Test</option>
<option value="other">Other</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_slug_filtered(self):
self.check_html('/slugrefchildren/new', """
<form>
<input name="name" required value="">
<select name="parent_id" required>
<option value="">Select one...</option>
<option value="1">Test Slug Ref (Test)</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_choice(self):
self.check_html('/choicemodels/new', """
<form>
<input name="name" required value="">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two">
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three">
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
def test_template_new_choice_defaults(self):
self.check_html('/choicemodels/new?choice=three', """
<form>
<input name="name" required value="">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two">
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three" checked>
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
|
the-stack_0_17608 | from hwtypes import BitVector
import random
NTESTS = 10
MAX_BITS = 128
def test_concat_const():
a = BitVector[4](4)
b = BitVector[4](1)
c = a.concat(b)
print(a.binary_string())
print(c.binary_string())
expected = BitVector[8]([0,0,1,0,1,0,0,0])
assert expected == c
def test_concat_random():
for _ in range(NTESTS):
n1 = random.randint(1, MAX_BITS)
n2 = random.randint(1, MAX_BITS)
a = BitVector.random(n1)
b = BitVector.random(n2)
c = a.concat(b)
assert c.size == a.size + b.size
assert c == BitVector[n1 + n2](a.bits() + b.bits())
assert c.binary_string() == b.binary_string() + a.binary_string()
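# Note on bit order (derived from the tests above): BitVector stores bits
# least-significant first, so a.concat(b) places b in the high-order bits, e.g.
#   BitVector[4](4).concat(BitVector[4](1)).binary_string() == "00010100"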
|
the-stack_0_17610 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
def test_if_then_else_statements(self):
if True:
result = 'true value'
else:
result = 'false value'
self.assertEqual("true value", result)
def test_if_then_statements(self):
result = 'default value'
if True:
result = 'true value'
self.assertEqual("true value", result)
def test_while_statement(self):
i = 1
result = 1
while i <= 10:
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_break_statement(self):
i = 1
result = 1
while True:
if i > 10: break
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_continue_statement(self):
i = 0
result = []
while i < 10:
i += 1
if (i % 2) == 0: continue
result.append(i)
self.assertEqual([1, 3, 5, 7, 9], result)
def test_for_statement(self):
phrase = ["fish", "and", "chips"]
result = []
for item in phrase:
result.append(item.upper())
self.assertEqual(["FISH", "AND", "CHIPS"], result)
def test_for_statement_with_tuples(self):
round_table = [
("Lancelot", "Blue"),
("Galahad", "I don't know!"),
("Robin", "Blue! I mean Green!"),
("Arthur", "Is that an African Swallow or Amazonian Swallow?")
]
result = []
for knight, answer in round_table:
result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")
text = "Contestant: 'Robin' Answer: 'Blue! I mean Green!'"
self.assertMatch(text, result[2])
self.assertNoMatch(text, result[0])
self.assertNoMatch(text, result[1])
self.assertNoMatch(text, result[3])
|
the-stack_0_17613 | import os
from datetime import datetime, timedelta, timezone
import discord
from Core.error import InteractionError
from discord import ApplicationContext
from discord.commands import slash_command
from discord.ext import commands
from dotenv import load_dotenv
from SongDBCore import SongDBClient
from SongDB.embed_builder import EmbedBuilder as EB
from SongDB.many_page import PagePage
from SongDB.match import match_url
load_dotenv()
req_url = "https://script.google.com/macros/s/AKfycbybEQO66Ui5AbgaPvisluBbWMqxayLM2iyPCNeipXUOvn__Jp4SQsm7X8Z4w3HQvxja/exec"
guild_id = int(os.environ["GUILD_ID"])
utc = timezone.utc
jst = timezone(timedelta(hours=9), "Asia/Tokyo")
class SongDB(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(guild_ids=[guild_id], name="song")
async def _song(self, ctx: ApplicationContext):
await ctx.interaction.response.defer(ephemeral=True)
embed = EB()._start()
view = ProdDropdownView()
await ctx.interaction.followup.send(embed=embed, view=view, ephemeral=True)
return
class ProdDropdown(discord.ui.Select):
def __init__(self) -> None:
options = [
discord.SelectOption(
label="データベース検索",
value="multi",
description="曲名、アーティスト名、配信URLなどの条件で検索",
default=False,
),
discord.SelectOption(
label="最近歌われていない曲",
value="no_recent",
description="最近歌われていない曲の一覧を検索できます。",
default=False,
),
]
super().__init__(
placeholder="検索方式を指定してください。", min_values=1, max_values=1, options=options
)
async def callback(self, interaction: discord.Interaction) -> None:
if self.values[0] == "multi":
await interaction.response.send_modal(modal=ProdSearch())
return
elif self.values[0] == "no_recent":
embed = EB()._start()
view = ProdRecentDropdownView()
await interaction.response.send_message(
embed=embed, view=view, ephemeral=True
)
return
raise InteractionError(interaction=interaction, cls=self)
class DateSelect(discord.ui.Select):
def __init__(self) -> None:
options = [
discord.SelectOption(
label="1ヶ月",
value="1",
description="1ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="3ヶ月",
value="3",
description="3ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="6ヶ月",
value="6",
description="6ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="1年",
value="12",
description="1年歌われていない曲を検索します。",
default=False,
),
]
super().__init__(
placeholder="検索する期間を選択してください。", min_values=1, max_values=1, options=options
)
async def callback(self, interaction: discord.Interaction):
mth = int(self.values[0]) # 1 or 3 or 6 or 12
now = datetime.now().astimezone(jst)
__to = (now - timedelta(days=30 * mth)).date()
if __to.month < 10:
month = f"0{str(__to.month)}"
else:
month = str(__to.month)
_to = f"{__to.year}/{month}/{__to.day}"
print(_to)
client = SongDBClient(url=req_url)
_date = await client.search_by_date(_to=_to)
if _date.songs == []: # no result found
embed = EB()._empty_recent(_to=_to)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
embeds = EB()._recent(_to=_to, songs=_date.songs)
# await interaction.response.send_message(embed=embeds[0])
await PagePage(embeds=embeds)._send(interaction)
return
class ProdSearch(discord.ui.Modal):
def __init__(self) -> None:
super().__init__(title="歌枠データベース")
self.add_item(
discord.ui.InputText(
label="検索したい曲名を入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=0,
placeholder="曲名",
)
)
self.add_item(
discord.ui.InputText(
label="検索したいアーティスト名や作曲者名を入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=1,
placeholder="アーティスト名/作曲者名",
),
)
self.add_item(
discord.ui.InputText(
label="検索したい歌枠のURLを入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=2,
placeholder="youtube.comとyoutu.beに対応しています",
)
)
async def callback(self, interaction: discord.Interaction):
# await interaction.response.defer(ephemeral=True)
if self.children[2].value:
matched_id = match_url(self.children[2].value)
if not matched_id:
print("Invalid url inputted.")
await interaction.response.send_message(
content="対応していないURLが入力されました。", ephemeral=True
)
return
client = SongDBClient(url=req_url)
d = {
"song_name": self.children[0].value,
"artist_name": self.children[1].value,
"stream_id": self.children[2].value,
}
if not any(d.values()):
await interaction.response.send_message(
content="一つ以上の検索条件を指定してください。", ephemeral=True
)
return
songs = await client.multi_search(**d)
if songs.songs == []: # no result found
embed = EB()._empty(_input=d)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
embeds = EB()._rawsong(_input=d, songs=songs.songs)
# await interaction.response.send_message(embed=embeds[0])
await PagePage(embeds=embeds)._send(interaction)
return
class ProdDropdownView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
self.add_item(ProdDropdown())
class ProdRecentDropdownView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
self.add_item(DateSelect())
def setup(bot):
return bot.add_cog(SongDB(bot))
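# Hypothetical loading sketch (the extension path and token variable are assumptions,
# not part of this file): a py-cord bot would register this cog roughly as follows.
#
#   bot = discord.Bot(intents=discord.Intents.default())
#   bot.load_extension("SongDB.song_cog")  # imports this module and calls setup(bot)
#   bot.run(os.environ["DISCORD_TOKEN"])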
|
the-stack_0_17617 | # coding: utf8
# scikit-learn provides three naive Bayes classification algorithms: GaussianNB (Gaussian naive Bayes), MultinomialNB (multinomial naive Bayes), and BernoulliNB (Bernoulli naive Bayes)
import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
X = np.asarray([[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [1, 1], [2, 2], [3, 3]])
y = np.asarray([1, 1, 1, 1, 1, 2, 2, 2])
# Class priors must be passed to the constructor; assigning clf.class_prior_
# before fit() has no effect, because fit() recomputes the priors from the data.
clf = GaussianNB(priors=[0.675, 0.325])
clf.fit(X, y)
print(clf.predict([[-1, -1], [2,3]]))
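# A small hedged extension of the example above (not in the original snippet):
# GaussianNB also exposes per-class probabilities and the fitted priors.
print(clf.predict_proba([[-1, -1], [2, 3]]))
print(clf.class_prior_)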
|
the-stack_0_17618 | # pipeline example modified from David Johnson of the Michigan SPIDER team's regression example, originally at
# https://gitlab.datadrivendiscovery.org/michigan/spider/blob/master/spider/pipelines/supervised_learning_owl.py
from d3m.metadata import pipeline as d3m_pipeline
from d3m.metadata import base as d3m_base
from d3m.metadata.base import ArgumentType, Context
from realML.pipelines.base import BasePipeline
#from realML.kernel import RFMPreconditionedGaussianKRR
from common_primitives.dataframe_to_ndarray import DataFrameToNDArrayPrimitive
from common_primitives.ndarray_to_dataframe import NDArrayToDataFramePrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive
from d3m.primitives.data_transformation.encoder import DistilBinaryEncoder as BinaryEncoderPrimitive
from d3m import index
import d3m.primitives.data_cleaning.imputer
#import d3m.primitives.data_preprocessing.horizontal_concat
import os.path
#from d3m.primitives.data_preprocessing.horizontal_concat import HorizontalConcat as HorizontalConcat
from common_primitives.horizontal_concat import HorizontalConcatPrimitive
import pandas as pd
import d3m.primitives.regression.gradient_boosting
from d3m import index
import numpy as np
from realML.matrix import SparsePCA
class sparsepcaPipeline2(BasePipeline):
def __init__(self):
super().__init__()
#specify one seed dataset on which this pipeline can operate
dataset = '534_cps_85_wages'
self.meta_info = self.genmeta(dataset)
#define pipeline object
def _gen_pipeline(self):
pipeline = d3m_pipeline.Pipeline()
#define inputs. This will be read in automatically as a Dataset object.
pipeline.add_input(name = 'inputs')
#step 0: Denormalize: join multiple tabular resource?
# Why is there no entry point for Denormalize?
#step 0: Dataset -> Dataframe
step_0 = d3m_pipeline.PrimitiveStep(primitive_description = DatasetToDataFramePrimitive.metadata.query())
step_0.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'inputs.0')
step_0.add_output('produce')
pipeline.add_step(step_0)
# Step 1: Simple Profiler Column Role Annotation
step_1 = d3m_pipeline.PrimitiveStep(
primitive=index.get_primitive("d3m.primitives.schema_discovery.profiler.Common")
)
step_1.add_argument(
name="inputs",
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference="steps.0.produce",
)
step_1.add_output("produce")
pipeline.add_step(step_1)
#step 2: ColumnParser
step_2 = d3m_pipeline.PrimitiveStep(primitive_description=ColumnParserPrimitive.metadata.query())
step_2.add_argument(
name='inputs',
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline.add_step(step_2)
#step 3: Extract attributes from dataset into a dedicated dataframe
step_3 = d3m_pipeline.PrimitiveStep(primitive_description = ExtractColumnsBySemanticTypesPrimitive.metadata.query())
step_3.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.2.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(
name='semantic_types',
argument_type=d3m_base.ArgumentType.VALUE,
data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline.add_step(step_3)
#step 4: Binary encoding for categorical features
step_4 = d3m_pipeline.PrimitiveStep(primitive_description = BinaryEncoderPrimitive.metadata.query())
step_4.add_hyperparameter(
name = 'min_binary',
argument_type = d3m_base.ArgumentType.VALUE,
data = 2
)
step_4.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.3.produce')
step_4.add_output('produce')
pipeline.add_step(step_4)
#step 5: Extract Targets
step_5 = d3m_pipeline.PrimitiveStep(primitive_description = ExtractColumnsBySemanticTypesPrimitive.metadata.query())
step_5.add_argument(
name='inputs',
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference='steps.2.produce'
)
step_5.add_hyperparameter(
name='semantic_types',
argument_type=d3m_base.ArgumentType.VALUE,
data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
step_5.add_output('produce')
pipeline.add_step(step_5)
#step 6: transform targets dataframe into an ndarray
step_6 = d3m_pipeline.PrimitiveStep(primitive_description = DataFrameToNDArrayPrimitive.metadata.query())
step_6.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.5.produce'
)
step_6.add_output('produce')
pipeline.add_step(step_6)
#step 7 : transform features dataframe into an ndarray
step_7 = d3m_pipeline.PrimitiveStep(primitive_description = DataFrameToNDArrayPrimitive.metadata.query())
step_7.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.4.produce'
)
step_7.add_output('produce')
pipeline.add_step(step_7)
attributes = 'steps.7.produce'
targets = 'steps.6.produce'
        #step 8: run SparsePCA on the attribute matrix
step_8 = d3m_pipeline.PrimitiveStep(primitive_description = SparsePCA.metadata.query())
step_8.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = attributes #inputs here are the outputs from step 7
)
step_8.add_hyperparameter(
name = 'n_components',
argument_type = d3m_base.ArgumentType.VALUE,
data = 9
)
step_8.add_hyperparameter(
name = 'beta',
argument_type = d3m_base.ArgumentType.VALUE,
data = 1e-9
)
step_8.add_hyperparameter(
name = 'alpha',
argument_type = d3m_base.ArgumentType.VALUE,
data = 1e-4
)
step_8.add_hyperparameter(
name = 'degree',
argument_type = d3m_base.ArgumentType.VALUE,
data = 2
)
step_8.add_output('produce')
pipeline.add_step(step_8)
        #step 9: convert the SparsePCA output ndarray into a dataframe
step_9 = d3m_pipeline.PrimitiveStep(primitive_description = NDArrayToDataFramePrimitive.metadata.query())
step_9.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.8.produce'
)
step_9.add_output('produce')
pipeline.add_step(step_9)
        #step 10: horizontally concatenate the binary-encoded attributes with the SparsePCA features
step_10 = d3m_pipeline.PrimitiveStep(primitive_description = HorizontalConcatPrimitive.metadata.query())
step_10.add_argument(
name = 'left',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.4.produce'
)
step_10.add_argument(
name = 'right',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.9.produce'
)
step_10.add_output('produce')
pipeline.add_step(step_10)
        #step 11: gradient boosting regression on the concatenated features (inputs and outputs for sklearn primitives are both dataframes)
step_11 = d3m_pipeline.PrimitiveStep(primitive_description = d3m.primitives.regression.gradient_boosting.SKlearn.metadata.query())
step_11.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.10.produce'
)
step_11.add_argument(
name = 'outputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.5.produce'
)
step_11.add_hyperparameter(
name = 'n_estimators',
argument_type = d3m_base.ArgumentType.VALUE,
data = 63000
)
step_11.add_hyperparameter(
name = 'learning_rate',
argument_type = d3m_base.ArgumentType.VALUE,
data = 0.0001
)
step_11.add_hyperparameter(
name = 'max_depth',
argument_type = d3m_base.ArgumentType.VALUE,
data = 3
)
step_11.add_output('produce')
pipeline.add_step(step_11)
        #step 12: generate a properly-formatted output dataframe from the dataframed prediction outputs using the input dataframe as a reference
step_12 = d3m_pipeline.PrimitiveStep(primitive_description = ConstructPredictionsPrimitive.metadata.query())
step_12.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.11.produce' #inputs here are the prediction column
)
step_12.add_argument(
name = 'reference',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.1.produce' #inputs here are the dataframe input dataset
)
step_12.add_output('produce')
pipeline.add_step(step_12)
# Final Output
pipeline.add_output(
name='output',
data_reference='steps.12.produce')
return pipeline
if __name__ == '__main__':
instance = sparsepcaPipeline2()
json_info = instance.get_json()
instanceid = instance.get_id()
instancepath = os.path.join(".", instanceid)
with open(instancepath + ".json", 'w') as file:
file.write(json_info)
file.close()
|
the-stack_0_17619 | # Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
# pylint: disable=all
import torch
from ...core import multiclass_nms
from ...core.bbox.coder.delta_xywh_bbox_coder import delta2bbox
from ...core.anchor.anchor_generator import SSDAnchorGeneratorClustered
def get_proposals(img_metas, cls_scores, bbox_preds, priors,
cfg, rescale, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
result_list = []
cls_score_list = cls_scores.tolist()
bbox_pred_list = bbox_preds.tolist()
assert len(cls_score_list) == len(bbox_pred_list)
for img_id in range(len(img_metas)):
cls_score = \
torch.Tensor(cls_score_list[img_id]).detach().to(priors.device)
bbox_pred = \
torch.Tensor(bbox_pred_list[img_id]).detach().to(priors.device)
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = get_bboxes_single(cls_score, bbox_pred, priors, img_shape,
scale_factor, cfg, rescale,
cls_out_channels, use_sigmoid_cls,
target_means, target_stds)
result_list.append(proposals)
return result_list
def get_bboxes_single(cls_scores, bbox_preds, priors, img_shape, scale_factor,
cfg, rescale, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
cls_scores = cls_scores.view(-1, cls_out_channels)
bbox_preds = bbox_preds.view(-1, 4)
priors = priors.view(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and cls_scores.shape[0] > nms_pre:
if use_sigmoid_cls:
max_scores, _ = cls_scores.max(dim=1)
else:
max_scores, _ = cls_scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
priors = priors[topk_inds, :]
bbox_preds = bbox_preds[topk_inds, :]
cls_scores = cls_scores[topk_inds, :]
mlvl_bboxes = delta2bbox(priors, bbox_preds, target_means,
target_stds, img_shape)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
if use_sigmoid_cls:
padding = cls_scores.new_zeros(cls_scores.shape[0], 1)
cls_scores = torch.cat([padding, cls_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes, cls_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
return det_bboxes, det_labels
class PriorBox(torch.autograd.Function):
"""Compute priorbox coordinates in point form for each source
feature map.
"""
@staticmethod
def symbolic(g, single_level_grid_anchors, base_anchors, base_size, scales, ratios,
anchor_stride, feat, img_tensor, target_stds):
min_size = base_size
max_sizes = []
ars = []
for scale in scales[1:]:
max_sizes.append(scale * scale * min_size)
for ar in ratios:
if ar > 1:
ars.append(ar)
return g.op("PriorBox", feat, img_tensor, min_size_f=[min_size],
max_size_f=max_sizes, aspect_ratio_f=ars, flip_i=1,
clip_i=0, variance_f=list(target_stds),
step_f=anchor_stride[0], offset_f=0.5, step_h_f=0,
step_w_f=0, img_size_i=0, img_h_i=0, img_w_i=0)
@staticmethod
def forward(ctx, single_level_grid_anchors, base_anchors, base_size, scales, ratios,
anchor_stride, feat, img_tensor, target_stds):
assert anchor_stride[0] == anchor_stride[1]
mlvl_anchor = single_level_grid_anchors(base_anchors, feat.size()[-2:], anchor_stride)
mlvl_anchor = mlvl_anchor.view(1, -1).unsqueeze(0)
return mlvl_anchor
class PriorBoxClustered(torch.autograd.Function):
"""Compute priorbox coordinates in point form for each source
feature map.
"""
@staticmethod
def symbolic(g, single_level_grid_anchors, base_anchors, anchors_heights, anchors_widths,
anchor_stride, feat, img_tensor, target_stds):
return g.op("PriorBoxClustered", feat, img_tensor,
height_f=anchors_heights, width_f=anchors_widths,
flip_i=0, clip_i=0, variance_f=list(target_stds),
step_f=anchor_stride[0], offset_f=0.5, step_h_f=0,
step_w_f=0, img_size_i=0, img_h_i=0, img_w_i=0)
@staticmethod
def forward(ctx, single_level_grid_anchors, base_anchors, anchors_heights, anchors_widths,
anchor_stride, feat, img_tensor, target_stds):
assert anchor_stride[0] == anchor_stride[1]
mlvl_anchor = single_level_grid_anchors(base_anchors, feat.size()[-2:], anchor_stride, base_anchors.device)
mlvl_anchor = mlvl_anchor.view(1, -1).unsqueeze(0)
return mlvl_anchor
class DetectionOutput(torch.autograd.Function):
"""At test time, Detect is the final layer of SSD. Decode location preds,
apply non-maximum suppression to location predictions based on conf
scores and threshold to a top_k number of output predictions for both
confidence score and locations.
"""
@staticmethod
def symbolic(g, cls_scores, bbox_preds, img_metas, cfg,
rescale, priors, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
return g.op("DetectionOutput", bbox_preds, cls_scores, priors,
num_classes_i=cls_out_channels, background_label_id_i=cls_out_channels - 1,
top_k_i=cfg['max_per_img'],
keep_top_k_i=cfg['max_per_img'],
confidence_threshold_f=cfg['score_thr'],
nms_threshold_f=cfg['nms']['iou_thr'],
eta_f=1, share_location_i=1,
code_type_s="CENTER_SIZE", variance_encoded_in_target_i=0)
@staticmethod
def forward(ctx, cls_scores, bbox_preds, img_metas, cfg,
rescale, priors, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
proposals = get_proposals(img_metas, cls_scores, bbox_preds, priors,
cfg, rescale, cls_out_channels,
use_sigmoid_cls, target_means, target_stds)
b_s = len(proposals)
output = \
torch.zeros(b_s, 1, cfg.max_per_img, 7).to(cls_scores.device)
for img_id in range(0, b_s):
bboxes, labels = proposals[img_id]
coords = bboxes[:, :4]
scores = bboxes[:, 4]
labels = labels.float()
output_for_img = \
torch.zeros(scores.size()[0], 7).to(cls_scores.device)
output_for_img[:, 0] = img_id
output_for_img[:, 1] = labels
output_for_img[:, 2] = scores
output_for_img[:, 3:] = coords
output[img_id, 0, :output_for_img.size()[0]] = output_for_img
return output
def onnx_export(self, img, img_metas, export_name='', **kwargs):
self._export_mode = True
self.img_metas = img_metas
torch.onnx.export(self, img, export_name, **kwargs)
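# Hypothetical usage sketch (object and file names are assumptions, not from this
# file): once a detector's methods are patched with the export helpers defined
# below, ONNX export is driven through onnx_export, roughly as
#
#   detector.onnx_export(dummy_img, img_metas, export_name='ssd.onnx',
#                        opset_version=10)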
def forward(self, img, img_meta=[None], return_loss=True,
**kwargs): # passing None here is a hack to fool the jit engine
if self._export_mode:
return self.forward_export(img)
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
def forward_export_detector(self, img):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_result = self.bbox_head.export_forward(*outs, self.test_cfg, True,
self.img_metas, x, img)
return bbox_result
def export_forward_ssd_head(self, cls_scores, bbox_preds, cfg, rescale,
img_metas, feats, img_tensor):
num_levels = len(cls_scores)
anchors = []
for i in range(num_levels):
if isinstance(self.anchor_generator, SSDAnchorGeneratorClustered):
anchors.append(PriorBoxClustered.apply(
self.anchor_generator.single_level_grid_anchors,
self.anchor_generator.base_anchors[i],
self.anchor_generator.heights[i],
self.anchor_generator.widths[i],
self.anchor_generator.strides[i],
feats[i], img_tensor, self.bbox_coder.stds))
else:
anchors.append(PriorBox.apply(
self.anchor_generator.single_level_grid_anchors,
self.anchor_generator.base_anchors[i],
self.anchor_generator.base_sizes[i],
self.anchor_generator.scales[i].tolist(),
self.anchor_generator.ratios[i].tolist(),
self.anchor_generator.strides[i],
feats[i],
img_tensor, self.bbox_coder.stds))
anchors = torch.cat(anchors, 2)
cls_scores, bbox_preds = self._prepare_cls_scores_bbox_preds(cls_scores, bbox_preds)
return DetectionOutput.apply(cls_scores, bbox_preds, img_metas, cfg,
rescale, anchors, self.cls_out_channels,
self.use_sigmoid_cls, self.bbox_coder.means,
self.bbox_coder.stds)
def prepare_cls_scores_bbox_preds_ssd_head(self, cls_scores, bbox_preds):
scores_list = []
for o in cls_scores:
score = o.permute(0, 2, 3, 1).contiguous().view(o.size(0), -1)
scores_list.append(score)
cls_scores = torch.cat(scores_list, 1)
cls_scores = cls_scores.view(cls_scores.size(0), -1, self.cls_out_channels)
if self.use_sigmoid_cls:
cls_scores = cls_scores.sigmoid()
else:
cls_scores = cls_scores.softmax(-1)
cls_scores = cls_scores.view(cls_scores.size(0), -1)
bbox_list = []
for o in bbox_preds:
boxes = o.permute(0, 2, 3, 1).contiguous().view(o.size(0), -1)
bbox_list.append(boxes)
bbox_preds = torch.cat(bbox_list, 1)
return cls_scores, bbox_preds
|
the-stack_0_17622 | #!/usr/bin/env python
import os
import glob
import yaml
import flask
from flask_hal import HAL
from flask_hal.document import Document, Embedded
from flask_hal.link import Collection, Link
from versions import Version
app = flask.Flask(__name__)
HAL(app)
def filter_versions():
"""
Filters versions for the current request.
"""
snapshots = flask.request.args.get('snapshots', flask.request.args.get('snapshot'))
platform = flask.request.args.get('platform')
if snapshots == 'true':
snapshots = True
else:
snapshots = False
return Version.objects.filter(snapshots=snapshots, platform=platform)
@app.route('/')
def root():
return Document(links=Collection(
Link('versions', '/versions'),
)).to_dict()
@app.route('/versions/<name>')
def version_detail(name):
version = Version.objects.get(version=name)
links = [Link(rel, url) for (rel, url) in version.binaries.items()]
return Document(data={'version': version.version}, links=Collection(*links)).to_dict()
@app.route('/versions/<name>/binaries/<platform>')
def binary_detail(name, platform):
version = Version.objects.get(version=name)
binary = version.binaries.get(platform)
if not binary:
raise flask.abort(404)
response = app.make_response(binary)
response.headers['Content-Type'] = 'text/plain'
return response
@app.route('/versions')
def list_text_versions():
versions = filter_versions().versions
if 'text/plain' in flask.request.accept_mimetypes.values():
names = [str(v) for v in versions]
response = app.make_response('\n'.join(names) + '\n')
response.headers['Content-Type'] = 'text/plain'
return response
def to_embedded(v):
return Embedded(links=Collection(Link('self', '/versions/{}'.format(v.version))))
return Document(embedded=dict([(v.version, to_embedded(v)) for v in versions])).to_dict()
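# A hypothetical helper (an assumption, not part of the original app) showing how the
# routes above can be exercised without a live server, using Flask's built-in test
# client; it relies on the same versions.Version backend as the app itself.
def _smoke_test():
    client = app.test_client()
    print(client.get('/').data)
    print(client.get('/versions', headers={'Accept': 'text/plain'}).data)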
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_17625 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Grant Welch <[email protected]>
# Copyright (c) 2016 Jakub Wilk <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Dan Garrette <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import copy
import itertools
import collections
import os
import sys
import re
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
import astroid
from astroid import decorators
from astroid import modutils
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers import utils
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = '__future__'
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*|^ignored_|^unused_')
PY3K = sys.version_info >= (3, 0)
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if (isinstance(local_node, astroid.ImportFrom)
and local_node.modname == FUTURE):
return True
return None
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return (isinstance(parent, astroid.For) and
any(else_stmt.parent_of(stmt) or else_stmt == stmt
for else_stmt in parent.orelse))
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, astroid.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, infered):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ''
infered_module = infered.root().name
if node.root().name == infered_module:
if node.lineno == infered.lineno:
more = ' %s' % infered.as_string()
elif infered.lineno:
more = ' defined at line %s' % infered.lineno
elif infered.lineno:
more = ' defined at line %s of %s' % (infered.lineno, infered_module)
return more
def _detect_global_scope(node, frame, defframe):
""" Detect that the given frames shares a global
scope.
Two frames shares a global scope when neither
of them are hidden under a function scope, as well
as any of parent scope of them, until the root scope.
In this case, depending from something defined later on
will not work, because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, astroid.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent,
(astroid.FunctionDef, astroid.Arguments)):
return False
elif any(not isinstance(f, (astroid.ClassDef, astroid.Module))
for f in (frame, defframe)):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for s in (scope, def_scope):
# Look for parent scopes. If there is anything different
# than a module or a class scope, then they frames don't
# share a global scope.
parent_scope = s
while parent_scope:
if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
# At this point, we are certain that frame and defframe shares a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _fix_dot_imports(not_consumed):
""" Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
# TODO: this should be improved in issue astroid #46
names = {}
for name, stmts in six.iteritems(not_consumed):
if any(isinstance(stmt, astroid.AssignName)
and isinstance(stmt.assign_type(), astroid.AugAssign)
for stmt in stmts):
continue
for stmt in stmts:
if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
continue
for imports in stmt.names:
second_name = None
if imports[0] == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
if imports[0].find(".") > -1 or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = imports[0]
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""
Checks if name_node has corresponding assign statement in same scope
"""
assign_stmts = name_node.scope().nodes_of_class(astroid.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
MSGS = {
'E0601': ('Using variable %r before assignment',
'used-before-assignment',
              'Used when a local variable is accessed before its assignment.'),
'E0602': ('Undefined variable %r',
'undefined-variable',
'Used when an undefined variable is accessed.'),
'E0603': ('Undefined variable name %r in __all__',
'undefined-all-variable',
'Used when an undefined variable name is referenced in __all__.'),
'E0604': ('Invalid object %r in __all__, must contain only strings',
'invalid-all-object',
'Used when an invalid (non-string) object occurs in __all__.'),
'E0611': ('No name %r in module %r',
'no-name-in-module',
'Used when a name cannot be found in a module.'),
'W0601': ('Global variable %r undefined at the module level',
'global-variable-undefined',
'Used when a variable is defined through the "global" statement \
but the variable is not defined in the module scope.'),
'W0602': ('Using global for %r but no assignment is done',
'global-variable-not-assigned',
'Used when a variable is defined through the "global" statement \
but no assignment to this variable is done.'),
'W0603': ('Using the global statement', # W0121
'global-statement',
'Used when you use the "global" statement to update a global \
              variable. Pylint just tries to discourage this \
              usage. That doesn\'t mean you cannot use it!'),
'W0604': ('Using the global statement at the module level', # W0103
'global-at-module-level',
'Used when you use the "global" statement at the module level \
since it has no effect'),
'W0611': ('Unused %s',
'unused-import',
'Used when an imported module or variable is not used.'),
'W0612': ('Unused variable %r',
'unused-variable',
'Used when a variable is defined but not used.'),
'W0613': ('Unused argument %r',
'unused-argument',
'Used when a function or method argument is not used.'),
'W0614': ('Unused import %s from wildcard import',
'unused-wildcard-import',
'Used when an imported module or variable is not used from a \
`\'from X import *\'` style import.'),
'W0621': ('Redefining name %r from outer scope (line %s)',
'redefined-outer-name',
'Used when a variable\'s name hides a name defined in the outer \
scope.'),
'W0622': ('Redefining built-in %r',
'redefined-builtin',
'Used when a variable or function override a built-in.'),
'W0623': ('Redefining name %r from %s in exception handler',
'redefine-in-handler',
'Used when an exception handler assigns the exception \
to an existing name'),
'W0631': ('Using possibly undefined loop variable %r',
'undefined-loop-variable',
              'Used when a loop variable (i.e. defined by a for loop or \
a list comprehension or a generator expression) is used outside \
the loop.'),
'E0632': ('Possible unbalanced tuple unpacking with '
'sequence%s: '
'left side has %d label(s), right side has %d value(s)',
'unbalanced-tuple-unpacking',
'Used when there is an unbalanced tuple unpacking in assignment',
{'old_names': [('W0632', 'unbalanced-tuple-unpacking')]}),
'E0633': ('Attempting to unpack a non-sequence%s',
'unpacking-non-sequence',
'Used when something which is not '
'a sequence is used in an unpack assignment',
{'old_names': [('W0633', 'unpacking-non-sequence')]}),
'W0640': ('Cell variable %s defined in loop',
'cell-var-from-loop',
'A variable used in a closure is defined in a loop. '
'This will result in all closures using the same value for '
'the closed-over variable.'),
}
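# Hypothetical illustrations (kept as comments so they do not affect this module) of
# code that would trigger two of the messages documented above:
#
#   for item in some_sequence:
#       pass
#   print(item)                     # undefined-loop-variable: `item` is unbound if
#                                   # the sequence is empty
#
#   callbacks = []
#   for i in range(3):
#       callbacks.append(lambda: i) # cell-var-from-loop: every lambda closes over
#                                   # the same `i` and sees its final value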
ScopeConsumer = collections.namedtuple("ScopeConsumer", "to_consume consumed scope_type")
class NamesConsumer(object):
"""
A simple class to handle consumed, to consume and scope type info of node locals
"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
def __repr__(self):
msg = "\nto_consume : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.to_consume.items()]))
msg += "consumed : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.consumed.items()]))
msg += "scope_type : {:s}\n".format(self._atomic.scope_type)
return msg
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, new_node):
"""
Mark the name as consumed and delete it from
        the to_consume dictionary
"""
self.consumed[name] = new_node
del self.to_consume[name]
def get_next_to_consume(self, node):
# mark the name as consumed if it's defined in this scope
name = node.name
parent_node = node.parent
found_node = self.to_consume.get(name)
if (found_node and isinstance(parent_node, astroid.Assign)
and parent_node == found_node[0].parent):
lhs = found_node[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_node = None
return found_node
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
"""
__implements__ = IAstroidChecker
name = 'variables'
msgs = MSGS
priority = -1
options = (("init-import",
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Tells whether we should check for unused import in '
'__init__ files.'}),
("dummy-variables-rgx",
{'default': '_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_',
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'A regular expression matching the name of dummy '
'variables (i.e. expectedly not used).'}),
("additional-builtins",
{'default': (), 'type' : 'csv',
'metavar' : '<comma separated list>',
'help' : 'List of additional names supposed to be defined in '
'builtins. Remember that you should avoid to define new builtins '
'when possible.'
}),
("callbacks",
{'default' : ('cb_', '_cb'), 'type' : 'csv',
'metavar' : '<callbacks>',
'help' : 'List of strings which can identify a callback '
'function by name. A callback name must start or '
'end with one of those strings.'}
),
("redefining-builtins-modules",
{'default': ('six.moves', 'past.builtins', 'future.builtins', 'io', 'builtins'),
'type': 'csv',
'metavar': '<comma separated list>',
'help': 'List of qualified module names which can have objects '
'that can redefine builtins.'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Default to name with leading underscore'}
),
('allow-global-unused-variables',
{'default': True,
'type': 'yn', 'metavar': '<y_or_n>',
'help': 'Tells whether unused global variables should be treated as a violation.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._to_consume = None # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
self._checking_mod_attr = None
self._loop_variables = []
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, 'analyse-fallback-blocks', default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, 'ignored-modules', default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, 'allow-global-unused-variables', default=True)
@utils.check_messages('redefined-outer-name')
def visit_for(self, node):
assigned_to = [var.name for var in node.target.nodes_of_class(astroid.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if (variable in outer_variables
and not in_for_else_branch(outer_for, node)):
self.add_message(
'redefined-outer-name',
args=(variable, outer_for.fromlineno),
node=node
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages('redefined-outer-name')
def leave_for(self, _):
self._loop_variables.pop()
def visit_module(self, node):
"""visit module : update consumption analysis variable
        check that globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, 'module')]
for name, stmts in six.iteritems(node.locals):
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]) or name == '__doc__':
continue
self.add_message('redefined-builtin', args=name, node=stmts[0])
@utils.check_messages('unused-import', 'unused-wildcard-import',
'redefined-builtin', 'undefined-all-variable',
'invalid-all-object', 'unused-variable')
def leave_module(self, node):
"""leave module: check globals
"""
assert len(self._to_consume) == 1
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if '__all__' in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def _check_all(self, node, not_consumed):
assigned = next(node.igetattr('__all__'))
if assigned is astroid.YES:
return
for elt in getattr(assigned, 'elts', ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if (not isinstance(elt_name, astroid.Const)
or not isinstance(elt_name.value, six.string_types)):
self.add_message('invalid-all-object',
args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == '__init__':
name = node.name + "." + elt_name
try:
modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
except SyntaxError:
                            # don't yield a syntax-error warning,
# because it will be later yielded
# when the file will be checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in six.iteritems(not_consumed):
for node in nodes:
self.add_message('unused-variable', args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
if (isinstance(stmt, astroid.Import) or
(isinstance(stmt, astroid.ImportFrom) and
not stmt.modname)):
if (isinstance(stmt, astroid.ImportFrom) and
SPECIAL_OBJ.search(imported_name)):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if as_name == "_":
continue
if as_name is None:
msg = "import %s" % imported_name
else:
msg = "%s imported as %s" % (imported_name, as_name)
self.add_message('unused-import', args=msg, node=stmt)
elif (isinstance(stmt, astroid.ImportFrom)
and stmt.modname != FUTURE):
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if imported_name == '*':
self.add_message('unused-wildcard-import',
args=name, node=stmt)
else:
if as_name is None:
msg = "%s imported from %s" % (imported_name, stmt.modname)
else:
fields = (imported_name, stmt.modname, as_name)
msg = "%s imported from %s as %s" % fields
self.add_message('unused-import', args=msg, node=stmt)
del self._to_consume
def visit_classdef(self, node):
"""visit class: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'class'))
def leave_classdef(self, _):
"""leave class: update consumption analysis variable
"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node):
"""visit lambda: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'lambda'))
def leave_lambda(self, _):
"""leave lambda: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node):
"""visit genexpr: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_generatorexp(self, _):
"""leave genexpr: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_dictcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node):
"""visit setcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_setcomp(self, _):
"""leave setcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node):
"""visit function: update consumption analysis variable and check locals
"""
self._to_consume.append(NamesConsumer(node, 'function'))
if not (self.linter.is_message_enabled('redefined-outer-name') or
self.linter.is_message_enabled('redefined-builtin')):
return
globs = node.root().globals
for name, stmt in node.items():
if utils.is_inside_except(stmt):
continue
if name in globs and not isinstance(stmt, astroid.Global):
definition = globs[name][0]
if (isinstance(definition, astroid.ImportFrom)
and definition.modname == FUTURE):
# It is a __future__ directive, not a symbol.
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message('redefined-outer-name',
args=(name, line), node=stmt)
elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(stmt):
# do not print Redefining builtin for additional builtins
self.add_message('redefined-builtin', args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (isinstance(node, astroid.FunctionDef)
and name == '__class__'
and len(node.locals['__class__']) == 1
and isinstance(node.locals['__class__'][0], astroid.ClassDef)):
return
# Ignore names imported by the global statement.
        # FIXME: should only ignore them if it's assigned later
if isinstance(stmt, astroid.Global):
return
if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(itertools.chain(
node.argnames(),
[arg.name for arg in node.args.kwonlyargs]
))
is_method = node.is_method()
klass = node.parent.frame()
if is_method and isinstance(klass, astroid.ClassDef):
confidence = INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
else:
confidence = HIGH
# Care about functions with unknown argument (builtins)
if name in argnames:
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != 'staticmethod' and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in ('__init__', '__new__'):
return
# Don't check callback arguments
if any(node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
self.add_message('unused-argument', args=name, node=stmt,
confidence=confidence)
else:
if stmt.parent and isinstance(stmt.parent, astroid.Assign):
if name in nonlocal_names:
return
if isinstance(stmt, astroid.Import):
# Need the complete name, which we don't have in .locals.
qname, asname = stmt.names[0]
name = asname or qname
self.add_message('unused-variable', args=name, node=stmt)
def leave_functiondef(self, node):
"""leave function: check function's locals are consumed"""
not_consumed = self._to_consume.pop().to_consume
if not (self.linter.is_message_enabled('unused-variable') or
self.linter.is_message_enabled('unused-argument')):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
for name, stmts in six.iteritems(not_consumed):
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages('global-variable-undefined', 'global-variable-not-assigned',
'global-statement', 'global-at-module-level',
'redefined-builtin')
def visit_global(self, node):
"""check names imported exists in the global scope"""
frame = node.frame()
if isinstance(frame, astroid.Module):
self.add_message('global-at-module-level', node=node)
return
module = frame.root()
default_message = True
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
if not assign_nodes:
self.add_message('global-variable-not-assigned',
args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (isinstance(anode, astroid.AssignName)
and anode.name in module.special_attributes):
self.add_message('redefined-builtin', args=name, node=node)
break
if anode.frame() is module:
# module level assignment
break
else:
# global undefined at the module scope
self.add_message('global-variable-undefined', args=name, node=node)
default_message = False
if default_message:
self.add_message('global-statement', node=node)
def _check_late_binding_closure(self, node, assignment_node):
def _is_direct_lambda_call():
return (isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message('cell-var-from-loop', node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)):
self.add_message('cell-var-from-loop', node=node, args=node.name)
def _loopvar_name(self, node, name):
# filter variables according to node's scope
# XXX used to filter parents but don't remember why, and removing this
# fixes a W0631 false positive reported by Paul Hachmann on 2008/12 on
# python-projects (added to func_use_for_or_listcomp_var test)
#astmts = [stmt for stmt in node.lookup(name)[1]
# if hasattr(stmt, 'ass_type')] and
# not stmt.statement().parent_of(node)]
if not self.linter.is_message_enabled('undefined-loop-variable'):
return
astmts = [stmt for stmt in node.lookup(name)[1]
if hasattr(stmt, 'ass_type')]
# filter variables according their respective scope test is_statement
# and parent to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
if not astmts or (astmts[0].is_statement or astmts[0].parent) \
and astmts[0].statement().parent_of(node):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if (astmts[i].statement().parent_of(stmt)
and not in_for_else_branch(astmts[i].statement(), stmt)):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) == 1:
assign = astmts[0].assign_type()
if (isinstance(assign, (astroid.For, astroid.Comprehension,
astroid.GeneratorExp))
and assign.statement() is not node.statement()):
self.add_message('undefined-loop-variable', args=name, node=node)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
@utils.check_messages('redefine-in-handler')
def visit_excepthandler(self, node):
for name in utils.get_all_elements(node.name):
clobbering, args = utils.clobber_in_except(name)
if clobbering:
self.add_message('redefine-in-handler', args=args, node=name)
def visit_assignname(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_name(node)
def visit_delname(self, node):
self.visit_name(node)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default = False
if (isinstance(frame, astroid.FunctionDef) and
node.statement() is frame):
in_annotation_or_default = (
(
PY3K and (node in frame.args.annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation)
)
or
frame.args.parent_of(node)
)
return in_annotation_or_default
@staticmethod
def _is_variable_violation(node, name, defnode, stmt, defstmt,
frame, defframe, base_scope_type,
recursive_klass):
# node: Node to check for violation
# name: name of node to check violation for
# frame: Scope of statement of node
# base_scope_type: local scope type
maybee0601 = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybee0601 = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
maybee0601 = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = ((isinstance(frame, astroid.FunctionDef) or
isinstance(node.frame(), astroid.Lambda)) and
_assigned_locally(node))
if not forbid_lookup and defframe.root().lookup(name)[1]:
maybee0601 = False
use_outer_definition = (
stmt == defstmt
and not isinstance(defnode, astroid.node_classes.Comprehension)
)
else:
# check if we have a nonlocal
if name in defframe.locals:
maybee0601 = not any(isinstance(child, astroid.Nonlocal)
and name in child.names
for child in defframe.get_children())
if (base_scope_type == 'lambda' and
isinstance(frame, astroid.ClassDef)
and name in frame.locals):
# This rule verifies that if the definition node of the
# checked name is an Arguments node and if the name
            # is used as a default value in the arguments defaults
# and the actual definition of the variable label
# is happening before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybee0601 should be False, otherwise
# it should be True.
maybee0601 = not (isinstance(defnode, astroid.Arguments) and
node in defnode.defaults and
frame.locals[name][0].fromlineno < defstmt.fromlineno)
elif (isinstance(defframe, astroid.ClassDef) and
isinstance(frame, astroid.FunctionDef)):
# Special rule for function return annotations,
# which uses the same name as the class where
# the function lives.
if (PY3K and node is frame.returns and
defframe.parent_of(frame.returns)):
maybee0601 = annotation_return = True
if (maybee0601 and defframe.name in defframe.locals and
defframe.locals[name][0].lineno < frame.lineno):
# Detect class assignments with the same
# name as the class. In this case, no warning
# should be raised.
maybee0601 = False
if isinstance(node.parent, astroid.Arguments):
maybee0601 = stmt.fromlineno <= defstmt.fromlineno
elif recursive_klass:
maybee0601 = True
else:
maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
if (isinstance(defframe, astroid.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt):
# Single statement function, with the statement on the
# same line as the function definition
maybee0601 = False
return maybee0601, annotation_return, use_outer_definition
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default = self._defined_in_function_definition(node, frame)
if in_annotation_or_default:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not ((isinstance(frame, astroid.ClassDef) or in_annotation_or_default) and
name in frame_locals)
@utils.check_messages(*(MSGS.keys()))
def visit_name(self, node):
"""check that a name is defined if the current scope and doesn't
redefine a built-in
"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from a astroid built from live code, skip
assert not stmt.root().file.endswith('.py')
return
name = node.name
frame = stmt.scope()
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if ((utils.is_func_default(node) and not utils.in_comprehension(node)) or
utils.is_func_decorator(node) or utils.is_ancestor_name(frame, node)):
# Do not use the highest scope to look for variable name consumption in this case
# If the name is used in the function default, or as a decorator, then it
# cannot be defined there
# (except for list comprehensions in function defaults)
start_index = len(self._to_consume) - 2
else:
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
            # if the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents accessing this scope instead of
            # the global one in function members when there are common
            # names. The only exception is when the starting scope is a
            # comprehension and its direct outer scope is a class
if current_consumer.scope_type == 'class' and i != start_index and not (
base_scope_type == 'comprehension' and i == start_index-1):
if self._ignore_class_scope(node):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
            # avoid the case where there are homonyms inside the function scope and
            # the current comprehension scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == 'comprehension'
and self._has_homonym_in_upper_function_scope(node, i)):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (frame is defframe and
defframe.parent_of(node) and
isinstance(defframe, astroid.ClassDef) and
node.name == defframe.name)
maybee0601, annotation_return, use_outer_definition = self._is_variable_violation(
node, name, defnode, stmt, defstmt,
frame, defframe,
base_scope_type, recursive_klass)
if use_outer_definition:
continue
if (maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ('NameError',))):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = (
defstmt is stmt
and isinstance(node, (astroid.DelName, astroid.AssignName))
)
if (recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name,
node=node)
elif base_scope_type != 'lambda':
                    # E0601 may *not* occur in lambda scope.
self.add_message('used-before-assignment', args=name, node=node)
elif base_scope_type == 'lambda':
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message('used-before-assignment',
args=name, node=node)
else:
self.add_message('undefined-variable',
args=name, node=node)
elif current_consumer.scope_type == 'lambda':
self.add_message('undefined-variable',
node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
            # we have not found the name; if it isn't a builtin, that's an
            # undefined name!
if not (name in astroid.Module.scope_attrs or utils.is_builtin(name)
or name in self.config.additional_builtins):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name, node=node)
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
        :return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index-1::-1]:
if _consumer.scope_type == 'function' and node.name in _consumer.to_consume:
return True
return False
@utils.check_messages('no-name-in-module')
def visit_import(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
for name, _ in node.names:
parts = name.split('.')
try:
module = next(node.infer_name_module(parts[0]))
except astroid.ResolveError:
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages('no-name-in-module')
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split('.')
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == '*':
continue
self._check_module_attrs(node, module, name.split('.'))
@utils.check_messages('unbalanced-tuple-unpacking', 'unpacking-non-sequence')
def visit_assign(self, node):
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences.
"""
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
targets = node.targets[0].itered()
try:
infered = utils.safe_infer(node.value)
if infered is not None:
self._check_unpacking(infered, node, targets)
except astroid.InferenceError:
return
def _check_unpacking(self, infered, node, targets):
""" Check for unbalanced tuple unpacking
        and unpacking non-sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if infered is astroid.YES:
return
if (isinstance(infered.parent, astroid.Arguments) and
isinstance(node.value, astroid.Name) and
node.value.name == infered.parent.vararg):
# Variable-length argument, we can't determine the length.
return
if isinstance(infered, (astroid.Tuple, astroid.List)):
# attempt to check unpacking is properly balanced
values = infered.itered()
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, astroid.Starred)
for target in targets):
return
self.add_message('unbalanced-tuple-unpacking', node=node,
args=(_get_unpacking_extra_info(node, infered),
len(targets),
len(values)))
        # attempt to check whether unpacking is possible (i.e. the RHS is iterable)
else:
if not utils.is_iterable(infered):
self.add_message('unpacking-non-sequence', node=node,
args=(_get_unpacking_extra_info(node, infered),))
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
assert isinstance(module, astroid.Module), module
while module_names:
name = module_names.pop(0)
if name == '__dict__':
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message('no-name-in-module',
args=(name, module.name), node=node)
return None
except astroid.InferenceError:
return None
if module_names:
# FIXME: other message if name is not the latest part of
# module_names ?
modname = module.name if module else '__dict__'
self.add_message('no-name-in-module', node=node,
args=('.'.join(module_names), modname))
return None
if isinstance(module, astroid.Module):
return module
return None
class VariablesChecker3k(VariablesChecker):
'''Modified variables checker for 3k'''
    # list comprehensions now also have their own scope
def visit_listcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_listcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def leave_functiondef(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_functiondef(node)
def leave_module(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_module(node)
def _check_metaclasses(self, node):
""" Update consumption analysis for metaclasses. """
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, astroid.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
            # Skip if this class doesn't explicitly use a metaclass, but inherits it from ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif metaclass:
name = metaclass.root().name
found = None
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _ in self._to_consume[::-1]:
found = scope_locals.get(name)
if found:
consumed.append((scope_locals, name))
break
if found is None and not metaclass:
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, astroid.Attribute):
name = klass._metaclass.as_string()
if name is not None:
if not (name in astroid.Module.scope_attrs or
utils.is_builtin(name) or
name in self.config.additional_builtins or
name in parent_node.locals):
self.add_message('undefined-variable',
node=klass,
args=(name,))
return consumed
if sys.version_info >= (3, 0):
VariablesChecker = VariablesChecker3k
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(VariablesChecker(linter))
|
the-stack_0_17627 | # -*- coding: utf-8 -*-
"""Example of using Linear Method Deviation-base outlier detection (LMDD)
"""
# Author: Yahya Almardeny <[email protected]>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from pyod.models.lmdd import LMDD
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
if __name__ == "__main__":
contamination = 0.1 # percentage of outliers
n_train = 200 # number of training points
n_test = 100 # number of testing points
# Generate sample data
X_train, y_train, X_test, y_test = \
generate_data(n_train=n_train,
n_test=n_test,
n_features=2,
contamination=contamination,
random_state=42)
# train LMDD detector
clf_name = 'LMDD'
clf = LMDD(random_state=42)
clf.fit(X_train)
# get the prediction labels and outlier scores of the training data
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
# get the prediction on the test data
y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)
y_test_scores = clf.decision_function(X_test) # outlier scores
# evaluate and print the results
print("\nOn Training Data:")
evaluate_print(clf_name, y_train, y_train_scores)
print("\nOn Test Data:")
evaluate_print(clf_name, y_test, y_test_scores)
# visualize the results
visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred, show_figure=True, save_figure=False)
|
the-stack_0_17628 | #!/share/apps/canopy-1.4.1/Canopy_64bit/User/bin/python
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import colormaps as cmaps
from matplotlib import rcParams
from matplotlib.ticker import MaxNLocator
import os
import IO.reader
fig_width = 3.0 # width in inches
fig_height = fig_width/1.333 # height in inches
fig_size = [fig_width,fig_height]
params = {'backend': 'Agg',
'axes.labelsize': 8,
'axes.titlesize': 8,
'font.size': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'figure.figsize': fig_size,
'savefig.dpi' : 600,
'font.family': 'sans-serif',
'axes.linewidth' : 0.5,
'xtick.major.size' : 2,
'ytick.major.size' : 2,
'font.size' : 8,
'svg.fonttype' : 'none',
'pdf.fonttype' : 42
}
rcParams.update(params)
# Create inset
fig = plt.figure()
ax1 = fig.add_subplot(111)
lwidth=0.8
msize=4
#left, bottom, width, height = [0.3, 0.3, 0.3, 0.3]
#ax2 = fig.add_axes([left, bottom, width, height])
# colormap
n_curves = 11
values = list(range(n_curves))
plt.register_cmap(name='magma', cmap=cmaps.magma)
jet = cm = plt.get_cmap('magma')
cNorm = colors.Normalize(vmin=0, vmax=values[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
# Data
# column 1: q, column 2: P(q)
#Colors
col=list(range(n_curves))
col[0] = scalarMap.to_rgba(values[0])
col[1] = scalarMap.to_rgba(values[1])
col[2] = scalarMap.to_rgba(values[2])
col[3] = scalarMap.to_rgba(values[3])
col[4] = scalarMap.to_rgba(values[4])
col[5] = scalarMap.to_rgba(values[5])
col[6] = scalarMap.to_rgba(values[6])
col[7] = scalarMap.to_rgba(values[7])
col[8] = scalarMap.to_rgba(values[8])
col[9] = scalarMap.to_rgba(values[9])
#Labels
# Force data:
job_folder = "Force_mathcing_442301"
ref_data_path = "../Tutorial_04_preparation/ReferenceData"
guess_force_data_address = os.path.join(job_folder,"Output/mW_300K_1bar_500_guess.force")
ref_force_data_address = os.path.join(ref_data_path,"force/mW_300K_1bar_500/Ref.force")
best_force_data_address = os.path.join(job_folder,"Output/mW_300K_1bar_500_best.force")
# Force matching:
# read the force data in parallel:
# ----- Modify the following depending on the size of your data -----
start_at = 1 # The nth configuration to start with ( by default starts with 1st configuration)
work_load = 500 # total number of configurations
num_cores = 1 # total number of cores assigned
buffer_size = 2000 # total number of configuration read into memory at once for each time
total_atoms = 512
# ---------------------------------------------------------------
work_flow = IO.reader.parallel_assignment(start_at,work_load,num_cores,buffer_size)
# work_flow: a Python list of (start, nconfigs) tuples:
# [(start, nconfigs), (start, nconfigs), ...]
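# Illustrative sketch only (IO.reader's actual implementation may differ): one way
# such an assignment could be built by splitting `work_load` configurations across
# `num_cores`, assuming each tuple is (start_config, n_configs):
#
#     per_core = work_load // num_cores
#     work_flow_sketch = [(start_at + i * per_core,
#                          per_core if i < num_cores - 1 else work_load - i * per_core)
#                         for i in range(num_cores)]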
ref_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(ref_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
guess_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(guess_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
best_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(best_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
x_force = np.arange(-100,100)
y_force = x_force
# Loop over each chunk of data and plot it
for ref_output,guess_output,best_output in zip(ref_output_lst, guess_output_lst, best_output_lst):
ref_data = ref_output.get()
guess_data = guess_output.get()
best_data = best_output.get()
ax1.scatter(ref_data, best_data,color="r")
ax1.set_xlim([-40,40])
ax1.set_ylim([-40,40])
ax1.set_xlabel("Reference forces: " + "$kcal \cdot (mol \cdot \AA)^{-1}$")
#ax1.set_ylabel("Guess forces: "+ "$kcal \cdot (mol \cdot \AA)^{-1}$")
ax1.set_ylabel("Best forces: "+ "$kcal \cdot (mol \cdot \AA)^{-1}$")
plt.plot(x_force,y_force,label="forces are equal",color="k")
#ax1.plot(ref_gr_data[:,0],ref_gr_data[:,1],color="k",label="Ref")
#ax1.plot(guess_gr_data[:,0],guess_gr_data[:,1],color="r",label="Guess")
#plt.plot(best_gr_data[:,0],best_gr_data[:,1],color="r",label="Best predicted")
#ax1.scatter(T,predicted_data_T,color="r",label="Best Predicted")
#plt.ylim([0.99,1.48])
#ax1.set_ylim([0.995,1.01])
#ax1.set_ylim([0.8,1.05])
# Plot P(q) vs q:
#ax1.set_title("production")
#ax1.scatter(tersoff[:,0],tersoff [:,1],s=6,label=plot_ID[0],color=col[0])
#ax1.scatter(tersoff_table[:,0],tersoff_table[:,1],s=6,label=plot_ID[1],color=col[5])
minorLocator = MultipleLocator(0.5)
majorLocator=MultipleLocator(5)
ax = plt.subplot(111)
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles[::-1],labels[::-1],loc="upper center",fontsize=5,frameon=False, labelspacing=0.07,ncol=2)
#plt.legend(loc="upper right")
#plt.legend(loc=(0.1,0.385),fontsize=7,frameon=False, labelspacing=0.15,ncol=1)
left, bottom, width, height = [0.48, 0.62, 0.48, 0.3]
plt.subplots_adjust(left=0.2, bottom=0.22, right=0.95, top=0.90, wspace=0.0, hspace=0.0)
#plt.savefig('fig1a.pdf',transparent=True)
plt.savefig('force_mW_300K_best_ref.png',transparent=False)
#plt.savefig('fig1a.eps',transparent=True)
plt.show()
|
the-stack_0_17629 | import pytest
from pymyenergi.client import MyenergiClient
from pymyenergi.eddi import Eddi
from pymyenergi.harvi import Harvi
from pymyenergi.zappi import Zappi
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
conn = {}
async def test_init(bypass_client_fetch_data):
client = MyenergiClient(conn)
await client.refresh()
assert len(client.devices) == 0
async def test_init_error(error_on_client_fetch_data):
client = MyenergiClient(conn)
with pytest.raises(Exception):
assert await client.refresh()
async def test_get_all_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices()
assert len(devices) == 5
async def test_get_eddi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("eddi")
assert len(devices) == 1
assert isinstance(devices[0], Eddi)
async def test_get_zappi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("zappi")
assert len(devices) == 2
assert isinstance(devices[1], Zappi)
async def test_get_harvi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("harvi")
assert len(devices) == 2
assert isinstance(devices[1], Harvi)
async def test_1p_harvi_eddi_solar_battery(client_1p_zappi_harvi_solar_battery_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("harvi")
assert len(devices) == 1
assert isinstance(devices[0], Harvi)
devices = await client.get_devices("zappi")
assert len(devices) == 1
assert isinstance(devices[0], Zappi)
assert client.power_grid == 10000
assert client.power_generation == 5000
assert client.power_battery == 3000
assert client.power_charging == 2000
assert client.consumption_home == 16000
|
the-stack_0_17631 | """Color edit tool."""
import sublime
import sublime_plugin
from .lib.coloraide import Color
import mdpopups
from . import ch_util as util
from .ch_mixin import _ColorMixin
import copy
from . import ch_tools as tools
DEF_EDIT = """---
markdown_extensions:
- markdown.extensions.attr_list
- markdown.extensions.def_list
- pymdownx.betterem
...
{}
## Format
<code>Source( + Backdrop)?( !blendmode)?( @colorspace)?</code>
## Instructions
Colors can be specified in any supported color space, but blend modes work best on<br>
RGB-ish colors spaces. They can be converted and output to another color space with<br>
<code>@colorspace</code>.
If two colors are provided, joined with <code>+</code>, the colors will be blended.<br>
Default blend mode is <code>normal</code>, but can be changed with<br>
<code>!blendmode</code>.
Transparent backdrops will be <code>normal</code> blended with white.
"""
def parse_color(string, start=0, second=False):
"""
Parse colors.
The return of `more`:
- `None`: there is no more colors to process
- `True`: there are more colors to process
- `False`: there are more colors to process, but we failed to find them.
"""
length = len(string)
more = None
space = None
blend_mode = 'normal'
# First color
color = Color.match(string, start=start, fullmatch=False)
if color:
start = color.end
if color.end != length:
more = True
# Is the first color in the input or the second?
if not second:
# Plus sign indicating we have an additional color to mix
m = tools.RE_PLUS.match(string, start)
if m:
start = m.end(0)
more = start != length
else:
m = tools.RE_MODE.match(string, start)
if m:
blend_mode = m.group(1)
start = m.end(0)
m = tools.RE_SPACE.match(string, start)
if m:
text = m.group(1).lower()
if text in color.color.CS_MAP:
space = text
start = m.end(0)
more = None if start == length else False
else:
m = tools.RE_MODE.match(string, start)
if m:
blend_mode = m.group(1)
start = m.end(0)
# Color space indicator
m = tools.RE_SPACE.match(string, start)
if m:
text = m.group(1).lower()
if text in color.color.CS_MAP:
space = text
start = m.end(0)
more = None if start == length else False
if color:
color.end = start
return color, more, space, blend_mode
def evaluate(string):
"""Evaluate color."""
colors = []
try:
color = string.strip()
second = None
blend_mode = 'normal'
space = None
# Try to capture the color or the two colors to mix
first, more, space, blend_mode = parse_color(color)
if first and more is not None:
if more is False:
first = None
else:
second, more, space, blend_mode = parse_color(color, start=first.end, second=True)
if not second or more is False:
first = None
second = None
# Package up the color, or the two reference colors along with the mixed.
if first:
colors.append(first.color)
if second is None and space is not None and space != first.color.space():
colors[0] = first.color.convert(space)
if second:
colors.append(second.color)
colors.append(first.color.compose(second.color, blend=blend_mode, space=space, out_space=space))
except Exception:
colors = []
return colors
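# Illustrative sketch only (not an authoritative description of plugin behavior):
# following the grammar documented in DEF_EDIT above, a call such as
#
#     colors = evaluate("red + blue !multiply")
#
# is expected to yield three Color objects: the source, the backdrop, and the
# blended result, while a single input like "red @hsl" should yield one color
# converted to the requested space. Exact values depend on the bundled coloraide.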
class ColorHelperBlendModeInputHandler(tools._ColorInputHandler):
"""Handle color inputs."""
def __init__(self, view, initial=None, **kwargs):
"""Initialize."""
self.color = initial
super().__init__(view, **kwargs)
def placeholder(self):
"""Placeholder."""
return "Color"
def initial_text(self):
"""Initial text."""
if self.color is not None:
return self.color
elif len(self.view.sel()) == 1:
self.setup_color_class()
text = self.view.substr(self.view.sel()[0])
if text:
color = None
try:
color = self.custom_color_class(text, filters=self.filters)
except Exception:
pass
if color is not None:
color = Color(color)
return color.to_string(**util.DEFAULT)
return ''
def preview(self, text):
"""Preview."""
style = self.get_html_style()
try:
colors = evaluate(text)
html = ""
for color in colors:
orig = Color(color)
message = ""
color_string = ""
check_space = 'srgb' if orig.space() not in util.SRGB_SPACES else orig.space()
if not orig.in_gamut(check_space):
orig = orig.fit("srgb")
message = '<br><em style="font-size: 0.9em;">* preview out of gamut</em>'
color_string = "<strong>Gamut Mapped</strong>: {}<br>".format(orig.to_string())
srgb = orig.convert('srgb', fit=True)
color_string += "<strong>Color</strong>: {}".format(color.to_string(**util.DEFAULT))
preview = srgb.to_string(**util.HEX_NA)
preview_alpha = srgb.to_string(**util.HEX)
preview_border = self.default_border
temp = Color(preview_border)
if temp.luminance() < 0.5:
second_border = temp.mix('white', 0.25, space="srgb").to_string(**util.HEX_NA)
else:
second_border = temp.mix('black', 0.25, space="srgb").to_string(**util.HEX_NA)
height = self.height * 3
width = self.width * 3
check_size = self.check_size(height, scale=8)
html += tools.PREVIEW_IMG.format(
mdpopups.color_box(
[preview, preview_alpha],
preview_border, second_border,
border_size=2, height=height, width=width, check_size=check_size
),
message,
color_string
)
if html:
return sublime.Html('<html><body>{}</body></html>'.format(style + html))
else:
return sublime.Html(
'<html><body>{}</body></html>'.format(mdpopups.md2html(self.view, DEF_EDIT.format(style)))
)
except Exception:
return sublime.Html(mdpopups.md2html(self.view, DEF_EDIT.format(style)))
def validate(self, color):
"""Validate."""
try:
color = evaluate(color)
return len(color) > 0
except Exception:
return False
class ColorHelperBlendModeCommand(_ColorMixin, sublime_plugin.TextCommand):
"""Open edit a color directly."""
def run(
self, edit, color_helper_blend_mode, initial=None, on_done=None, **kwargs
):
"""Run command."""
colors = evaluate(color_helper_blend_mode)
color = None
if colors:
color = colors[-1]
if color is not None:
if on_done is None:
on_done = {
'command': 'color_helper',
'args': {'mode': "result", "result_type": "__tool__:__blend__"}
}
call = on_done.get('command')
if call is None:
return
args = copy.deepcopy(on_done.get('args', {}))
args['color'] = color.to_string(**util.COLOR_FULL_PREC)
self.view.run_command(call, args)
def input(self, kwargs): # noqa: A003
"""Input."""
return ColorHelperBlendModeInputHandler(self.view, **kwargs)
|
the-stack_0_17632 | import argparse
import os
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import test_utils
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
from adapter_lib import *
import pdb
############################Config###########################################
# path to waymo dataset "folder" (all .tfrecord files in that folder will
# be converted)
DATA_PATH = '/media/trail/harddrive/datasets/Waymo/original/validation'
# path to save kitti dataset
KITTI_PATH = '/media/trail/harddrive/datasets/Waymo/waymo/validation'
# location filter, use this to convert your preferred location
LOCATION_FILTER = False
LOCATION_NAME = ['location_sf']
# max indexing length
INDEX_LENGTH = 15
# as name
IMAGE_FORMAT = 'png'
# do not change
LABEL_PATH = KITTI_PATH + '/label_0'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
IMAGE_PATH = KITTI_PATH + '/image_0'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
IMG_CALIB_PATH = KITTI_PATH + '/img_calib'
###############################################################################
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT',
'_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE',
'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.__file_names = []
self.T_front_cam_to_ref = []
self.T_vehicle_to_front_cam = []
def cvt(self, args, folder, start_ind):
""" convert dataset from Waymo to KITTI
Args:
return:
"""
self.start_ind = start_ind
self.get_file_names(DATA_PATH + '/' + folder)
print("Converting ..." + folder)
self.create_folder(args.camera_type)
bar = progressbar.ProgressBar(maxval=len(self.__file_names) + 1,
widgets=[progressbar.Percentage(), ' ',
progressbar.Bar(
marker='>', left='[', right=']'), ' ',
progressbar.ETA()])
tf.enable_eager_execution()
file_num = 1
frame_num = 0
frame_name = self.start_ind
label_exists = False
print("start converting ...")
bar.start()
for file_idx, file_name in enumerate(self.__file_names):
print('File {}/{}'.format(file_idx, len(self.__file_names)))
dataset = tf.data.TFRecordDataset(file_name, compression_type='')
for data in dataset:
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
if (frame_num % args.keyframe) == 0:
if LOCATION_FILTER == True and frame.context.stats.location not in LOCATION_NAME:
continue
if args.test == False:
label_exists = self.save_label(frame, frame_name, args.camera_type, False, True)
if args.test == label_exists:
frame_num += 1
continue
self.save_calib(frame, frame_name)
self.save_label(
frame, frame_name, args.camera_type)
self.save_image(frame, frame_name, args.camera_type)
self.save_lidar(frame, frame_name)
self.save_image_calib(frame, frame_name)
# print("image:{}\ncalib:{}\nlidar:{}\nlabel:{}\n".format(str(s1-e1),str(s2-e2),str(s3-e3),str(s4-e4)))
frame_name += 1
frame_num += 1
bar.update(file_num)
file_num += 1
bar.finish()
print("\nfinished ...")
return frame_name
def save_image(self, frame, frame_num, cam_type):
""" parse and save the images in png format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
for img in frame.images:
if cam_type == 'all' or cam_type == str(img.name - 1):
img_path = IMAGE_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
img = cv2.imdecode(np.frombuffer(
img.image, np.uint8), cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
def save_calib(self, frame, frame_num, kitti_format=True):
""" parse and save the calibration data
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_calib = open(CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
self.T_front_cam_to_ref = np.array([
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[1.0, 0.0, 0.0]
])
camera_calib = []
R0_rect = ["%e" % i for i in np.eye(3).flatten()]
Tr_velo_to_cam = []
calib_context = ''
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = self.cart_to_homo(self.T_front_cam_to_ref) @ np.linalg.inv(tmp)
Tr_velo_to_cam.append(["%e" % i for i in tmp[:3,:].reshape(12)])
for cam in frame.context.camera_calibrations:
tmp = np.zeros((3, 4))
tmp[0, 0] = cam.intrinsic[0]
tmp[1, 1] = cam.intrinsic[1]
tmp[0, 2] = cam.intrinsic[2]
tmp[1, 2] = cam.intrinsic[3]
tmp[2, 2] = 1
tmp = list(tmp.reshape(12))
tmp = ["%e" % i for i in tmp]
camera_calib.append(tmp)
T_front_cam_to_vehicle = np.array(frame.context.camera_calibrations[0].extrinsic.transform).reshape(4, 4)
self.T_vehicle_to_front_cam = np.linalg.inv(T_front_cam_to_vehicle)
for i in range(5):
calib_context += "P" + str(i) + ": " + \
" ".join(camera_calib[i]) + '\n'
calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
for i in range(5):
calib_context += "Tr_velo_to_cam_" + \
str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
calib_context += "timestamp_micros: " + \
str(frame.timestamp_micros) + '\n'
calib_context += "context_name: " + str(frame.context.name) + '\n'
fp_calib.write(calib_context)
fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
def save_label(self, frame, frame_num, cam_type, kitti_format=False, check_label_exists = False):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
# get point cloud in the frame
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = tf.convert_to_tensor(
np.concatenate(points, axis=0), dtype=np.float32)
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.projected_lidar_labels:
name = labels.name
for label in labels.labels:
bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
id_to_bbox[label.id] = bbox
id_to_name[label.id] = name - 1
Tr_velo_to_cam = []
recorded_label = []
label_lines = ''
label_all_lines = ''
"""
if kitti_format:
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = np.linalg.inv(tmp)
axes_transformation = np.array([[0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]])
tmp = np.matmul(axes_transformation, tmp)
Tr_velo_to_cam.append(tmp)
"""
for obj in frame.laser_labels:
            # calculate bounding box
bounding_box = None
name = None
id = obj.id
for lidar in self.__lidar_list:
if id + lidar in id_to_bbox:
bounding_box = id_to_bbox.get(id + lidar)
name = str(id_to_name.get(id + lidar))
break
if bounding_box == None or name == None:
continue
box = tf.convert_to_tensor(
[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=np.float32)
box = tf.reshape(box, (1, 7))
num_points = box_utils.compute_num_points_in_box_3d(
points_all, box)
num_points = num_points.numpy()[0]
detection_difficulty = obj.detection_difficulty_level
my_type = self.__type_list[obj.type]
truncated = 0
occluded = 0
height = obj.box.height
width = obj.box.width
length = obj.box.length
x = obj.box.center_x
y = obj.box.center_y
z = obj.box.center_z - height/2
if check_label_exists == False:
pt_ref = self.cart_to_homo(self.T_front_cam_to_ref) @ self.T_vehicle_to_front_cam @ np.array([x,y,z,1]).reshape((4,1))
x, y, z, _ = pt_ref.flatten().tolist()
rotation_y = -obj.box.heading - np.pi/2
beta = math.atan2(x, z)
alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
# save the labels
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(
alpha, 2),
round(
bounding_box[0], 2),
round(
bounding_box[1], 2),
round(
bounding_box[2], 2),
round(
bounding_box[3], 2),
round(
height, 2),
round(
width, 2),
round(
length, 2),
round(
x, 2),
round(
y, 2),
round(
z, 2),
round(
rotation_y, 2),
num_points,
detection_difficulty)
line_all = line[:-1] + ' ' + name + '\n'
# store the label
label_all_lines += line_all
if (name == cam_type):
label_lines += line
recorded_label.append(line)
if len(recorded_label) == 0:
return False
else:
fp_label_all = open(LABEL_ALL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label = open(LABEL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label.write(label_lines)
fp_label.close()
fp_label_all.write(label_all_lines)
fp_label_all.close()
return True
def save_image_calib(self, frame, frame_num):
fp_image_calib = open(IMG_CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
camera_calib = []
pose = []
velocity = []
timestamp = []
shutter = []
trigger_time = []
readout_done_time = []
calib_context = ''
for camera in frame.images:
tmp = np.array(camera.pose.transform).reshape((16,))
pose.append(["%e" % i for i in tmp])
tmp = np.zeros(6)
tmp[0] = camera.velocity.v_x
tmp[1] = camera.velocity.v_y
tmp[2] = camera.velocity.v_z
tmp[3] = camera.velocity.w_x
tmp[4] = camera.velocity.w_y
tmp[5] = camera.velocity.w_z
velocity.append(["%e" % i for i in tmp])
timestamp.append(camera.pose_timestamp)
shutter.append(camera.shutter)
trigger_time.append(camera.camera_trigger_time)
readout_done_time.append(camera.camera_readout_done_time)
for i in range(5):
calib_context += "Pose_" + str(i) + ": " + \
" ".join(pose[i]) + '\n'
for i in range(5):
calib_context += "Velocity_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
        for i in range(5):
            calib_context += "Timestamp_" + str(i) + ": " + \
                str(timestamp[i]) + '\n'
        for i in range(5):
            calib_context += "Shutter_" + str(i) + ": " + \
                str(shutter[i]) + '\n'
        for i in range(5):
            calib_context += "Trigger_" + str(i) + ": " + \
                str(trigger_time[i]) + '\n'
        for i in range(5):
            calib_context += "Readout_" + str(i) + ": " + \
                str(readout_done_time[i]) + '\n'
fp_image_calib.write(calib_context)
fp_image_calib.close()
def get_file_names(self, folder):
for i in os.listdir(folder):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(folder + '/' + i)
def cart_to_homo(self, mat):
ret = np.eye(4)
if mat.shape == (3, 3):
ret[:3, :3] = mat
elif mat.shape == (3, 4):
ret[:3, :] = mat
else:
raise ValueError(mat.shape)
return ret
def create_folder(self, cam_type):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(IMG_CALIB_PATH):
os.mkdir(IMG_CALIB_PATH)
if not os.path.exists(IMAGE_PATH):
os.mkdir(IMAGE_PATH)
if not os.path.exists(LABEL_PATH):
os.mkdir(LABEL_PATH)
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param frame_num: the current frame number
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1, 4)
intensity_0 = intensity_0[:, 1]
intensity_1 = np.array(range_images[lidar_num][
1].data).reshape(-1, 4)[:, 1]
return intensity_0, intensity_1
def image_show(self, data, name, layout, cmap=None):
"""Show an image."""
plt.subplot(*layout)
plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
plt.title(name)
plt.grid(False)
plt.axis('off')
def parse_range_image_and_camera_projection(self, frame):
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
"""
self.__range_images = {}
# camera_projections = {}
# range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name] = [ri]
if laser.name == open_dataset.LaserName.TOP:
range_image_top_pose_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = open_dataset.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return1.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name] = [cp]
if len(laser.ri_return2.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name].append(ri)
#
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return2.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name].append(cp)
return self.__range_images, range_image_top_pose
def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
"""Plots range image.
Args:
data: range image data
name: the image title
layout: plt layout
vmin: minimum value of the passed data
vmax: maximum value of the passed data
cmap: color map
"""
plt.subplot(*layout)
plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(name)
plt.grid(False)
plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
def show_range_image(self, range_image, layout_index_start=1):
"""Shows range image.
Args:
range_image: the range image data from a given lidar of type MatrixFloat.
layout_index_start: layout offset
"""
range_image_tensor = tf.convert_to_tensor(range_image.data)
range_image_tensor = tf.reshape(
range_image_tensor, range_image.shape.dims)
lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
tf.ones_like(range_image_tensor) * 1e10)
range_image_range = range_image_tensor[..., 0]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
self.plot_range_image_helper(range_image_range.numpy(), 'range',
[8, 1, layout_index_start], vmax=75, cmap='gray')
self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
[8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
[8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
"""
calibrations = sorted(
frame.context.laser_calibrations, key=lambda c: c.name)
# lasers = sorted(frame.lasers, key=lambda laser: laser.name)
points = []
# cp_points = []
intensity = []
frame_pose = tf.convert_to_tensor(
np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[...,
0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[
..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min,
c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == open_dataset.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(
beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
intensity_tensor = tf.gather_nd(range_image_tensor,
tf.where(range_image_mask))
# cp = camera_projections[c.name][0]
# cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
# cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points.append(points_tensor.numpy())
# cp_points.append(cp_points_tensor.numpy())
intensity.append(intensity_tensor.numpy()[:, 1])
return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Save Waymo dataset into Kitti format')
parser.add_argument('--keyframe',
type=int,
default=10,
                        help='Save every specified # of frames. Default is 10, so every 10th frame is saved')
parser.add_argument('--camera_type',
type=str,
default="0",
help='Select camera views to save. Input argument from 0 to 4 or all')
parser.add_argument('--start_ind',
type=int,
default=0,
help='File number starts counting from this index')
parser.add_argument('--test',
type=bool,
default=False,
help='if true, does not save any ground truth data')
args = parser.parse_args()
start_ind = args.start_ind
path, dirs, files = next(os.walk(DATA_PATH))
dirs.sort()
for directory in dirs:
adapter = Adapter()
last_ind = adapter.cvt(args, directory, start_ind)
start_ind = last_ind
|
the-stack_0_17634 | # Global program step time in seconds
STEP_TIME = 0.004
# Left Motor pins
PINS_MOTOR_LEFT = [2, 3, 4, 17]
# Right Motor pins
PINS_MOTOR_RIGHT = [27, 22, 10, 9]
# IR Sensor Pins
PINS_SENSOR = [14, 15, 18]
# Button pin
PIN_BUTTON = 26
# Led Pins
PINS_FRONT_LED = [12, 16, 20, 21]
# Left indicator led
PINS_LEFT_LED = 13
# Right indicator led
PINS_RIGHT_LED = 19
# Degrees the wheel turns per motor step
MOTOR_DEG_PER_STEP = 5.625 / 64
# Url to get new deliveries
DELIVERY_URL = "http://rutgeruijtendaal.com/PiRiders/api.php?function=deliveryInfo"
# Url to say a delivery is done
DELIVERED_URL = "http://rutgeruijtendaal.com/PiRiders/api.php?function=deliveryUpdate"
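# Minimal usage sketch (illustrative only; the motor-driving code lives elsewhere):
# turning a desired wheel rotation into motor steps and an approximate duration
# from the constants above.
#
#     steps = int(degrees / MOTOR_DEG_PER_STEP)  # e.g. 360 degrees -> 4096 steps
#     duration_s = steps * STEP_TIME             # at 0.004 s per step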
|
the-stack_0_17635 | # Everything is better with friends: Executing SAS® code in Python scripts with
# SASPy, and turbocharging your SAS programming with open-source tooling
#
# Half-day class, Western Users of SAS Software (WUSS) 2019
###############################################################################
# Exercises 6-9: SASPy Data Round Trip #
###############################################################################
# Lines 12-13 load modules needed for exercises and should be left as-is
from class_setup import print_with_title
from saspy import SASsession
###############################################################################
# #
# Exercise 6. [Python w/ saspy] Connect to a SAS kernel #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
sas = SASsession()
print_with_title(type(sas), 'The type of SAS session object sas:')
# Notes:
#
# 1. A SASsession object named sas is created, and the following are printed:
# * confirmation a SAS session has been established
# * the type of object sas (which is saspy.sasbase.SASsession)
#
# 2. As with the DataFrame object type above, SASsession is not built into
# Python, so we had to import its definition from the saspy module at the
# beginning of this file.
#
# 3. All subsequent exercises in this file will assume the object sas exists,
# so please don't comment out the line creating it.
###############################################################################
# #
# Exercise 7. [Python w/ pandas & saspy] Load a SAS dataset into a DataFrame #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
fish_df = sas.sasdata2dataframe(table='fish', libref='sashelp')
print_with_title(fish_df, 'The value of fish_df:')
print_with_title(
fish_df.describe(),
'The Python equivalent of PROC MEANS using fish_df:'
)
print_with_title(fish_df.head(), 'The first five rows of fish_df:')
# Pass a numerical parameter to the head method
print(fish_df.head(4))
# Change the head method to tail
print(fish_df.tail())
# View other portions of fish_df
print(fish_df.iloc[0:2, 1:4])
# Notes:
#
# 1. A DataFrame object named fish_df with dimensions 159x7 (159 rows and 7
# columns) is created from the SAS dataset fish in the sashelp library, and
# the following are printed:
# * the type of object fish_df (which is
# <class 'pandas.core.frame.DataFrame'>)
# * the first five rows of fish_df, which are at row indices 0 through 4
# since Python uses zero-based indexing
# * summary information about the 6 numerical columns of fish_df, which is
# obtained by fish_df calling its describe method (the pandas equivalent
# of the SAS MEANS procedure)
#
# 2. The sas object represents a connection to a SAS session and was created
# in a previous exercise. Here, sas calls its sasdata2dataframe method to
# access the SAS library sashelp defined within this SAS session and to load
# the entire contents of SAS dataset sashelp.fish into the DataFrame
# fish_df.
#
# 3. All subsequent exercises in this file will assume the object fish_df
# exists, so please don't comment out the line creating it.
#
# 4. For additional practice, try any or all of the following:
# * Pass a numerical parameter to the head method to see a different number
# of rows (e.g., fish_df.head(4)).
# * Change the head method to tail to see a different part of the dataset.
# * To view other portions of fish_df, explore the more advanced indexing
# methods loc and iloc explained at
# https://brohrer.github.io/dataframe_indexing.html.
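# A brief, hedged illustration of the label-based .loc indexing mentioned in note 4
# above (column names assumed to exist in sashelp.fish; adjust if your copy differs)
print(fish_df.loc[0:4, ['Species', 'Weight']])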
###############################################################################
# #
# Exercise 8. [Python w/ pandas] Manipulate a DataFrame #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
fish_df_g = fish_df.groupby('Species')
fish_df_gs = fish_df_g['Weight']
fish_df_gsa = fish_df_gs.agg(['count', 'std', 'mean', 'min', 'max'])
print_with_title(
fish_df_gsa,
'The Python equivalent of PROC MEANS with CLASS and VAR statements:'
)
# Move around and/or remove functions used for aggregation
fish_df_gsa = fish_df_gs.agg(['min', 'mean', 'max'])
print(fish_df_gsa)
# Change the variable whose values are summarized to 'Width'
fish_df_gs = fish_df_g['Width']
fish_df_gsa = fish_df_gs.agg(['count', 'std', 'mean', 'min', 'max'])
print(fish_df_gsa)
# Print out the results of using the one-liner version
print(
fish_df.groupby('Species')['Weight'].agg(
['count', 'std', 'mean', 'min', 'max']
)
)
# Notes:
#
# 1. The DataFrame fish_df, which was created in an exercise above from the SAS
# dataset sashelp.fish, is manipulated, and the following is printed:
# * a table giving the number of rows, standard deviation, mean, min, and
# max of Weight in fish_df when aggregated by Species
#
# 2. This is accomplished by creating a series of new DataFrames:
# * The DataFrame fish_df_g is created from fish_df using the groupby method
# to group rows by values in column 'Species'.
# * The DataFrame fish_df_gs is created from fish_df_g by extracting the
# 'Weight' column using bracket notation.
# * The DataFrame fish_df_gsa is created from fish_df_gs using the agg
# method to aggregate by the functions in the list ['count', 'std',
# 'mean', 'min', 'max'].
#
# 3. Identical results could be obtained using the following SAS code:
# proc means data=sashelp.fish std mean min max;
# class species;
# var Weight;
# run;
# However, while PROC MEANS operates on SAS datasets row-by-row from disk,
# DataFrames are stored entirely in main memory. This allows any number of
# DataFrame operations to be combined for on-the-fly reshaping using "method
# chaining." In other words, fish_df_gsa could have instead been created
# with the following one-liner, which avoids the need for intermediate
# DataFrames (and thus executes much more quickly):
# fish_df_gsa = fish_df.groupby('Species')['Weight'].agg(
# ['count', 'std', 'mean', 'min', 'max']
# )
#
# 4. All subsequent exercises in this file will assume the object fish_df_gsa
# exists, so please don't comment out the line(s) creating it.
#
# 5. For additional practice, try any or all of the following:
# * Move around and/or remove functions used for aggregation, and see how
# the output changes.
# * Change the variable whose values are summarized to 'Width'.
# * Print out the results of using the one-liner version.
###############################################################################
# #
# Exercise 9. [Python w/ pandas & saspy] Load a DataFrame into a SAS dataset #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
sas.dataframe2sasdata(fish_df_gsa, table="fish_sds_gsa", libref="Work")
sas_submit_return_value = sas.submit(
'''
PROC PRINT DATA=fish_sds_gsa;
RUN;
''',
results='TEXT'
)
sas_submit_results = sas_submit_return_value['LST']
print_with_title(
sas_submit_results,
    'SAS results from PROC PRINT applied to the new SAS dataset Work.fish_sds_gsa:'
)
# Print out the SAS log
print(sas_submit_return_value['LOG'])
# Change the SAS procedure
print(
sas.submit(
'''
PROC CONTENTS DATA=fish_sds_gsa;
RUN;
''',
results='TEXT'
)['LST']
)
# Notes:
#
# 1. The DataFrame fish_df_gsa, which was created in an exercise above from the
# SAS dataset sashelp.fish, is used to create the new SAS dataset
# Work.fish_sds_gsa. The SAS PRINT procedure is then called, and the
# following is printed:
# * the output returned by PROC PRINT
#
# 2. The sas object, which was created in a cell above, is a persistent
# connection to a SAS session, and two of its methods are used as follows:
# * The dataframe2sasdata method writes the contents of the DataFrame
# fish_df_gsa to the SAS dataset fish_sds_gsa stored in the Work library.
# (Note: The row indexes of the DataFrame fish_df_gsa are lost when the
# SAS dataset fish_sds_gsa is created.)
# * The submit method is used to submit the PROC PRINT step to the SAS
# kernel, and a dictionary is returned with the following two key-value
# pairs:
# - sas_submit_return_value['LST'] is a string comprising the results from
# executing PROC PRINT, which will be in plain text because the
#      results='TEXT' argument was used
# - sas_submit_return_value['LOG'] is a string comprising the plain-text
# log resulting from executing PROC PRINT
#
# 3. Python strings surrounded by single quotes (e.g., 'Hello, World!') cannot
#    be written across multiple lines of code, whereas strings surrounded by
#    triple quotes (e.g., the argument to the submit method) can; see the short
#    illustration after these notes.
#
# 4. For additional practice, try any or all of the following:
# * Print out the SAS log.
# * Change the SAS procedure used to interact with SAS dataset
# Work.fish_sds_gsa (e.g., try PROC CONTENTS).
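#
# Illustration for note 3 above (the variable names below are made up for this
# sketch and are not used elsewhere in these exercises):
# single_quoted = 'This string must stay on one line.'
# triple_quoted = '''This string
# can span several lines, which is why it suits the submit method.'''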
|
the-stack_0_17636 | import asyncio
import docopt
import logging
import pymongo
import pymongo.errors
import sys
# from zonlib.scripts.utils import create_app, get_host_by_db_name
from tycho.app import create_app
from tycho.app import init_app
# from zonlib.async_db.connection import COLLECTIONS
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
async def create_indexes(app):
    # the check cannot be replaced with a None check because pymongo/motor
    # returns a collection object for any missing attribute; hence we
    # explicitly check that its type is list
if not isinstance(app['db'].event.indexes, list):
        LOGGER.warning("Collection {0} on {1} has no attribute 'indexes'".format(
'event', app.async_db.name))
        # not throwing an exception here; continue running the script with the
        # other valid collections
return
for index in app['db'].event.indexes:
LOGGER.debug(
"Creating index {0} for {1} collection with unique constraint as {2} \n".format(
index['keys'], app['db'].event, index['unique'])
)
try:
await app['db'].event.collection.create_index(index['keys'],
unique=index['unique'])
except pymongo.errors.OperationFailure as e:
            LOGGER.exception(
                "Error occurred while creating the index {0} on collection {1}".format(
                    index, 'event')
)
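# For reference, create_indexes above assumes each entry of the `indexes`
# attribute looks roughly like the sketch below; the field name is a made-up
# placeholder used only for illustration:
#     {'keys': [('external_id', pymongo.ASCENDING)], 'unique': True}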
def main(argv=sys.argv[1:]):
'''
Script to create indexes for collections in given DB
Usage:
create_indexes
'''
loop = asyncio.get_event_loop()
from tycho.main import app
init_app(app, app['config'])
loop.run_until_complete(create_indexes(app))
|
the-stack_0_17637 | import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
from wrappers import TimeLimit
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
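# Minimal usage sketch (assumes a gym Atari build with the usual ROMs
# installed; the environment id is just an example):
#     env = wrap_deepmind(make_atari("PongNoFrameskip-v4"), frame_stack=True)
#     obs = env.reset()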
|
the-stack_0_17640 | import configparser
import os
from load_config_file import locate_config_file
from default_colors import default_print_info, default_print_error, default_print_instruction
from colored_output import ColoredOutput
from pkg_resources import Requirement, resource_filename
from shutil import copyfile
def write_example_config():
garrick_dir, config_file_name = locate_config_file()
default_print_info(
'Your config file is {}.'.format(os.path.join(garrick_dir, config_file_name))
)
default_print_info(
'I am writing a file called {}.example into the same directory.'.format(config_file_name)
)
default_print_instruction(
'You can work from this file to restore your garrick.conf file to a valid state.'
)
print()
example_config_file = resource_filename(Requirement.parse('garrick'), 'garrick.conf.example')
copyfile(example_config_file, os.path.join(garrick_dir, '{}.example'.format(config_file_name)))
raise Exception('Invalid or incomplete config file.')
def get_config():
garrick_dir, config_file_name = locate_config_file()
config_file = os.path.join(garrick_dir, config_file_name)
config = configparser.ConfigParser(allow_no_value = True)
try:
config.read(config_file)
except Exception as exception:
print()
default_print_error('Something is wrong with your config file.')
default_print_error('ConfigParser has thrown the following exception:')
print()
print(exception)
print()
write_example_config()
return config
def parse_db_files():
config = get_config()
if not 'database_files' in config.sections():
print()
default_print_error(
'Error: There is no [database_files] section in your config file.'
)
print()
write_example_config()
db_files = []
for db_file in config['database_files']:
db_files.append(db_file)
if len(db_files) == 0:
print()
default_print_error(
'Error: No databases are listed in your config file.'
)
default_print_instruction(
'Write a name for a database file into its [database_files] section.'
)
default_print_info('This file will be created the next time you run garrick,')
default_print_info('or it will be used if it already exists.')
print()
write_example_config()
return db_files
def parse_editor():
config = get_config()
if not 'config' in config.sections():
print()
default_print_error('Error: There is no [config] section in your config file.')
print()
write_example_config()
if not 'editor' in config['config']:
print()
default_print_error(
'Error: There is no "editor" variable in the [config] section of your config file.'
)
print()
write_example_config()
editor = config['config']['editor']
if editor == '' or editor == None:
editor = os.getenv('EDITOR')
if editor == None:
print()
default_print_error('Error: No editor is defined in your config file.')
default_print_instruction(
'Add the name of your favourite editor at the end of the line "editor = "'
)
default_print_instruction('so you can use it to edit your cards.')
default_print_info(
"(This is normal if you haven't set the editor variable before.)"
)
print()
write_example_config()
return editor
def parse_colors():
config = get_config()
if not 'config' in config.sections():
print()
default_print_error('Error: There is no [config] section in your config file.')
print()
write_example_config()
if 'info' in config['config']:
info_color = config['config']['info']
else:
info_color = 'brightgreen'
if 'error' in config['config']:
error_color = config['config']['error']
else:
error_color = 'brightred'
if 'instruction' in config['config']:
instruction_color = config['config']['instruction']
else:
instruction_color = 'brightmagenta'
if 'side_of_card' in config['config']:
side_color = config['config']['side_of_card']
else:
side_color = 'brightyellow'
if 'prompt' in config['config']:
prompt_color = config['config']['prompt']
else:
prompt_color = 'brightcyan'
if 'silent_prompt' in config['config']:
silent_prompt_color = config['config']['silent_prompt']
else:
silent_prompt_color = 'brightyellow'
return ColoredOutput(
info_color,
error_color,
instruction_color,
side_color,
prompt_color,
silent_prompt_color
)
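# For orientation, the parsers above imply a garrick.conf shaped roughly like
# this sketch (the entries are placeholders, not defaults shipped with garrick):
#     [database_files]
#     main.db
#     [config]
#     editor = vim
#     info = brightgreen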
|
the-stack_0_17642 | from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
txt = translator.english_to_french(textToTranslate)
return txt
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
txt = translator.french_to_english(textToTranslate)
return txt
@app.route("/")
def renderIndexPage():
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
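# Example request once the server is running (host and port as configured above):
#     curl "http://localhost:8080/englishToFrench?textToTranslate=Hello"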
|
the-stack_0_17644 | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
pretrained='open-mmlab://vgg16_caffe',
backbone=dict(
type='CCB',
input_size=input_size,
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20,
assist = dict(depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')),
neck=None,
bbox_head=dict(
type='PRSHead',
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=4,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
cudnn_benchmark = True
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco_new/'
classes = ('echinus','starfish','holothurian','scallop','waterweeds')
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.0001,
step=[8, 11])
total_epochs = 24
|
the-stack_0_17645 | import logging
import claripy
from rex import Vulnerability
from rex.exploit import CannotExploit
from rex.exploit.cgc import CGCType1CircumstantialExploit
from ..technique import Technique
l = logging.getLogger("rex.exploit.techniques.circumstantial_set_register")
class CircumstantialSetRegister(Technique):
name = "circumstantially_set_register"
applicable_to = ['cgc']
cgc_registers = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"]
bitmask_threshold = 20
# this technique should create an exploit which is a type1 pov
pov_type = 1
generates_pov = True
def __init__(self, crash, rop, shellcode):
super(CircumstantialSetRegister, self).__init__(crash, rop, shellcode)
self._ip_bitmask = None
self._ip_bitcnt = None
def set_register(self, register):
"""
:param register
set a register with shellcode on cgc
"""
# can only exploit ip overwrites
if not self.crash.one_of([Vulnerability.IP_OVERWRITE, Vulnerability.PARTIAL_IP_OVERWRITE]):
raise CannotExploit("[%s] cannot control ip" % self.name)
state = self.crash.state
if self._ip_bitcnt < CircumstantialSetRegister.bitmask_threshold:
raise CannotExploit("not enough controlled bits of ip")
# see if the register value is nearly unconstrained
reg = getattr(state.regs, register)
# we need to make sure that the pc and this register don't conflict
conflict = not state.satisfiable(extra_constraints=(reg != state.regs.pc,))
if conflict:
raise CannotExploit("register %s conflicts with pc, pc and register must be equal" % register)
# get the register's bitmask
reg_bitmask, reg_bitcnt = self.get_bitmask_for_var(state, reg)
if reg_bitcnt >= CircumstantialSetRegister.bitmask_threshold:
if not any([v.startswith('aeg_stdin') for v in reg.variables]):
raise CannotExploit("register %s was symbolic but was not tainted by user input" % register)
l.info("can circumstantially set register %s", register)
ccp = self.crash.copy()
value_var = claripy.BVS('value_var', 32, explicit_name=True)
ip_var = claripy.BVS('ip_var', 32, explicit_name=True)
reg = getattr(ccp.state.regs, register)
ccp.state.add_constraints(reg == value_var)
ccp.state.add_constraints(ccp.state.regs.ip == ip_var)
mem = [reg] + [ccp.state.regs.ip]
return CGCType1CircumstantialExploit(ccp, register, reg_bitmask,
self._ip_bitmask, mem, value_var, ip_var)
else:
raise CannotExploit("register %s's value does not appear to be unconstrained" % register)
def apply(self, **kwargs):
ip = self.crash.state.regs.ip
self._ip_bitmask, self._ip_bitcnt = self.get_bitmask_for_var(self.crash.state, ip)
for register in CircumstantialSetRegister.cgc_registers:
try:
reg_setter = self.set_register(register)
l.info("was able to set register [%s] circumstantially", register)
return reg_setter
except CannotExploit as e:
l.debug("could not set register %s circumstantially (%s)", register, e)
|
the-stack_0_17648 | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class StackedFullyConnected(nn.Module):
def __init__(self, FC_List=[500, 200, 100]):
super(StackedFullyConnected, self).__init__()
self.FC_List = FC_List
self.FCs = nn.ModuleList()
self.__get_fc()
def __get_fc(self):
s = self.FC_List[0]
num = self.FC_List[1]
self.FCs.append(nn.Linear(s, num))
s = num
for num in self.FC_List[2:]:
self.FCs.append(nn.Dropout(p=0.5))
self.FCs.append(nn.Linear(s, num))
s = num
def forward(self, inputs):
x = inputs
for layer in self.FCs:
x = F.softsign(layer(x))
return x
class erfh5_Distributed_Autoencoder(nn.Module):
def __init__(self, dgx_mode=True, layers_size_list=[69366, 15000]):
super(erfh5_Distributed_Autoencoder, self).__init__()
self.encoder = StackedFullyConnected(layers_size_list)
self.decoder = StackedFullyConnected(list(reversed(layers_size_list)))
print(self.encoder)
print(self.decoder)
if dgx_mode:
self.encoder = nn.DataParallel(self.encoder, device_ids=[
0, 1, 2, 3]).to('cuda:0')
self.decoder = nn.DataParallel(self.decoder, device_ids=[
4, 5, 6, 7]).to('cuda:4')
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x.to('cuda:0')
def save_encoder(self, path):
torch.save(self.encoder.state_dict(), path)
class erfh5_Autoencoder(nn.Module):
def __init__(self, input_size, FC_List=[500, 200, 100]):
super(erfh5_Autoencoder, self).__init__()
self.FC_List = FC_List
self.input_size = input_size
self.FCs = nn.ModuleList()
self.__get_fc()
# self.weightList =
# nn.ParameterList([nn.Parameter(f.weight) for f in self.FCs])
# self.biasList =
# nn.ParameterList([nn.Parameter(f.bias) for f in self.FCs])
[print(f) for f in self.FCs]
def __get_fc(self):
s = self.input_size
for num in self.FC_List:
self.FCs.append(nn.Linear(s, num))
self.FCs.append(nn.Dropout(p=0.5))
s = num
for num in reversed(self.FC_List[:-1]):
self.FCs.append(nn.Linear(s, num))
self.FCs.append(nn.Dropout(p=0.5))
s = num
self.FCs.append(nn.Linear(s, self.input_size))
def forward(self, inputs):
x = inputs
for layer in self.FCs:
x = F.relu(layer(x))
return x
def get_encoding(self):
return self.FCs[int((self.FCs.__len__() - 1) / 2)]
# '/home/lodes/Sim_Results'
# '/cfs/share/data/RTM/Lautern/clean_erfh5/'
def load_stacked_fc(path, list=[69366, 15000, 8192]):
state_dict = torch.load(path)
new_state_dict = OrderedDict()
model = StackedFullyConnected(list)
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
return model
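# Rough usage sketch (assumes at least one CUDA device, because forward()
# moves its result to 'cuda:0'; the sizes follow the defaults above):
#     model = erfh5_Distributed_Autoencoder(dgx_mode=False,
#                                           layers_size_list=[69366, 15000])
#     reconstruction = model(torch.rand(2, 69366))  # result lands on 'cuda:0'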
if __name__ == "__main__":
pass
# half_encoder = load_stacked_fc(path)
""" print(">>>INFO: Loading State dict finished.")
half_encoder.to(device)
with torch.no_grad():
half_encoder.eval()
loss = 0
counter = 0
for i in validation_samples:
i = torch.FloatTensor(i)
i = i.to(device)
i = torch.unsqueeze(i, 0)
output = half_encoder(i)
#output = output.to(device)
#loss = loss + loss_criterion(output, i).item()
output = output.cpu().numpy()
i = i.cpu().numpy()
plt.figure()
plt.subplot(211)
plt.plot(i, 'bo')
plt.subplot(212)
plt.plot(output, 'ro')
plt.savefig('/cfs/home/l/o/lodesluk/models/' +
str(counter) + '.png')
print("plot saved")
counter = counter + 1
#loss = loss / len(validation_samples)
#print(">>>Loss on loaded model:", "{:8.4f}".format(loss))
half_encoder.train()
"""
print(">>>INFO: Finished.")
|
the-stack_0_17649 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.heat import stacks
from rally.plugins.openstack.scenarios.heat import utils as heat_utils
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context"
SCN = "rally.plugins.openstack.scenarios"
class TestStackGenerator(test.ScenarioTestCase):
def _gen_tenants(self, count):
tenants = {}
for id_ in range(count):
tenants[str(id_)] = dict(name=str(id_))
return tenants
def test_init(self):
self.context.update({
"config": {
"stacks": {
"stacks_per_tenant": 1,
"resources_per_stack": 1
}
}
})
inst = stacks.StackGenerator(self.context)
self.assertEqual(inst.config, self.context["config"]["stacks"])
@mock.patch("%s.heat.utils.HeatScenario._create_stack" % SCN,
return_value=fakes.FakeStack(id="uuid"))
def test_setup(self, mock_heat_scenario__create_stack):
tenants_count = 2
users_per_tenant = 5
stacks_per_tenant = 1
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
"users": {
"tenants": tenants_count,
"users_per_tenant": users_per_tenant,
"concurrent": 10,
},
"stacks": {
"stacks_per_tenant": stacks_per_tenant,
"resources_per_stack": 1
}
},
"users": users,
"tenants": tenants
})
stack_ctx = stacks.StackGenerator(self.context)
stack_ctx.setup()
self.assertEqual(tenants_count * stacks_per_tenant,
mock_heat_scenario__create_stack.call_count)
# check that stack ids have been saved in context
for ten_id in self.context["tenants"].keys():
self.assertEqual(stacks_per_tenant,
len(self.context["tenants"][ten_id]["stacks"]))
@mock.patch("%s.heat.stacks.resource_manager.cleanup" % CTX)
def test_cleanup(self, mock_cleanup):
self.context.update({
"users": mock.MagicMock()
})
stack_ctx = stacks.StackGenerator(self.context)
stack_ctx.cleanup()
mock_cleanup.assert_called_once_with(
names=["heat.stacks"],
users=self.context["users"],
superclass=heat_utils.HeatScenario,
task_id=self.context["owner_id"])
|
the-stack_0_17650 | import sys
import os
from os import path, mkdir, listdir, rmdir
from getpass import getpass as inputHidden
import math
##############
# USER INPUT #
##############
"""
Asks the user a question and returns the number of the response. If an invalid answer is given, the question is repeated.
Parameters
----------
question : str
The question that is asked.
options : list (str)
An array of the different possible answers.
allowMultiple : bool
If True, the user may give multiple answers, each separated by a space. An array of these answers is returned.
Returns
-------
If allowMultiple is False:
    int
        The number of the chosen answer.
Else:
    list (int)
        An array of the numbers of the chosen answers.
"""
def makeChoice(question, options, allowMultiple=False):
numChoices = len(options)
if numChoices == 0:
print("Warning: A question was asked with no valid answers. Returning None.")
return None
if numChoices == 1:
print("A question was asked with only one valid answer. Returning this answer.")
return 1
print("\n"+question)
for i in range(numChoices):
print(str(i+1)+": "+options[i])
cInput = input("\n").split(" ")
if not allowMultiple:
try:
assert len(cInput) == 1
choice = int(cInput[0])
assert choice > 0 and choice <= numChoices
return choice
except:
print("\nInvalid input.")
return makeChoice(question, options, allowMultiple)
else:
try:
choices = [int(c) for c in cInput]
for choice in choices:
assert choice > 0 and choice <= numChoices
return choices
except:
print("\nInvalid input.")
return makeChoice(question, options, allowMultiple)
"""
Asks the user a question. The answer can be any number between the given minVal and maxVal. If an invalid answer is given, the question is repeated.
Parameters
----------
question : str
The question that is asked.
minVal : float
The minimum allowed value.
maxVal : float
The maximum allowed value.
Returns
-------
float
The given value.
"""
def makeChoiceNumInput(question, minVal, maxVal):
while True:
print("\n"+question)
try:
var = float(input())
assert minVal <= var <= maxVal
return var
except:
print("Invalid input.")
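# Illustrative calls (the prompt text and options below are made up for this
# sketch):
#     pick = makeChoice("Pick a color:", ["Red", "Green", "Blue"])   # returns 1, 2, or 3
#     amount = makeChoiceNumInput("How many (0-10)?", 0, 10)         # returns a float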
###########
# SEEDING #
###########
"""
Encodes an array of variable values into a seed according to a given max value array.
Parameters
----------
varArray : list (int)
The array of values
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
int
The seed in base-10 numerical form.
str
The seed in the given base.
"""
def encodeSeed(varArray, maxValueArray, base=10):
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
seed = 0
baseShift = 0
for i in range(len(varArray)):
seed += varArray[i]<<baseShift
baseShift += maxValueArray[i].bit_length()
return seed, dec_to_base(seed, base)
"""
Decodes a string or non-base-10 number into an array of variable values according to a given max value array.
Parameters
----------
seed : str or int
The seed that will be decoded.
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Unused if seed is an int (base-10 is assumed). Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
list (int)
An array of variable values decoded from the string. For example, if there are 3 variables, the returned array is [var1's value, var2's value, var3's value]
"""
def decodeSeed(seed, maxValueArray, base=10):
if type(seed) is str:
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
elif base < 2:
print("Base must be between 2 and 36. Increasing to 2.")
base = 2
seed = int(seed, base)
baseShift = 0
varArray = []
for i in range(len(maxValueArray)):
bitLength = maxValueArray[i].bit_length()
varArray.append((seed>>baseShift) & ((2**bitLength)-1))
baseShift += bitLength
return varArray
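# Worked round trip for encodeSeed/decodeSeed above (values picked purely for
# illustration):
#     encodeSeed([3, 1, 4], [4, 2, 5])   # -> (139, '139')
#     decodeSeed(139, [4, 2, 5])         # -> [3, 1, 4]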
"""
Returns whether or not a seed is possible given a maxValueArray and base.
Parameters
----------
seed : str or int
The seed that will be verified.
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
bool
Whether or not the seed is valid.
list (int)
An array of variable values decoded from the string. For example, if there are 3 variables, the returned array is [var1's value, var2's value, var3's value]
"""
def verifySeed(seed, maxValueArray, base=10):
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
elif base < 2:
print("Base must be between 2 and 36. Increasing to 2.")
base = 2
if type(seed) is int:
base = 10
seed = dec_to_base(seed,base)
seed = seed.upper().strip()
try:
maxSeed = 0
baseShift = 0
for i in range(len(maxValueArray)):
maxSeed += maxValueArray[i]<<baseShift
baseShift += maxValueArray[i].bit_length()
assert int(seed, 36) <= maxSeed
varsInSeed = decodeSeed(seed, maxValueArray, base)
for i in range(len(varsInSeed)):
assert 0 <= varsInSeed[i] <= maxValueArray[i]
return True, varsInSeed
except:
return False, None
"""
From https://www.codespeedy.com/inter-convert-decimal-and-any-base-using-python/
Converts a base-10 int into a different base.
Parameters
----------
num : int
The number that will be converted.
base : int
    Between 2 and 36. The numerical base to convert the number into (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
str
The number in the given base.
"""
def dec_to_base(num,base): #Maximum base - 36
base_num = ""
while num>0:
dig = int(num%base)
if dig<10:
base_num += str(dig)
else:
base_num += chr(ord('A')+dig-10) #Using uppercase letters
num //= base
base_num = base_num[::-1] #To reverse the string
return base_num
########################
# FILE/PATH MANAGEMENT #
########################
"""
Writes a value to a file at a given address. Supports multi-byte addresses.
Parameters
----------
file : str
The file that will be modified.
address : int
The value (ideally, a hex value such as 0x12345) that will be modified.
val : int
The value that will be written to this address.
numBytes : int
The number of bytes that this value will take up.
Returns
-------
False if the value is too large to be written within the given number of bytes; True otherwise.
Examples
--------
Example 1
writeToAddress(file.exe, 0x12345, 0x41, 1) will write the following value:
0x12345 = 41
Example 2
writeToAddress(file.exe, 0x12345, 0x6D18, 2) will write the following values:
0x12345 = 6D
0x12346 = 18
Example 3
writeToAddress(file.exe, 0x12345, 0x1C, 2) will write the following values:
0x12345 = 00
0x12346 = 1C
"""
def writeToAddress(file, address, val, numBytes=1):
if val.bit_length() > numBytes*8:
print("Given value is greater than "+str(numBytes)+" bytes.")
return False
address += (numBytes-1)
for i in range(numBytes):
file.seek(address)
currByte = val & 0xFF
file.write(bytes([currByte]))
address -= 1
val = val>>8
return True
"""
From https://gist.github.com/jacobtomlinson/9031697
Removes all empty folders, including nested empty folders, in a directory.
Parameters
----------
p : str
The path of the starting directory; all empty folders that are children (or grandchildren, etc) of this directory are removed.
"""
def removeEmptyFolders(p):
if not path.isdir(p):
return
files = listdir(p)
if len(files):
for f in files:
fullpath = path.join(p, f)
if path.isdir(fullpath):
removeEmptyFolders(fullpath)
files = listdir(p)
if len(files) == 0:
rmdir(p)
"""
Returns an array of the individual components of a given path.
Parameters
----------
p : str
The path.
Returns
-------
list (str)
The path array.
Example
-------
Input
"C:/early folder/test2/thing.exe"
Output
["C:", "early folder", "test2", "thing.exe"]
"""
def getPathArray(p):
p1, p2 = path.split(p)
if p2 == "":
p = p1
pathArray = []
while True:
p1, p2 = path.split(p)
pathArray = [p2] + pathArray
if p2 == "":
pathArray = [p1] + pathArray
try:
while pathArray[0] == "":
del pathArray[0]
except:
pass
return pathArray
p = p1
"""
Creates the given directory. Unlike mkdir, this will also create any necessary parent directories that do not already exist.
Parameters
----------
p : str
The path of the folder that will be created.
Returns
-------
True if the folder was created, False if it already exists.
"""
def createDir(p):
if path.isdir(p):
return False
pathArray = getPathArray(p)
currPath = pathArray[0]
for i in range(1, len(pathArray)):
currPath = path.join(currPath, pathArray[i])
if not path.isdir(currPath):
mkdir(currPath)
return True
"""
Returns the directory containing the current program, regardless of whether it is a standalone script or a wrapped executable.
Returns
-------
str
The directory containing the current program.
"""
def getCurrFolder():
if getattr(sys, 'frozen', False):
mainFolder = path.dirname(sys.executable) # EXE (executable) file
else:
mainFolder = path.dirname(path.realpath(__file__)) # PY (source) file
sys.path.append(mainFolder)
return mainFolder
"""
Returns the file extension (including the ".") of the first file found in the given folder that matches the given file name.
Parameters
----------
folder : str
The given folder.
fileName : str
The given file name.
Returns
-------
str
The file extension (including the ".") of the first file found in folder named fileName (with any extension); if no file with that name is found, return an empty string.
"""
def getFileExt(folder, fileName):
for f in listdir(folder):
fName, fExt = path.splitext(f)
if fName == fileName:
return fExt
return ""
"""
From https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
Returns the total number of bytes taken up by the given directory and its subdirectories.
Parameters
----------
startPath : str
The given directory.
Returns
-------
int
The number of bytes taken up by the directory.
"""
def getDirSize(startPath = '.'):
totalSize = 0
for dirpath, dirnames, filenames in os.walk(startPath):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
totalSize += os.path.getsize(fp)
return totalSize
####################
# ARRAY MANAGEMENT #
####################
"""
Returns the number of elements (including duplicates) that exist in two different given arrays.
Parameters
----------
arr1 : list
The first array.
arr2 : list
The second array.
Returns
-------
int
The number of elements in the overlap
"""
def arrayOverlap(arr1, arr2):
count = 0
for a in arr1:
if a in arr2:
count += 1
return count
"""
Merges a nested array into a single one-dimensional array.
Parameters
----------
arr : list
The nested array that will be merged.
finalArr : list (str)
Should be ignored (only used in recursion). The created array so far.
Returns
-------
list (str):
The merged array.
Example
-------
Input
[item1, [item2, item3], item4, [item 5, [item6, item7], item8]]
Output
[item1, item2, item3, item4, item5, item6, item7, item8]
"""
def mergeNestedArray(arr, finalArr=[]):
for val in arr:
if not isinstance(val, list):
finalArr.append(val)
else:
finalArr = mergeNestedArray(val, finalArr)
return finalArr
"""
From https://www.geeksforgeeks.org/python-find-most-frequent-element-in-a-list/
Returns the most common element in a list, along with how many times it occurrs.
Parameters
----------
arr : list
The array.
Returns
-------
anything
The most frequently-occurring element.
int
How many instances of this element there are in the array.
"""
def most_frequent(arr):
counter = 0
elem = arr[0]
for i in arr:
curr_frequency = arr.count(i)
if (curr_frequency > counter):
counter = curr_frequency
elem = i
return elem, counter
"""
Returns whether or not arr1 is an ordered subset of arr2.
Parameters
----------
arr1 : list
The first array.
arr2: list
The second array.
Returns
-------
bool
Whether or not arr1 is an ordered subset of arr2.
Examples
--------
Input 1
[3, 5], [1, 3, 5, 7, 9]
Output 1
True
Input 2
[3, 5], [1, 2, 3, 4, 5, 6, 7]
Output 2
False
"""
def arrayInArray(arr1, arr2):
for i in range(len(arr2)-len(arr1)+1):
passed = True
for j in range(len(arr1)):
if arr1[j] != arr2[i+j]:
passed = False
break
if passed:
return True
return False
###############################
# CONSOLE/TERMINAL MANAGEMENT #
###############################
"""
Clears the console screen.
"""
def clearScreen():
os.system('clear' if os.name =='posix' else 'cls')
"""
From https://www.quora.com/How-can-I-delete-the-last-printed-line-in-Python-language
Clears ("backspaces") the last n console lines.
PARAMETERS
----------
n : int
The number of lines to clear.
"""
def delete_last_lines(n=1):
for _ in range(n):
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
#######################
# STRING MANIPULATION #
#######################
"""
Prints a title surrounded by a certain character.
Parameters
----------
string : str
The string that is printed.
char : str
The one-character string that surrounds the string.
Example
-------
Input
"MY TITLE", "#"
Output
############
# MY TITLE #
############
"""
def printTitle(string, topBottomChar="#", sideChar="#", cornerChar="#"):
topBottom = cornerChar+(topBottomChar*(len(string)+2))+cornerChar
print(topBottom)
print(sideChar+" "+string+" "+sideChar)
print(topBottom)
"""
Returns the base string with either the singular or plural suffix depending on the value of num.
Parameters
----------
base : str
The base of the word.
num : int
The quantity of the desired word.
singularSuffix : str
The suffix of the word's singular form
pluralSuffix : str
The suffix of the word's plural form
Returns
-------
str
The resulting string
Examples
--------
Input 1
pluralize("ind", 1, "ex", "ices")
Output 1
"index"
Input 2
pluralize("ind", 2, "ex", "ices")
Output 2
"indices"
"""
def pluralize(base, num, singularSuffix="", pluralSuffix="s"):
return base+singularSuffix if num == 1 else base+pluralSuffix
"""
Creates a copy of a given string, automatically adding line breaks and indenting lines, without splitting any words in two.
A line's length will only exceed the given limit if a single word in the string exceeds it.
Parameters
----------
string : str
The string to be printed.
lineLength : int
The max length of each printed line.
firstLineIndent : str
The start of the first line.
lineIndent : str
The start of all subsequent lines.
Returns
-------
The output string.
Examples
--------
Input 1
limitedString("Strong Bad's test sentence is as follows: The fish was delish, and it made quite a dish.", 40, "? ", ". ! ")
Output 1
"? Strong Bad's test sentence is as\n. ! follows: The fish was delish, and it\n. ! made quite a dish."
(Which would look like the following when printed):
? Strong Bad's test sentence is as
. ! follows: The fish was delish, and it
. ! made quite a dish.
Input 2
limitedString("THIS_WORD_IS_VERY_LONG there", 15, "", "")
Output 2:
"THIS_WORD_IS_VERY_LONG\nthere"
(Which would look like the following when printed):
THIS_WORD_IS_VERY_LONG
there
"""
def limitedString(string, lineLength=80, firstLineIndent="", lineIndent=" "):
printArray = string.split(" ")
totalString = ""
currString = firstLineIndent
isStartOfLine = True
while len(printArray) > 0:
if isStartOfLine or (len(printArray[0]) + (not isStartOfLine) <= lineLength - len(currString)):
currString += (" " if not isStartOfLine else "")+printArray.pop(0)
isStartOfLine = False
else:
totalString += currString+"\n"
currString = lineIndent
isStartOfLine = True
totalString += currString
return totalString
"""
Shortens a string to a maximum length, padding the last few characters with a given character if necessary.
You have the option of whether or not the string can cut off mid-word.
Parameters
----------
string : str
The string to be shortened.
maxLength : int
The maximum length of the output.
suffixChar : str
The character that will pad a long string
suffixLength : int
The length of the padding
cutoff : bool
If True, the string can be cut mid-word; else, it will be cut at the end of the previous word.
Returns
-------
The (possibly) shortened string, with spaces stripped from the right side of the pre-padded output.
Examples
--------
Input 1
shorten("this string is too long", 20, '.', 3, True)
Output 1
"This string is to..."
Input 2
shorten("this string is too long", 20, '.', 3, False)
Output 2
"This string is..."
Input 3
shorten("this is short", 15, '.', 3, True)
Output 3
"this is short"
"""
def shorten(string, maxLength=10, suffixChar='.', suffixLength=3, cutoff=True):
if len(string) <= maxLength:
return string
if cutoff:
return string[:(maxLength-suffixLength)].rstrip()+(suffixChar*suffixLength)
shortened = string.rstrip()
while len(shortened) > maxLength-suffixLength:
shortened = " ".join(shortened.split(" ")[:-1]).rstrip()
return shortened+(suffixChar*suffixLength)
"""
Splits a string into multiple parts, with each part being about equal in length, and no words cut off in the middle.
Parameters
----------
string : str
The string to be split.
numParts : int
The number of parts to split the string into.
reverse : bool
Decide if the last part (False) or first part (True) is likely to be the longest part.
Returns
-------
list
The split string.
Examples
--------
Input 1
splitStringIntoParts("This string is split into three whole parts", 3, True)
Output 1
['This string is split', 'into three', 'whole parts']
Input 2
splitStringIntoParts("This string is split into three whole parts", 3, False)
Output 2
['This string', 'is split into', 'three whole parts']
"""
def splitStringIntoParts(string, numParts=2, reverse=False):
totalLen = len(string) - (numParts-1)
maxSubStringLength = math.ceil(totalLen/numParts)
stringArray = string.split(" ")
if reverse:
stringArray.reverse()
splitArray = []
currString = ""
offset = 0
while len(stringArray) > 0:
if len(currString) + (currString != "") + len(stringArray[0]) < maxSubStringLength + offset:
currString += (" " if currString != "" else "")+stringArray.pop(0)
else:
offset = (maxSubStringLength + offset) - (len(currString) + (currString != ""))
splitArray.append(currString)
currString = ""
splitArray[-1] += " "+currString
if reverse:
newSplitArray = []
while len(splitArray) > 0:
curr = splitArray.pop(-1).split(" ")
curr.reverse()
curr = " ".join(curr)
newSplitArray.append(curr)
return newSplitArray
return splitArray
"""
Returns a string indicating the input number of bytes in its most significant form, rounding up to the indicated number of decimal places.
For example, if numBytes is at least 1 MB but less than 1 GB, it will be displayed in MB.
Parameters
----------
numBytes : int
The number of bytes.
decimalPlaces : int
The number of decimal places to round to.
Returns
-------
str
The number of the most significant data size, along with the data size itself.
Examples
--------
Input 1
5000000, 3
Output 1
4.769 MB
Input 2
2048, 1
Output 2
2 KB
Input 3
2049, 1
Output 3
2.1 KB
"""
def simplifyNumBytes(numBytes, decimalPlaces=2):
numBytes = float(numBytes)
byteTypeArray = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
temp = (10.0**decimalPlaces)
for byteType in byteTypeArray:
if numBytes < 1024:
num = math.ceil(numBytes * temp) / temp
if num == int(num):
num = int(num)
return str(num)+" "+byteType
numBytes /= 1024.0
numBytes *= 1024
num = math.ceil(numBytes * temp) / temp
if num == int(num):
num = int(num)
return str(num)+" YB"
"""
SOURCES
dec_to_base
https://www.codespeedy.com/inter-convert-decimal-and-any-base-using-python/
removeEmptyFolders
https://gist.github.com/jacobtomlinson/9031697
getDirSize
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
most_frequent
https://www.geeksforgeeks.org/python-find-most-frequent-element-in-a-list/
delete_last_lines
https://www.quora.com/How-can-I-delete-the-last-printed-line-in-Python-language
All other functions made by GateGuy
"""
|
the-stack_0_17651 | """Test scikit-optimize based implementation of hyperparameter
search with interface similar to those of GridSearchCV
"""
import pytest
import time
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import clone
from sklearn.base import BaseEstimator
from joblib import cpu_count
from scipy.stats import rankdata
import numpy as np
from numpy.testing import assert_array_equal
from skopt.space import Real, Categorical, Integer
from skopt import BayesSearchCV
def _fit_svc(n_jobs=1, n_points=1, cv=None):
"""
Utility function to fit a larger classification task with SVC
"""
X, y = make_classification(n_samples=1000, n_features=20, n_redundant=0,
n_informative=18, random_state=1,
n_clusters_per_class=1)
opt = BayesSearchCV(
SVC(),
{
'C': Real(1e-3, 1e+3, prior='log-uniform'),
'gamma': Real(1e-3, 1e+1, prior='log-uniform'),
'degree': Integer(1, 3),
},
n_jobs=n_jobs, n_iter=11, n_points=n_points, cv=cv,
random_state=42,
)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
def test_raise_errors():
# check if empty search space is raising errors
with pytest.raises(ValueError):
BayesSearchCV(SVC(), {})
# check if invalid dimensions are raising errors
with pytest.raises(ValueError):
BayesSearchCV(SVC(), {'C': '1 ... 100.0'})
with pytest.raises(TypeError):
BayesSearchCV(SVC(), ['C', (1.0, 1)])
@pytest.mark.parametrize("surrogate", ['gp', None])
@pytest.mark.parametrize("n_jobs", [1, -1]) # test sequential and parallel
@pytest.mark.parametrize("n_points", [1, 3]) # test query of multiple points
def test_searchcv_runs(surrogate, n_jobs, n_points, cv=None):
"""
Test whether the cross validation search wrapper around sklearn
models runs properly with available surrogates and with single
or multiple workers and different number of parameter settings
to ask from the optimizer in parallel.
Parameters
----------
* `surrogate` [str or None]:
A class of the scikit-optimize surrogate used. None means
to use default surrogate.
* `n_jobs` [int]:
Number of parallel processes to use for computations.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# create an instance of a surrogate if it is not a string
if surrogate is not None:
optimizer_kwargs = {'base_estimator': surrogate}
else:
optimizer_kwargs = None
opt = BayesSearchCV(
SVC(),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_jobs=n_jobs, n_iter=11, n_points=n_points, cv=cv,
optimizer_kwargs=optimizer_kwargs
)
opt.fit(X_train, y_train)
    # this normally holds; it fails only if something is wrong
    # with the optimization procedure as such
assert opt.score(X_test, y_test) > 0.9
@pytest.mark.slow_test
def test_parallel_cv():
"""
Test whether parallel jobs work
"""
_fit_svc(n_jobs=1, cv=5)
_fit_svc(n_jobs=2, cv=5)
def test_searchcv_runs_multiple_subspaces():
"""
Test whether the BayesSearchCV runs without exceptions when
multiple subspaces are given.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# used to try different model classes
pipe = Pipeline([
('model', SVC())
])
# single categorical value of 'model' parameter sets the model class
lin_search = {
'model': Categorical([LinearSVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
}
dtc_search = {
'model': Categorical([DecisionTreeClassifier()]),
'model__max_depth': Integer(1, 32),
'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
}
svc_search = {
'model': Categorical([SVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'model__degree': Integer(1, 8),
'model__kernel': Categorical(['linear', 'poly', 'rbf']),
}
opt = BayesSearchCV(
pipe,
[(lin_search, 1), (dtc_search, 1), svc_search],
n_iter=2
)
opt.fit(X_train, y_train)
# test if all subspaces are explored
total_evaluations = len(opt.cv_results_['mean_test_score'])
assert total_evaluations == 1+1+2, "Not all spaces were explored!"
def test_searchcv_sklearn_compatibility():
"""
Test whether the BayesSearchCV is compatible with base sklearn methods
such as clone, set_params, get_params.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# used to try different model classes
pipe = Pipeline([
('model', SVC())
])
# single categorical value of 'model' parameter sets the model class
lin_search = {
'model': Categorical([LinearSVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
}
dtc_search = {
'model': Categorical([DecisionTreeClassifier()]),
'model__max_depth': Integer(1, 32),
'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
}
svc_search = {
'model': Categorical([SVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'model__degree': Integer(1, 8),
'model__kernel': Categorical(['linear', 'poly', 'rbf']),
}
opt = BayesSearchCV(
pipe,
[(lin_search, 1), svc_search],
n_iter=2
)
opt_clone = clone(opt)
params, params_clone = opt.get_params(), opt_clone.get_params()
assert params.keys() == params_clone.keys()
for param, param_clone in zip(params.items(), params_clone.items()):
assert param[0] == param_clone[0]
assert isinstance(param[1], type(param_clone[1]))
opt.set_params(search_spaces=[(dtc_search, 1)])
opt.fit(X_train, y_train)
opt_clone.fit(X_train, y_train)
total_evaluations = len(opt.cv_results_['mean_test_score'])
total_evaluations_clone = len(opt_clone.cv_results_['mean_test_score'])
# test if expected number of subspaces is explored
assert total_evaluations == 1
assert total_evaluations_clone == 1+2
def test_searchcv_reproducibility():
"""
Test whether results of BayesSearchCV can be reproduced with a fixed
random state.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state
)
opt.fit(X_train, y_train)
best_est = opt.best_estimator_
opt2 = clone(opt).fit(X_train, y_train)
best_est2 = opt2.best_estimator_
assert getattr(best_est, 'C') == getattr(best_est2, 'C')
assert getattr(best_est, 'gamma') == getattr(best_est2, 'gamma')
assert getattr(best_est, 'degree') == getattr(best_est2, 'degree')
assert getattr(best_est, 'kernel') == getattr(best_est2, 'kernel')
@pytest.mark.fast_test
def test_searchcv_rank():
"""
Test whether results of BayesSearchCV can be reproduced with a fixed
random state.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state, return_train_score=True
)
opt.fit(X_train, y_train)
results = opt.cv_results_
test_rank = np.asarray(rankdata(-np.array(results["mean_test_score"]),
method='min'), dtype=np.int32)
train_rank = np.asarray(rankdata(-np.array(results["mean_train_score"]),
method='min'), dtype=np.int32)
assert_array_equal(np.array(results['rank_test_score']), test_rank)
assert_array_equal(np.array(results['rank_train_score']), train_rank)
def test_searchcv_refit():
"""
    Test whether BayesSearchCV with refit=True fits a usable final model,
    even when a best_estimator_ is assigned to the instance before fitting.
"""
    X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state
)
opt2 = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state, refit=True
)
opt.fit(X_train, y_train)
opt2.best_estimator_ = opt.best_estimator_
opt2.fit(X_train, y_train)
    # this only fails to hold if something is wrong
    # with the optimization procedure as such
assert opt2.score(X_test, y_test) > 0.9
def test_searchcv_callback():
    # Test whether callback is used in BayesSearchCV and
    # whether it can be used to interrupt the search loop
    X, y = load_iris(return_X_y=True)
opt = BayesSearchCV(
DecisionTreeClassifier(),
{
'max_depth': [3], # additional test for single dimension
'min_samples_split': Real(0.1, 0.9),
},
n_iter=5
)
total_iterations = [0]
def callback(opt_result):
# this simply counts iterations
total_iterations[0] += 1
# break the optimization loop at some point
if total_iterations[0] > 2:
return True # True == stop optimization
return False
opt.fit(X, y, callback=callback)
assert total_iterations[0] == 3
# test whether final model was fit
opt.score(X, y)
def test_searchcv_total_iterations():
# Test the total iterations counting property of BayesSearchCV
opt = BayesSearchCV(
DecisionTreeClassifier(),
[
({'max_depth': (1, 32)}, 10), # 10 iterations here
{'min_samples_split': Real(0.1, 0.9)} # 5 (default) iters here
],
n_iter=5
)
assert opt.total_iterations == 10 + 5
def test_search_cv_internal_parameter_types():
# Test whether the parameters passed to the
# estimator of the BayesSearchCV are of standard python
# types - float, int, str
    # This estimator is used to check whether the types provided
    # are native python types.
class TypeCheckEstimator(BaseEstimator):
def __init__(self, float_param=0.0, int_param=0, str_param=""):
self.float_param = float_param
self.int_param = int_param
self.str_param = str_param
def fit(self, X, y):
assert isinstance(self.float_param, float)
assert isinstance(self.int_param, int)
assert isinstance(self.str_param, str)
return self
def score(self, X, y):
return 0.0
# Below is example code that used to not work.
X, y = make_classification(10, 4)
model = BayesSearchCV(
estimator=TypeCheckEstimator(),
search_spaces={
'float_param': [0.0, 1.0],
'int_param': [0, 10],
'str_param': ["one", "two", "three"],
},
n_iter=11
)
model.fit(X, y)
|
the-stack_0_17653 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tutor', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('release_date', models.DateField()),
('num_stars', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Musician',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('instrument', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='artist',
field=models.ForeignKey(to='tutor.Musician'),
preserve_default=True,
),
]
|
the-stack_0_17656 | #!/usr/bin/env python
"""MODULE DOCSTRING WILL BE DYNAMICALLY OVERRIDED."""
from argparse import ArgumentParser
from functools import wraps
import sys
from secret import twitter_instance
from twmods import (EPILOG, output)
DESCRIPTION = "Demonstrate Twitter's GET help/xxx endpoints."
USAGE = """
twhelp.py [--version] [--help]
twhelp.py configuration | languages | privacy | tos
"""
# pylint: disable=redefined-builtin
__doc__ = '\n'.join((DESCRIPTION, USAGE, EPILOG))
__version__ = '1.0.2'
def parse_args(args):
"""Parse the command line parameters."""
root_parser = ArgumentParser(
description=DESCRIPTION,
epilog=EPILOG,
usage=USAGE)
root_parser.add_argument(
'--version',
action='version',
version=__version__)
commands = (
dict(func=request_help_configuration,
command='help/configuration',
aliases=['configuration', 'config'],
help='print the current configuration used by Twitter'),
dict(func=request_help_languages,
command='help/languages',
aliases=['languages', 'lang'],
help='print the list of languages supported by Twitter'),
dict(func=request_help_privacy,
command='help/privacy',
aliases=['privacy'],
help='print Twitter\'s Privacy Policy'),
dict(func=request_help_tos,
command='help/tos',
aliases=['tos'],
help='print Twitter Terms of Service'))
subparsers = root_parser.add_subparsers(help='commands')
for cmd in commands:
parser = subparsers.add_parser(
cmd['command'],
aliases=cmd['aliases'],
help=cmd['help'])
parser.set_defaults(func=cmd['func'])
return root_parser.parse_args(args=args or ('--help',))
def request_decorator(request):
"""Decorate a function that returns an endpoint."""
@wraps(request)
def request_wrapper(stdout, stderr):
"""Output the response received from Twitter."""
output(request(twitter_instance())(), stdout)
return request_wrapper
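# Note (added for clarity): ``request_decorator`` turns each ``request_help_*``
# function below into a two-argument callable (stdout, stderr) that builds an
# authenticated Twitter handler via ``twitter_instance()``, calls the selected
# help endpoint, and writes the response to stdout through ``output``.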
@request_decorator
def request_help_configuration(twhandler):
"""Return the handler for GET help/configuration."""
return twhandler.help.configuration
@request_decorator
def request_help_languages(twhandler):
"""Return the handler for GET help/languages."""
return twhandler.help.languages
@request_decorator
def request_help_privacy(twhandler):
"""Return the handler for GET help/privacy."""
return twhandler.help.privacy
@request_decorator
def request_help_tos(twhandler):
"""Return the handler for GET help/tos."""
return twhandler.help.tos
def run(args, stdout=sys.stdout, stderr=sys.stderr):
args.func(stdout, stderr)
def main(args=sys.argv[1:]):
sys.exit(run(parse_args(args)))
if __name__ == '__main__':
main()
|
the-stack_0_17659 | import pickle
import os
import numpy as np
import pandas as pd
from .dataset import GeneExpressionDataset, arrange_categories
from .dataset10X import Dataset10X
class PbmcDataset(GeneExpressionDataset):
r""" Loads pbmc dataset.
We considered scRNA-seq data from two batches of peripheral blood mononuclear cells (PBMCs) from a healthy donor
(4K PBMCs and 8K PBMCs). We derived quality control metrics using the cellrangerRkit R package (v. 1.1.0).
Quality metrics were extracted from CellRanger throughout the molecule specific information file. After filtering,
we extract 12,039 cells with 10,310 sampled genes and get biologically meaningful clusters with the
software Seurat. We then filter genes that we could not match with the bulk data used for differential
expression to be left with g = 3346.
Args:
:save_path: Save path of raw data file. Default: ``'data/'``.
Examples:
>>> gene_dataset = PbmcDataset()
"""
def __init__(self, save_path="data/"):
self.save_path = save_path
self.urls = [
"https://github.com/YosefLab/scVI-data/raw/master/gene_info.csv",
"https://github.com/YosefLab/scVI-data/raw/master/pbmc_metadata.pickle",
]
self.download_names = ["gene_info_pbmc.csv", "pbmc_metadata.pickle"]
self.download()
self.de_metadata = pd.read_csv(
os.path.join(self.save_path, "gene_info_pbmc.csv"), sep=","
)
pbmc_metadata = pickle.load(
open(os.path.join(self.save_path, "pbmc_metadata.pickle"), "rb")
)
pbmc = GeneExpressionDataset.concat_datasets(
Dataset10X("pbmc8k", save_path=save_path),
Dataset10X("pbmc4k", save_path=save_path),
)
self.barcodes = pd.concat(pbmc.barcodes).values.ravel().astype(str)
super().__init__(
pbmc.X,
pbmc.local_means,
pbmc.local_vars,
pbmc.batch_indices,
pbmc.labels,
pbmc.gene_names,
)
dict_barcodes = dict(zip(self.barcodes, np.arange(len(self.barcodes))))
subset_cells = []
barcodes_metadata = (
pbmc_metadata["barcodes"].index.values.ravel().astype(np.str)
)
for barcode in barcodes_metadata:
if (
barcode in dict_barcodes
): # barcodes with end -11 filtered on 10X website (49 cells)
subset_cells += [dict_barcodes[barcode]]
self.update_cells(subset_cells=np.array(subset_cells))
idx_metadata = np.array(
[not barcode.endswith("11") for barcode in barcodes_metadata], dtype=np.bool
)
self.design = pbmc_metadata["design"][idx_metadata]
self.raw_qc = pbmc_metadata["raw_qc"][idx_metadata]
self.qc_names = self.raw_qc.columns
self.qc = self.raw_qc.values
self.qc_pc = pbmc_metadata["qc_pc"][idx_metadata]
self.normalized_qc = pbmc_metadata["normalized_qc"][idx_metadata]
labels = pbmc_metadata["clusters"][idx_metadata].reshape(-1, 1)[: len(self)]
self.labels, self.n_labels = arrange_categories(labels)
self.cell_types = pbmc_metadata["list_clusters"][: self.n_labels]
genes_to_keep = list(
self.de_metadata["ENSG"].values
) # only keep the genes for which we have de data
difference = list(
set(genes_to_keep).difference(set(pbmc.gene_names))
) # Non empty only for unit tests
for gene in difference:
genes_to_keep.remove(gene)
self.filter_genes(genes_to_keep)
self.de_metadata = self.de_metadata.head(
len(genes_to_keep)
) # this would only affect the unit tests
class PurifiedPBMCDataset(GeneExpressionDataset):
r""" The purified PBMC dataset from: "Massively parallel digital transcriptional profiling of single cells".
Args:
:save_path: Save path of raw data file. Default: ``'data/'``.
Examples:
>>> gene_dataset = PurifiedPBMCDataset()
"""
def __init__(self, save_path="data/", filter_cell_types=None):
cell_types = np.array(
[
"cd4_t_helper",
"regulatory_t",
"naive_t",
"memory_t",
"cytotoxic_t",
"naive_cytotoxic",
"b_cells",
"cd4_t_helper",
"cd34",
"cd56_nk",
"cd14_monocytes",
]
)
if (
filter_cell_types
): # filter = np.arange(6) - for T cells: np.arange(4) for T/CD4 cells
cell_types = cell_types[np.array(filter_cell_types)]
datasets = []
for cell_type in cell_types:
dataset = Dataset10X(cell_type, save_path=save_path)
dataset.cell_types = np.array([cell_type])
datasets += [dataset]
pbmc = GeneExpressionDataset.concat_datasets(*datasets, shared_batches=True)
pbmc.subsample_genes(subset_genes=(np.array(pbmc.X.sum(axis=0)) > 0).ravel())
super().__init__(
pbmc.X,
pbmc.local_means,
pbmc.local_vars,
pbmc.batch_indices,
pbmc.labels,
gene_names=pbmc.gene_names,
cell_types=pbmc.cell_types,
)
|
the-stack_0_17661 | """
This is where I will actually make my
daily dashboard to monitor my stocks.
"""
#%%
import ingest
with open("../data/stocks.txt") as f:
raw = f.read()
df = ingest.ingest(raw)
# %%
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# Add an import for pandas_datareader and datetime
import pandas_datareader.data as web
from datetime import datetime
#%%
app = dash.Dash()
portfolio = ["TSLA", "PLTR", "JMIA"]
names = ["Tesla", "Palantir", "Jumia"]
options = []
for tic, name in zip(portfolio, names):
mydict = {} # label: user sees, value: script sees
mydict["label"] = tic + " " + name
mydict["value"] = tic
options.append(mydict)
app.layout = html.Div(
[
html.H1("Stock Ticker Dashboard"),
html.Div(
[
html.H3("Enter a stock symbol:", style={"paddingRight": "30px"}),
dcc.Dropdown(
id="my_ticker_symbol", options=options, value=["TSLA"], multi=True
),
],
style={"display": "inline-block", "verticalAlign": "top", "width": "30%"},
),
html.Div(
[
html.H3("Select a start and end date"),
dcc.DatePickerRange(
id="my_date_picker",
min_date_allowed=datetime(2021, 1, 1),
max_date_allowed=datetime.today(),
start_date=datetime(2021, 1, 1),
end_date=datetime.today(),
),
],
style={"display": "inline-block"},
),
html.Div(
[
html.Button(
id="submit-button",
n_clicks=0,
children="Submit",
style={"fontSize": 24, "marginLeft": "30px"},
)
]
),
dcc.Graph(id="my_graph", figure={"data": [{"x": [1, 2], "y": [3, 1]}]}),
]
)
@app.callback(
Output("my_graph", "figure"),
[Input("submit-button", "n_clicks")],
[
State("my_ticker_symbol", "value"),
State("my_date_picker", "start_date"),
State("my_date_picker", "end_date"),
],
)
def update_graph(n_clicks, stock_ticker, start_date, end_date):
# Use datareader and datetime to define a DataFrame
    start = datetime.strptime(str(start_date)[:10], "%Y-%m-%d")
end = datetime.strptime(str(end_date)[:10], "%Y-%m-%d")
# creating trace for every stock ticker
traces = []
for tic in stock_ticker:
df = price_action(
tic, token_path="../token.txt", start_date=start, end_date=end
)
traces.append({"x": df.reset_index()["date"], "y": df["close"], "name": tic})
# change the output data
fig = {
"data": traces,
"layout": {"title": ", ".join(stock_ticker) + " Closing Prices"},
}
return fig
if __name__ == "__main__":
app.run_server()
# %%
|
the-stack_0_17663 | from nose.tools import (
assert_equal, assert_in, assert_not_in,
assert_false, assert_true
)
from app.main.services.query_builder import construct_query, is_filtered
from app.main.services.query_builder import (
field_is_or_filter, field_filters,
or_field_filters, and_field_filters,
filter_clause
)
from werkzeug.datastructures import MultiDict
def test_should_have_correct_root_element():
assert_equal("query" in construct_query(build_query_params()), True)
def test_should_have_page_size_set():
assert_equal(construct_query(build_query_params())["size"], 100)
def test_should_be_able_to_override_pagesize():
assert_equal(construct_query(build_query_params(), 10)["size"], 10)
def test_page_should_set_from_parameter():
assert_equal(
construct_query(build_query_params(page=2))["from"], 100)
def test_should_have_no_from_by_default():
assert_false("from" in construct_query(build_query_params()))
def test_should_have_match_all_query_if_no_params():
assert_equal("query" in construct_query(build_query_params()), True)
assert_equal("match_all" in
construct_query(build_query_params())["query"], True)
def test_should_make_multi_match_query_if_keywords_supplied():
keywords = "these are my keywords"
query = construct_query(build_query_params(keywords))
assert_equal("query" in query, True)
assert_in("simple_query_string", query["query"])
query_string_clause = query["query"]["simple_query_string"]
assert_equal(query_string_clause["query"], keywords)
assert_equal(query_string_clause["default_operator"], "and")
assert_equal(query_string_clause["fields"], [
"frameworkName",
"id",
"lot",
"serviceBenefits",
"serviceFeatures",
"serviceName",
"serviceSummary",
"serviceTypes",
"supplierName",
])
def test_should_identify_filter_search_from_query_params():
cases = (
(build_query_params(), False),
(build_query_params(keywords="lot"), False),
(build_query_params(lot="lot"), True),
(build_query_params(keywords="something", lot="lot"), True),
(build_query_params(service_types=["serviceTypes"]), True)
)
for query, expected in cases:
yield assert_equal, is_filtered(query), expected
def test_should_have_filtered_root_element_if_service_types_search():
query = construct_query(build_query_params(
service_types=["my serviceTypes"]))
assert_equal("query" in query, True)
assert_equal("filtered" in query["query"], True)
def test_should_have_filtered_root_element_if_lot_search():
query = construct_query(build_query_params(lot="SaaS"))
assert_equal("query" in query, True)
assert_equal("filtered" in query["query"], True)
def test_should_have_filtered_root_element_and_match_all_if_no_keywords():
query = construct_query(build_query_params(
service_types=["my serviceTypes"]))
assert_equal("match_all" in query["query"]["filtered"]["query"], True)
def test_should_have_filtered_root_element_and_match_keywords():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["my serviceTypes"])
)["query"]["filtered"]["query"]
assert_in("simple_query_string", query)
query_string_clause = query["simple_query_string"]
assert_equal(query_string_clause["query"], "some keywords")
assert_equal(query_string_clause["default_operator"], "and")
assert_equal(query_string_clause["fields"], [
"frameworkName",
"id",
"lot",
"serviceBenefits",
"serviceFeatures",
"serviceName",
"serviceSummary",
"serviceTypes",
"supplierName",
])
def test_should_have_filtered_term_service_types_clause():
query = construct_query(build_query_params(service_types=["serviceTypes"]))
assert_equal("term" in
query["query"]["filtered"]["filter"]["bool"]["must"][0], True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_serviceTypes"],
"servicetypes")
def test_should_have_filtered_term_lot_clause():
query = construct_query(build_query_params(lot="SaaS"))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_lot"],
"saas")
def test_should_have_filtered_term_for_lot_and_service_types_clause():
query = construct_query(
build_query_params(lot="SaaS", service_types=["serviceTypes"]))
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes'}}, terms)
assert_in({"term": {'filter_lot': 'saas'}}, terms)
def test_should_not_filter_on_unknown_keys():
params = build_query_params(lot="SaaS", service_types=["serviceTypes"])
params.add("this", "that")
query = construct_query(params)
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes'}}, terms)
assert_in({"term": {'filter_lot': 'saas'}}, terms)
assert_not_in({"term": {'unknown': 'something to ignore'}}, terms)
def test_should_have_filtered_term_for_multiple_service_types_clauses():
query = construct_query(
build_query_params(
service_types=["serviceTypes1", "serviceTypes2", "serviceTypes3"]))
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes1'}}, terms)
assert_in({"term": {'filter_serviceTypes': 'servicetypes2'}}, terms)
assert_in({"term": {'filter_serviceTypes': 'servicetypes3'}}, terms)
def test_should_use_whitespace_stripped_lowercased_service_types():
query = construct_query(build_query_params(
service_types=["My serviceTypes"]))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_serviceTypes"],
"myservicetypes")
def test_should_use_no_non_alphanumeric_characters_in_service_types():
query = construct_query(
build_query_params(service_types=["Mys Service TYPes"]))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]["bool"]["must"][0]
["term"]["filter_serviceTypes"],
"mysservicetypes")
def test_should_have_highlight_block_on_keyword_search():
query = construct_query(build_query_params(keywords="some keywords"))
assert_equal("highlight" in query, True)
def test_should_have_highlight_block_on_filtered_search():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal("highlight" in query, True)
def test_highlight_block_sets_encoder_to_html():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal(query["highlight"]["encoder"], "html")
def test_highlight_block_contains_correct_fields():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal("highlight" in query, True)
cases = [
("id", True),
("lot", True),
("serviceName", True),
("serviceSummary", True),
("serviceFeatures", True),
("serviceBenefits", True),
("serviceTypes", True),
("supplierName", True)
]
for example, expected in cases:
yield \
assert_equal, \
example in query["highlight"]["fields"], \
expected, \
example
def build_query_params(keywords=None, service_types=None, lot=None, page=None):
query_params = MultiDict()
if keywords:
query_params["q"] = keywords
if service_types:
for service_type in service_types:
query_params.add("filter_serviceTypes", service_type)
if lot:
query_params["filter_lot"] = lot
if page:
query_params["page"] = page
return query_params
class TestFieldFilters(object):
def test_field_is_or_filter(self):
assert_true(field_is_or_filter(['a,b']))
def test_field_is_or_filter_no_comma(self):
assert_false(field_is_or_filter(['a']))
def test_field_is_or_filter_multiple_values_no_comma(self):
assert_false(field_is_or_filter(['a', 'b']))
def test_field_is_or_filter_multiple_values(self):
assert_false(field_is_or_filter(['a,b', 'b,c']))
def test_or_field_filters(self):
assert_equal(
or_field_filters('filterName', ['Aa bb', 'Bb cc']),
[{"terms": {"filterName": ['aabb', 'bbcc'], "execution": "bool"}}]
)
def test_or_field_filters_single_value(self):
assert_equal(
or_field_filters('filterName', ['Aa bb']),
[{"terms": {"filterName": ['aabb'], "execution": "bool"}}]
)
def test_and_field_filters(self):
assert_equal(
and_field_filters('filterName', ['Aa bb', 'Bb cc']),
[
{"term": {"filterName": 'aabb'}},
{"term": {"filterName": 'bbcc'}}
]
)
def test_and_field_filters_single_value(self):
assert_equal(
and_field_filters('filterName', ['Aa bb']),
[{"term": {"filterName": 'aabb'}}]
)
def test_field_filters_single_value(self):
assert_equal(
field_filters('filterName', ['Aa Bb']),
[{"term": {"filterName": 'aabb'}}]
)
def test_field_filters_multiple_and_values(self):
assert_equal(
field_filters('filterName', ['Aa bb', 'Bb,Cc']),
[
{"term": {"filterName": 'aabb'}},
{"term": {"filterName": 'bbcc'}}
]
)
def test_field_filters_or_value(self):
assert_equal(
field_filters('filterName', ['Aa,Bb']),
[{"terms": {"filterName": ['aa', 'bb'], "execution": "bool"}}]
)
class TestFilterClause(object):
def test_filter_ignores_non_filter_query_args(self):
assert_equal(
filter_clause(
MultiDict({'fieldName': ['Aa bb'], 'lot': ['saas']})
),
{'bool': {'must': []}}
)
def test_single_and_field(self):
assert_equal(
filter_clause(MultiDict(
{'filter_fieldName': ['Aa bb'], 'lot': 'saas'}
)),
{'bool': {
'must': [
{"term": {"filter_fieldName": 'aabb'}},
]
}}
)
def test_single_or_field(self):
assert_equal(
filter_clause(MultiDict({'filter_fieldName': ['Aa,Bb']})),
{'bool': {
'must': [
{"terms": {"filter_fieldName": ['aa', 'bb'], "execution": "bool"}},
]
}}
)
def test_or_and_combination(self):
bool_filter = filter_clause(MultiDict({
'filter_andFieldName': ['Aa', 'bb'],
'filter_orFieldName': ['Aa,Bb']
}))
assert_in(
{"terms": {"filter_orFieldName": ['aa', 'bb'], "execution": "bool"}},
bool_filter['bool']['must']
)
assert_in(
{"term": {"filter_andFieldName": 'aa'}},
bool_filter['bool']['must']
)
assert_in(
{"term": {"filter_andFieldName": 'bb'}},
bool_filter['bool']['must']
)
|
the-stack_0_17664 | import logging
from typing import Optional, Union
from .MatchMSDataBuilder import MatchMSDataBuilder
from .PandasDataBuilder import PandasDataBuilder
logging.getLogger(__name__).addHandler(logging.NullHandler())
def get_builder(filetype) -> Optional[Union[PandasDataBuilder, MatchMSDataBuilder]]:
if (filetype in ['csv', 'tsv']):
return PandasDataBuilder().with_filetype(filetype)
if (filetype in ['msp']):
return MatchMSDataBuilder().with_filetype(filetype)
return None
__all__ = [
"MatchMSDataBuilder",
"PandasDataBuilder",
]
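# Minimal usage sketch (illustrative, not part of the original module):
# ``get_builder`` dispatches purely on the file extension and returns ``None``
# for unsupported types, so a caller might write:
#
#     builder = get_builder("msp")  # MatchMSDataBuilder with filetype "msp" already set
#     if builder is None:
#         raise ValueError("unsupported file type")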
|
the-stack_0_17665 | """
This module is meant to compare results with those expected from papers, or create figures illustrating the
behavior of sdba methods and utilities.
"""
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.stats.kde import gaussian_kde
from xclim.sdba.adjustment import (
DetrendedQuantileMapping,
EmpiricalQuantileMapping,
QuantileDeltaMapping,
)
from xclim.sdba.processing import adapt_freq
from . import utils as tu
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
plt = False
__all__ = ["synth_rainfall", "cannon_2015_figure_2", "adapt_freq_graph"]
def synth_rainfall(shape, scale=1, wet_freq=0.25, size=1):
"""Return gamma distributed rainfall values for wet days.
Notes
-----
The probability density for the Gamma distribution is:
.. math::
p(x) = x^{k-1}\frac{e^{-x/\theta}}{\theta^k\\Gamma(k)},
where :math:`k` is the shape and :math:`\theta` the scale, and :math:`\\Gamma` is the Gamma function.
"""
is_wet = np.random.binomial(1, p=wet_freq, size=size)
wet_intensity = np.random.gamma(shape, scale, size)
return np.where(is_wet, wet_intensity, 0)
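# Illustrative example (not part of the original module): draw one year of
# daily synthetic rainfall with the same parameters used in adapt_freq_graph
# below, where roughly 25 % of the days are wet:
#
#     pr = synth_rainfall(2, 2, wet_freq=0.25, size=365)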
def cannon_2015_figure_2():
n = 10000
ref, hist, sim = tu.cannon_2015_rvs(n, random=False)
QM = EmpiricalQuantileMapping(kind="*", group="time", interp="linear")
QM.train(ref, hist)
sim_eqm = QM.predict(sim)
DQM = DetrendedQuantileMapping(kind="*", group="time", interp="linear")
DQM.train(ref, hist)
sim_dqm = DQM.predict(sim, degree=0)
QDM = QuantileDeltaMapping(kind="*", group="time", interp="linear")
QDM.train(ref, hist)
sim_qdm = QDM.predict(sim)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4))
x = np.linspace(0, 105, 50)
ax1.plot(x, gaussian_kde(ref)(x), color="r", label="Obs hist")
ax1.plot(x, gaussian_kde(hist)(x), color="k", label="GCM hist")
    ax1.plot(x, gaussian_kde(sim)(x), color="blue", label="GCM future")
ax1.plot(x, gaussian_kde(sim_qdm)(x), color="lime", label="QDM future")
ax1.plot(x, gaussian_kde(sim_eqm)(x), color="darkgreen", ls="--", label="QM future")
ax1.plot(x, gaussian_kde(sim_dqm)(x), color="lime", ls=":", label="DQM future")
ax1.legend(frameon=False)
ax1.set_xlabel("Value")
ax1.set_ylabel("Density")
tau = np.array([0.25, 0.5, 0.75, 0.95, 0.99]) * 100
bc_gcm = (
scoreatpercentile(sim, tau) - scoreatpercentile(hist, tau)
) / scoreatpercentile(hist, tau)
bc_qdm = (
scoreatpercentile(sim_qdm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_eqm = (
scoreatpercentile(sim_eqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_dqm = (
scoreatpercentile(sim_dqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
ax2.plot([0, 1], [0, 1], ls=":", color="blue")
ax2.plot(bc_gcm, bc_gcm, "-", color="blue", label="GCM")
ax2.plot(bc_gcm, bc_qdm, marker="o", mfc="lime", label="QDM")
ax2.plot(
bc_gcm,
bc_eqm,
marker="o",
mfc="darkgreen",
ls=":",
color="darkgreen",
label="QM",
)
ax2.plot(
bc_gcm,
bc_dqm,
marker="s",
mec="lime",
mfc="w",
ls="--",
color="lime",
label="DQM",
)
for i, s in enumerate(tau / 100):
ax2.text(bc_gcm[i], bc_eqm[i], f"{s} ", ha="right", va="center", fontsize=9)
ax2.set_xlabel("GCM relative change")
ax2.set_ylabel("Bias adjusted relative change")
ax2.legend(loc="upper left", frameon=False)
ax2.set_aspect("equal")
plt.tight_layout()
return fig
def adapt_freq_graph():
"""
    Create a graphic comparing the additive and multiplicative adjustment factors estimated before and after applying the adapt_freq method.
"""
n = 10000
x = tu.series(synth_rainfall(2, 2, wet_freq=0.25, size=n), "pr") # sim
y = tu.series(synth_rainfall(2, 2, wet_freq=0.5, size=n), "pr") # ref
xp = adapt_freq(x, y, thresh=0).sim_ad
fig, (ax1, ax2) = plt.subplots(2, 1)
sx = x.sortby(x)
sy = y.sortby(y)
sxp = xp.sortby(xp)
# Original and corrected series
ax1.plot(sx.values, color="blue", lw=1.5, label="x : sim")
ax1.plot(sxp.values, color="pink", label="xp : sim corrected")
ax1.plot(sy.values, color="k", label="y : ref")
ax1.legend()
# Compute qm factors
qm_add = QuantileDeltaMapping(kind="+", group="time").train(y, x).ds
qm_mul = QuantileDeltaMapping(kind="*", group="time").train(y, x).ds
qm_add_p = QuantileDeltaMapping(kind="+", group="time").train(y, xp).ds
qm_mul_p = QuantileDeltaMapping(kind="*", group="time").train(y, xp).ds
qm_add.cf.plot(ax=ax2, color="cyan", ls="--", label="+: y-x")
qm_add_p.cf.plot(ax=ax2, color="cyan", label="+: y-xp")
qm_mul.cf.plot(ax=ax2, color="brown", ls="--", label="*: y/x")
qm_mul_p.cf.plot(ax=ax2, color="brown", label="*: y/xp")
ax2.legend(loc="upper left", frameon=False)
return fig
|
the-stack_0_17666 | from collections import deque
import weakref
import py4j.protocol as proto
from py4j.clientserver import (
ClientServerConnection, ClientServer, JavaClient, PythonServer)
from py4j.java_gateway import (
CallbackServer, JavaGateway, GatewayClient, GatewayProperty,
PythonProxyPool, GatewayConnection, CallbackConnection)
from py4j.tests.py4j_callback_recursive_example import PythonPing
# Use deque to be thread-safe
MEMORY_HOOKS = deque()
CREATED = deque()
FINALIZED = deque()
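# How the bookkeeping below works: register_creation() stores str(obj) in
# CREATED right away and keeps a weak reference in MEMORY_HOOKS whose callback
# appends the same string to FINALIZED once the object is garbage collected,
# so tests can diff CREATED against FINALIZED to spot leaked instances.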
def register_creation(obj):
obj_str = str(obj)
CREATED.append(obj_str)
MEMORY_HOOKS.append(weakref.ref(
obj,
lambda wr: FINALIZED.append(obj_str)
))
class InstrumentedPythonPing(PythonPing):
def __init__(self, fail=False):
super(InstrumentedPythonPing, self).__init__(fail)
register_creation(self)
class InstrJavaGateway(JavaGateway):
def __init__(self, *args, **kwargs):
super(InstrJavaGateway, self). __init__(*args, **kwargs)
register_creation(self)
def _create_gateway_client(self):
gateway_client = InstrGatewayClient(
gateway_parameters=self.gateway_parameters)
return gateway_client
def _create_callback_server(self, callback_server_parameters):
callback_server = InstrCallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
return callback_server
def _create_gateway_property(self):
gateway_property = InstrGatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool(),
self.gateway_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
class InstrGatewayClient(GatewayClient):
def __init__(self, *args, **kwargs):
super(InstrGatewayClient, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self):
connection = InstrGatewayConnection(
self.gateway_parameters, self.gateway_property)
connection.start()
return connection
class InstrGatewayProperty(GatewayProperty):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, *args, **kwargs):
super(InstrGatewayProperty, self).__init__(*args, **kwargs)
register_creation(self)
class InstrGatewayConnection(GatewayConnection):
def __init__(self, *args, **kwargs):
super(InstrGatewayConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrCallbackServer(CallbackServer):
def __init__(self, *args, **kwargs):
super(InstrCallbackServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self, socket_instance, stream):
connection = InstrCallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters, self)
return connection
class InstrCallbackConnection(CallbackConnection):
def __init__(self, *args, **kwargs):
super(InstrCallbackConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrClientServerConnection(ClientServerConnection):
def __init__(self, *args, **kwargs):
super(InstrClientServerConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrPythonServer(PythonServer):
def __init__(self, *args, **kwargs):
super(InstrPythonServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self, socket, stream):
connection = InstrClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self.gateway_client, self)
connection.init_socket_from_python_server(socket, stream)
return connection
class InstrJavaClient(JavaClient):
def __init__(self, *args, **kwargs):
super(InstrJavaClient, self).__init__(*args, **kwargs)
register_creation(self)
def _create_new_connection(self):
connection = InstrClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self)
connection.connect_to_java_server()
self.set_thread_connection(connection)
self.deque.append(connection)
return connection
class InstrClientServer(ClientServer):
def __init__(self, *args, **kwargs):
super(InstrClientServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_gateway_client(self):
java_client = InstrJavaClient(
self.java_parameters, self.python_parameters)
return java_client
def _create_callback_server(self, callback_server_parameters):
callback_server = InstrPythonServer(
self._gateway_client, self.java_parameters, self.python_parameters,
self.gateway_property)
return callback_server
def _create_gateway_property(self):
gateway_property = InstrGatewayProperty(
self.java_parameters.auto_field, PythonProxyPool(),
self.java_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
|
the-stack_0_17667 | """ Basic tests for idact-gui.
"""
import pytest
from pytestqt.qt_compat import qt_api
from gui.functionality.running_notebooks import RunningNotebooks
from gui.helpers.configuration_provider import ConfigurationProvider
from gui.functionality.main_window import MainWindow
from gui.functionality.idact_notebook import IdactNotebook
from gui.functionality.manage_jobs import ManageJobs
from gui.functionality.add_cluster import AddCluster
from gui.functionality.remove_cluster import RemoveCluster
from gui.functionality.adjust_timeouts import AdjustTimeouts
@pytest.fixture()
def window():
assert qt_api.QApplication.instance() is not None
conf_provider = ConfigurationProvider()
if not conf_provider.check_if_conf_file_exists():
conf_provider.create_conf_file()
if not conf_provider.check_if_args_files_exist():
conf_provider.create_args_files()
window = MainWindow()
return window
def test_basics(window, qtbot):
""" Tests if idact-gui renders itself.
"""
window.show()
assert window.isVisible()
assert window.windowTitle() == 'Idact GUI'
def test_deploy_notebook_window(window, qtbot):
""" Tests if it is possible to open deploy notebook window.
"""
window.show()
window.deploy_notebook_action.trigger()
assert window.centralWidget().__class__ == IdactNotebook(window).__class__
def test_manage_jobs_window(window, qtbot):
""" Tests if it is possible to open manage jobs window.
"""
window.show()
window.manage_jobs_action.trigger()
assert window.centralWidget().__class__ == ManageJobs(window).__class__
def test_running_notebooks_window(window, qtbot):
""" Tests if it is possible to open running notebooks window.
"""
window.show()
window.running_notebooks_action.trigger()
assert window.centralWidget().__class__ == RunningNotebooks(window).__class__
def test_add_cluster_window(window, qtbot):
""" Tests if it is possible to open add cluster window.
"""
window.show()
window.add_cluster_action.trigger()
assert window.centralWidget().__class__ == AddCluster(window).__class__
def test_remove_cluster_window(window, qtbot):
""" Tests if it is possible to open remove cluster window.
"""
window.show()
window.remove_cluster_action.trigger()
assert window.centralWidget().__class__ == RemoveCluster(window).__class__
def test_edit_configuration_window(window, qtbot):
""" Tests if it is possible to open edit configuration window.
"""
window.show()
window.edit_configuration_action.trigger()
assert window.centralWidget().__class__ == AdjustTimeouts(window).__class__
def test_logs_window(window, qtbot):
""" Tests if it is possible to open logs window.
"""
window.show()
window.show_logs_action.trigger()
assert window.show_logs_window.isVisible()
assert window.show_logs_window.windowTitle() == 'Logs'
def test_help_window(window, qtbot):
""" Tests if it is possible to open help window.
"""
window.show()
window.see_help_action.trigger()
assert window.help_window.isVisible()
assert window.help_window.windowTitle() == 'Help'
def test_about_window(window, qtbot):
""" Tests if it is possible to open help window.
"""
window.show()
window.about_the_program_action.trigger()
assert window.program_info_window.isVisible()
assert window.program_info_window.windowTitle() == 'About'
|
the-stack_0_17668 | """ A simple module for caching a single object on disk"""
import cPickle as pickle
import errno
import logging
import os
import simpleflock
__LOCK_FILE = 'dmsa.cache.lockfile'
__CACHE_FILE = 'dmsa.cache'
__DIR = None
def set_cache_dir(cache_dir):
"""Set the directory to use for holding cache and lock files
If the directory is never set, the current directory is used (see below).
"""
global __DIR
__DIR = cache_dir
def _pathname(name):
"""Ensure directory `__DIR` exists and return path name
If `__DIR` is falsy, simply return `name` as the pathname.
Otherwise, create `__DIR` if necessary, and return the pathname.
Return: the pathname resulting from path-joining `__DIR` and `name`
(or just `name`).
"""
if not __DIR:
return name
try:
os.makedirs(__DIR)
except OSError:
pass
return os.path.join(__DIR, name)
def _pickle_and_cache_models(obj):
pathname = _pathname(__CACHE_FILE)
try:
with open(pathname, mode='w') as f:
pickle.dump(obj, f)
except pickle.PicklingError as e:
logging.error('pickling object: {}'.format(e))
raise
except IOError as e:
logging.error('opening {} for writing: {}'.format(pathname, e))
raise
def set_cache(obj):
"""Update the cache with an object (dict, e.g.)
The object is cached on disk. A lock file is used to coordinate
updates to this cache among threads and processes.
If another process has the cache locked (only used for writing),
then this function does nothing; i.e. it assumes that somebody
else is taking care of the update ....
Arguments:
obj - object to write to the cache
Return:
none
"""
lock_path = _pathname(__LOCK_FILE)
try:
with simpleflock.SimpleFlock(lock_path, timeout=0):
_pickle_and_cache_models(obj)
except IOError as e:
if e.errno != errno.EWOULDBLOCK:
logging.error('creating lock file {}: {}'.format(lock_path, e))
raise
def get_cache():
"""Fetch the object from disk cache
Return: cached object as written by set_cache, or None if no cache file
"""
pathname = _pathname(__CACHE_FILE)
try:
with open(pathname, mode='r') as f:
try:
obj = pickle.load(f)
except pickle.UnpicklingError as e:
logging.error('unpickling object: {}'.format(e))
raise
except IOError as e:
if e.errno == errno.ENOENT:
return None
logging.error('opening {} for reading: {}'.format(pathname, e))
raise
return obj
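# Minimal usage sketch (illustrative only, not part of the original module):
#
#     set_cache_dir('/tmp/dmsa')        # optional; defaults to the current directory
#     set_cache({'models': [1, 2, 3]})  # silently skipped if another writer holds the lock
#     obj = get_cache()                 # -> {'models': [1, 2, 3]}, or None if nothing cached yet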
|
the-stack_0_17669 | """Initialization of ATAG One climate platform."""
from __future__ import annotations
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_BOOST,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE
from . import CLIMATE, DOMAIN, AtagEntity
PRESET_MAP = {
"Manual": "manual",
"Auto": "automatic",
"Extend": "extend",
PRESET_AWAY: "vacation",
PRESET_BOOST: "fireplace",
}
PRESET_INVERTED = {v: k for k, v in PRESET_MAP.items()}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
HVAC_MODES = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
async def async_setup_entry(hass, entry, async_add_entities):
"""Load a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities([AtagThermostat(coordinator, CLIMATE)])
class AtagThermostat(AtagEntity, ClimateEntity):
"""Atag climate device."""
_attr_hvac_modes = HVAC_MODES
_attr_preset_modes = list(PRESET_MAP.keys())
_attr_supported_features = SUPPORT_FLAGS
def __init__(self, coordinator, atag_id):
"""Initialize an Atag climate device."""
super().__init__(coordinator, atag_id)
self._attr_temperature_unit = coordinator.data.climate.temp_unit
@property
def hvac_mode(self) -> str | None: # type: ignore[override]
"""Return hvac operation ie. heat, cool mode."""
if self.coordinator.data.climate.hvac_mode in HVAC_MODES:
return self.coordinator.data.climate.hvac_mode
return None
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation."""
is_active = self.coordinator.data.climate.status
return CURRENT_HVAC_HEAT if is_active else CURRENT_HVAC_IDLE
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.coordinator.data.climate.temperature
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self.coordinator.data.climate.target_temperature
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., auto, manual, fireplace, extend, etc."""
preset = self.coordinator.data.climate.preset_mode
return PRESET_INVERTED.get(preset)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.coordinator.data.climate.set_temp(kwargs.get(ATTR_TEMPERATURE))
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.coordinator.data.climate.set_hvac_mode(hvac_mode)
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.coordinator.data.climate.set_preset_mode(PRESET_MAP[preset_mode])
self.async_write_ha_state()
|
the-stack_0_17670 | import torch
from torch.nn import Parameter
from torch_geometric.nn import ChebConv
from torch_geometric.nn.inits import glorot, zeros
class GConvLSTM(torch.nn.Module):
r"""An implementation of the Chebyshev Graph Convolutional Long Short Term Memory
Cell. For details see this paper: `"Structured Sequence Modeling with Graph
Convolutional Recurrent Networks." <https://arxiv.org/abs/1612.07659>`_
Args:
in_channels (int): Number of input features.
out_channels (int): Number of output features.
K (int): Chebyshev filter size :math:`K`.
normalization (str, optional): The normalization scheme for the graph
Laplacian (default: :obj:`"sym"`):
1. :obj:`None`: No normalization
:math:`\mathbf{L} = \mathbf{D} - \mathbf{A}`
2. :obj:`"sym"`: Symmetric normalization
:math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A}
\mathbf{D}^{-1/2}`
3. :obj:`"rw"`: Random-walk normalization
:math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}`
You need to pass :obj:`lambda_max` to the :meth:`forward` method of
this operator in case the normalization is non-symmetric.
:obj:`\lambda_max` should be a :class:`torch.Tensor` of size
:obj:`[num_graphs]` in a mini-batch scenario and a
scalar/zero-dimensional tensor when operating on single graphs.
You can pre-compute :obj:`lambda_max` via the
:class:`torch_geometric.transforms.LaplacianLambdaMax` transform.
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, in_channels: int, out_channels: int, K: int,
normalization: str="sym", bias: bool=True):
super(GConvLSTM, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.K = K
self.normalization = normalization
self.bias = bias
self._create_parameters_and_layers()
self._set_parameters()
def _create_input_gate_parameters_and_layers(self):
self.conv_x_i = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_i = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_i = Parameter(torch.Tensor(1, self.out_channels))
self.b_i = Parameter(torch.Tensor(1, self.out_channels))
def _create_forget_gate_parameters_and_layers(self):
self.conv_x_f = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_f = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_f = Parameter(torch.Tensor(1, self.out_channels))
self.b_f = Parameter(torch.Tensor(1, self.out_channels))
def _create_cell_state_parameters_and_layers(self):
self.conv_x_c = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_c = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.b_c = Parameter(torch.Tensor(1, self.out_channels))
def _create_output_gate_parameters_and_layers(self):
self.conv_x_o = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_o = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_o = Parameter(torch.Tensor(1, self.out_channels))
self.b_o = Parameter(torch.Tensor(1, self.out_channels))
def _create_parameters_and_layers(self):
self._create_input_gate_parameters_and_layers()
self._create_forget_gate_parameters_and_layers()
self._create_cell_state_parameters_and_layers()
self._create_output_gate_parameters_and_layers()
def _set_parameters(self):
glorot(self.w_c_i)
glorot(self.w_c_f)
glorot(self.w_c_o)
zeros(self.b_i)
zeros(self.b_f)
zeros(self.b_c)
zeros(self.b_o)
def _set_hidden_state(self, X, H):
if H is None:
H = torch.zeros(X.shape[0], self.out_channels)
return H
def _set_cell_state(self, X, C):
if C is None:
C = torch.zeros(X.shape[0], self.out_channels)
return C
def _calculate_input_gate(self, X, edge_index, edge_weight, H, C):
I = self.conv_x_i(X, edge_index, edge_weight)
I = I + self.conv_h_i(H, edge_index, edge_weight)
I = I + (self.w_c_i*C)
I = I + self.b_i
I = torch.sigmoid(I)
return I
def _calculate_forget_gate(self, X, edge_index, edge_weight, H, C):
F = self.conv_x_f(X, edge_index, edge_weight)
F = F + self.conv_h_f(H, edge_index, edge_weight)
F = F + (self.w_c_f*C)
F = F + self.b_f
F = torch.sigmoid(F)
return F
def _calculate_cell_state(self, X, edge_index, edge_weight, H, C, I, F):
T = self.conv_x_c(X, edge_index, edge_weight)
T = T + self.conv_h_c(H, edge_index, edge_weight)
T = T + self.b_c
T = torch.tanh(T)
C = F*C + I*T
return C
def _calculate_output_gate(self, X, edge_index, edge_weight, H, C):
O = self.conv_x_o(X, edge_index, edge_weight)
O = O + self.conv_h_o(H, edge_index, edge_weight)
O = O + (self.w_c_o*C)
O = O + self.b_o
O = torch.sigmoid(O)
return O
def _calculate_hidden_state(self, O, C):
H = O * torch.tanh(C)
return H
def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor, edge_weight: torch.FloatTensor=None,
H: torch.FloatTensor=None, C: torch.FloatTensor=None) -> torch.FloatTensor:
"""
Making a forward pass. If edge weights are not present the forward pass
defaults to an unweighted graph. If the hidden state and cell state
matrices are not present when the forward pass is called these are
initialized with zeros.
Arg types:
* **X** *(PyTorch Float Tensor)* - Node features.
* **edge_index** *(PyTorch Long Tensor)* - Graph edge indices.
* **edge_weight** *(PyTorch Long Tensor, optional)* - Edge weight vector.
* **H** *(PyTorch Float Tensor, optional)* - Hidden state matrix for all nodes.
* **C** *(PyTorch Float Tensor, optional)* - Cell state matrix for all nodes.
Return types:
* **H** *(PyTorch Float Tensor)* - Hidden state matrix for all nodes.
* **C** *(PyTorch Float Tensor)* - Cell state matrix for all nodes.
"""
H = self._set_hidden_state(X, H)
C = self._set_cell_state(X, C)
I = self._calculate_input_gate(X, edge_index, edge_weight, H, C)
F = self._calculate_forget_gate(X, edge_index, edge_weight, H, C)
C = self._calculate_cell_state(X, edge_index, edge_weight, H, C, I, F)
O = self._calculate_output_gate(X, edge_index, edge_weight, H, C)
H = self._calculate_hidden_state(O, C)
return H, C
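# Minimal usage sketch (illustrative, not part of the original file): one
# recurrent step on a toy graph with 4 nodes and 8 input features.
#
#     cell = GConvLSTM(in_channels=8, out_channels=16, K=2)
#     X = torch.randn(4, 8)
#     edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
#     H, C = cell(X, edge_index)            # first step: H and C default to zeros
#     H, C = cell(X, edge_index, H=H, C=C)  # later steps reuse the returned state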
|
the-stack_0_17671 | import argparse
import os
import random
import shutil
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.utils.data import DataLoader
from torchvision import transforms
import data
import new_data
import resnet
def get_arguments():
parser = argparse.ArgumentParser(description='RecycleNet')
parser.add_argument('--b', '--batch', type=int, default=16)
parser.add_argument('--gpu', type=str, help='0; 0,1; 0,3; etc', required=True)
parser.add_argument('--root_dir', type=str, default='data/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--arch', type=str, default='resnet18_base', help='resnet18, 34, 50, 101, 152')
# parser.add_argument('--lr_finetune', type=float, default=5e-5)
# parser.add_argument('--save_model_interval', type=int, default=5000)
# parser.add_argument('--save_training_img_interval', type=int, default=5000)
# parser.add_argument('--vis_interval', type=int, default=5)
# parser.add_argument('--max_iter', type=int, default=1000000)
# parser.add_argument('--display_id', type=int, default=10)
parser.add_argument('--att_mode', type=str, default='ours', help='attention module mode: ours, cbam, se')
parser.add_argument('--use_att', action='store_true', help='use attention module')
parser.add_argument('--no_pretrain', action='store_false', help='training from scratch')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--adjust-freq', type=int, default=40, help='learning rate adjustment frequency (default: 40)')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--seed', default=1234, type=int, help='seed for initializing training. ')
parser.add_argument('--new_data', action='store_true', help='use scott\'s relabelled dataset')
return parser.parse_args()
def main():
args = get_arguments()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
BATCH_SIZE = args.b
GPU = args.gpu
ROOT_DIR = args.root_dir
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
if torch.cuda.is_available():
print('using Cuda devices, num:', torch.cuda.device_count())
if not args.evaluate:
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
'''def ToCudaVariable(xs, volatile=False, requires_grad=True):
if torch.cuda.is_available():
return [Variable(x.cuda(), volatile=volatile, requires_grad=requires_grad) for x in xs]
else:
return [Variable(x, volatile=volatile, requires_grad=requires_grad) for x in xs]
'''
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
if args.new_data:
n_classes = 9
else:
n_classes = 6
if args.arch == 'resnet18_base':
model = nn.DataParallel(
resnet.resnet18(pretrained=True if not args.resume else False, num_classes=n_classes, use_att=args.use_att,
att_mode=args.att_mode).to(device))
elif args.arch == 'resnet34_base':
model = nn.DataParallel(
resnet.resnet34(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet50_base':
model = nn.DataParallel(
resnet.resnet50(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet101_base':
model = nn.DataParallel(
resnet.resnet101(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet152_base':
model = nn.DataParallel(
resnet.resnet152(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
else:
model = nn.DataParallel(resnet.resnet18(pretrained=True, num_classes=5, use_att=False).to(device))
print(model)
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
criterion = nn.CrossEntropyLoss().to(device)
# att_params = [p for n,p in model.named_parameters() if n.startswith('module.att') and p.requires_grad]
# non_att_params = [p for n,p in model.named_parameters() if not n.startswith('module.att') and p.requires_grad]
# params = [{'params': non_att_params, 'lr': args.lr / 10.0}, {'params': att_params}]
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=device)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
print('=> best accuracy {}'.format(best_acc1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
train_img_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
train_dataset = new_data.TrashDataset(ROOT_DIR, train_img_transform, 'train')
else:
train_dataset = data.TrashDataset(ROOT_DIR, train_img_transform, 'train')
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=args.workers,
pin_memory=True)
val_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
val_dataset = new_data.TrashDataset(ROOT_DIR, val_img_transform, 'val')
else:
val_dataset = data.TrashDataset(ROOT_DIR, val_img_transform, 'val')
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers,
pin_memory=True)
test_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
test_dataset = new_data.TrashDataset(ROOT_DIR, test_img_transform, 'test')
else:
test_dataset = data.TrashDataset(ROOT_DIR, test_img_transform, 'test')
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers,
pin_memory=True)
if args.evaluate:
# validate(args, val_loader, model, criterion, device)
test(args, test_loader, model, criterion, device)
return
best_acc1 = 0
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(args, optimizer, epoch, args.adjust_freq)
train(args, train_loader, model, criterion, optimizer, epoch, device)
acc1 = validate(args, val_loader, model, criterion, device)
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save_dir)
def train(args, train_loader, model, criterion, optimizer, epoch, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
# import pdb
# pdb.set_trace()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
def validate(args, val_loader, model, criterion, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def test(args, val_loader, model, criterion, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target, input_path) in enumerate(val_loader):
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
# import pdb
            # pdb.set_trace()
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth.tar'):
torch.save(state, os.path.join(save_dir, filename))
if is_best:
shutil.copyfile(os.path.join(save_dir, filename), os.path.join(save_dir, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
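# Illustrative example (not part of the original script): two updates with
# batch size 4 each produce a running per-sample average.
#   meter = AverageMeter()
#   meter.update(0.5, n=4)   # sum = 2.0, count = 4, avg = 0.5
#   meter.update(1.0, n=4)   # sum = 6.0, count = 8, avg = 0.75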
def adjust_learning_rate(args, optimizer, epoch, N):
"""Sets the learning rate to the initial LR decayed by 10 every N epochs"""
lr = args.lr * (0.1 ** (epoch // N))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice is not contiguous for k > 1
res.append(correct_k.mul_(100.0 / batch_size))
return res
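# Illustrative example (not part of the original script), using the default topk=(1,):
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # two samples, two classes
#   target = torch.tensor([1, 0])
#   accuracy(output, target)  # -> [tensor([100.])], both top-1 predictions are correct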
if __name__ == '__main__':
main()
|
the-stack_0_17673 | # -*- coding: utf-8 -*-
import time
import numpy as np
from pyodesys import ODESys as _ODESys
from pyodesys.results import Result
from chempy.units import get_derived_unit, unitless_in_registry, uniform, patched_numpy as pnp
from .integrate import run
from ._chemreac import cvode_predefined_durations_fields
class ODESys(_ODESys):
def __init__(self, rd, k_from_params=None, variables_from_params=None):
if rd.N > 1:
raise NotImplementedError("ODESys expects single bin for now")
self.rd = rd
self.k_from_params = k_from_params
self.variables_from_params = variables_from_params
ny = property(lambda self: self.rd.n*self.rd.N)
names = property(lambda self: self.rd.substance_names)
latex_names = property(lambda self: self.rd.substance_latex_names)
param_names = property(lambda self: self.rd.param_names)
autonomous_interface = property(lambda self: not self.rd.logt)
numpy = pnp
# dep_by_name = True
# par_by_name = True
def _get_units_util(self):
if self.rd.unit_registry is None:
_dedim = lambda x: np.array(x)
time_u = 1
conc_u = 1
dr_u = 1
else:
_dedim = lambda x: unitless_in_registry(x, self.rd.unit_registry)
time_u = get_derived_unit(self.rd.unit_registry, 'time')
conc_u = get_derived_unit(self.rd.unit_registry, 'concentration')
dr_u = get_derived_unit(self.rd.unit_registry, 'doserate')
return locals()
def integrate(self, x, y0, params=None, integrator='cvode', **kwargs):
if params is not None and self.k_from_params is not None:
self.rd.k = self.k_from_params(self, params)
if 'doserate' in (params or {}):
self.rd.set_with_units(
'fields', [[self.variables_from_params['density'](self, params)*params['doserate']]])
if 'atol' in kwargs and isinstance(kwargs['atol'], dict):
kwargs['atol'] = [kwargs['atol'][k] for k in self.names]
integr = run(self.rd, [y0[k] for k in self.names] if isinstance(y0, dict) else y0,
x, integrator=integrator, **kwargs)
pout = [params[k] for k in self.param_names] if self.param_names else None
return Result(integr.with_units('tout'), integr.with_units('Cout')[:, 0, :],
pout, integr.info, self)
def chained_parameter_variation(self, durations, y0, varied_params, default_params=None,
integrate_kwargs=None, x0=None, npoints=1, numpy=None):
if list(varied_params) != ['doserate']:
raise NotImplementedError("For now only varied doserate is supported")
if self.param_names != ['doserate']:
raise NotImplementedError("We expect doserate to be varied for now")
uutil = self._get_units_util()
_dedim, time_u, conc_u, dr_u = [uutil[k] for k in '_dedim time_u conc_u dr_u'.split()]
density = _dedim(self.variables_from_params['density'](self, default_params))
if default_params:
self.rd.k = _dedim(self.k_from_params(self, default_params))
if x0 is not None:
assert x0 == 0*time_u
integrate_kwargs = integrate_kwargs or {}
atol = integrate_kwargs.pop('atol', 1e-8)
if isinstance(atol, float):
atol = [atol]
elif isinstance(atol, dict):
atol = [atol[k] for k in self.names]
rtol = integrate_kwargs.pop('rtol', 1e-8)
method = integrate_kwargs.pop('method', 'bdf')
integrator = integrate_kwargs.pop('integrator', 'cvode')
if integrator != 'cvode':
raise NotImplementedError("chained_parameter_variation requires cvode for now")
drate = uniform(varied_params['doserate'])
time_cpu = time.process_time()
time_wall = time.time()
tout, yout = cvode_predefined_durations_fields(
self.rd, _dedim([y0[k] for k in self.names]),
_dedim(durations),
_dedim(drate*density),
atol=atol, rtol=rtol, method=method, npoints=npoints, **integrate_kwargs)
info = dict(
nsteps=-1,
nfev=self.rd.nfev,
njev=self.rd.njev,
time_wall=time.time() - time_wall,
time_cpu=time.process_time() - time_cpu,
success=True,
integrator=[integrator],
t0_set=False,
linear_solver=0, # pyodesys.results.Result work-around for now (not important)
)
info.update(self.rd.last_integration_info)
dr_out = np.concatenate((np.repeat(drate, npoints), drate[-1:]))
return Result(tout*time_u, yout[:, 0, :]*conc_u, dr_out.reshape((-1, 1))*dr_u, info, self)
|
the-stack_0_17676 | #!/usr/bin/env python3
# controls individual pis locally based on LAN commands
import socket
import sys
import os
import time
import picamera
# server connection and client identification
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((sys.argv[1], int(sys.argv[2])))
# utf-8 byte encoder with injected header
def msgEncode(message):
msg = str(message)
msgLength = len(msg)
msg = "{:<4}".format(msgLength) + msg
msg = msg.encode()
return msg
# utf-8 byte reciever, buffer, and decoder
def msgDecode():
chunk = SERVER.recv(1)
while len(chunk) < 4:
chunk += SERVER.recv(1)
chunk = chunk.decode()
msgLength = int(chunk[:4])
msg = chunk[4:]
while len(msg) < msgLength:
chunk = SERVER.recv(1)
chunk = chunk.decode()
msg += chunk
return msg
# send encoded message to server
def msgSend(message):
msg = msgEncode(message)
SERVER.send(msg)
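# Illustrative example of the framing protocol above (not part of the original
# script): the payload length is left-aligned in a 4-character ASCII header.
#   msgEncode("HI")       == b"2   HI"
#   msgEncode("CAPTURED") == b"8   CAPTURED"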
msgSend(socket.gethostname()) # confirm connection to server with host name
# calibrate camera
camera = picamera.PiCamera()
camera.resolution = (3280, 2464)
camera.meter_mode = 'spot'
camera.image_denoise = False
fileName = msgDecode()
imgFormat = msgDecode()
FILE_NAME = str(socket.gethostname()) + "_" + fileName + "." + imgFormat
def cameraCalibration(iso=0, shutter=0):
camera.start_preview()
camera.iso = iso
camera.shutter_speed = shutter
camera.exposure_mode = 'auto'
camera.awb_mode = "auto"
time.sleep(2) # pause for exposure adjustments
camera.exposure_mode = 'off'
time.sleep(0.25) # allow white balance to adjust based on locked exposure
whiteBal = camera.awb_gains
camera.awb_mode = "off"
time.sleep(0.25) # allow gains to settle
camera.awb_gains = whiteBal
camera.stop_preview()
def profileAnnotation(profile):
string = '''PROFILE {}\nShutter: {:.3f} ISO: {}\nGain: {:.3f} :: {:.3f}
White Balance: {:.3f} :: {:.3f}'''.format(profile, camera.exposure_speed * 0.000001,
camera.iso, float(camera.digital_gain), float(camera.analog_gain),
float(camera.awb_gains[0]), float(camera.awb_gains[1]))
return string
def profileCycle(count, path, iso, shutter):
cameraCalibration(iso, shutter)
camera.annotate_text = profileAnnotation(count)
camera.capture("{}/{}.jpeg".format(path, count))
def generateProfiles():
path = fileName + "_Profiles"
os.mkdir(path, 0o777)
    camera.resolution = (1280, 720) # adjust camera resolution for preview images
profileCycle(1, path, 0, 0)
profileCycle(2, path, 100, 0)
profileCycle(3, path, 100, 10000)
profileCycle(4, path, 200, 10000)
profileCycle(5, path, 400, 10000)
    camera.resolution = (3280, 2464) # restore camera resolution to full quality
camera.annotate_text = ""
# generate exposure profiles
msg = msgDecode()
if msg == "EXPOSURE":
generateProfiles()
msgSend("GENERATED")
msg = msgDecode()
# set exposure
while True:
if msg == "1":
cameraCalibration(0, 0)
break
if msg == "2":
cameraCalibration(100, 0)
break
if msg == "3":
cameraCalibration(100, 10000)
break
if msg == "4":
cameraCalibration(200, 10000)
break
if msg == "5":
cameraCalibration(400, 10000)
break
msgSend("EXPOSED")
# create workspace
os.mkdir(fileName, 0o777)
directory = fileName + "/"
imgName = directory + FILE_NAME
cycle = 1
# capture sequence
while True:
msg = msgDecode()
if msg == "CAPTURE":
img = imgName + "_{:0>3}.{}".format(cycle, imgFormat)
camera.capture(img, format=imgFormat, quality=100)
cycle += 1
msgSend("CAPTURED")
if msg == "DONE":
break
# exit
camera.close()
SERVER.close()
|
the-stack_0_17677 | import datetime
import logging
import math
import sys
import textwrap
import time
from pathlib import Path
from typing import Union
from amset.constants import output_width
__author__ = "Alex Ganose"
__maintainer__ = "Alex Ganose"
__email__ = "[email protected]"
logger = logging.getLogger(__name__)
def initialize_amset_logger(
directory: Union[str, Path] = ".",
filename: Union[str, Path, bool] = "amset.log",
level: int = logging.INFO,
print_log: bool = True,
) -> logging.Logger:
"""Initialize the default logger with stdout and file handlers.
Args:
directory: Path to the folder where the log file will be written.
filename: The log filename. If False, no log will be written.
level: The log level.
print_log: Whether to print the log to the screen.
Returns:
A logging instance with customized formatter and handlers.
"""
log = logging.getLogger("amset")
log.setLevel(level)
log.handlers = [] # reset logging handlers if they already exist
screen_formatter = WrappingFormatter(fmt="%(message)s")
file_formatter = WrappingFormatter(fmt="%(message)s", simple_ascii=True)
if filename is not False:
handler = logging.FileHandler(Path(directory) / filename, mode="w")
handler.setFormatter(file_formatter)
log.addHandler(handler)
if print_log:
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(screen_formatter)
log.addHandler(screen_handler)
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
now = datetime.datetime.now()
exit_msg = "amset exiting on {} at {}".format(
now.strftime("%d %b %Y"), now.strftime("%H:%M")
)
log.error(
f"\n ERROR: {exit_msg}",
exc_info=(exc_type, exc_value, exc_traceback),
)
sys.excepthook = handle_exception
return log
class WrappingFormatter(logging.Formatter):
def __init__(
self, fmt=None, datefmt=None, style="%", width=output_width, simple_ascii=False
):
super().__init__(fmt=fmt, datefmt=datefmt, style=style)
self.simple_ascii = simple_ascii
self.wrapper = textwrap.TextWrapper(
width=width,
subsequent_indent=" ",
replace_whitespace=True,
drop_whitespace=False,
)
def format(self, record):
text = super().format(record)
if "└" in text or "├" in text:
# don't have blank time when reporting list
text = " " + text
else:
text = "\n" + "\n".join(
[self.wrapper.fill(" " + s) for s in text.splitlines()]
)
if self.simple_ascii:
return self.make_simple_ascii(text)
return text
@staticmethod
def make_simple_ascii(text):
replacements = {
"├──": "-",
"│": " ",
"└──": "-",
fancy_logo: simple_logo,
"ᵢᵢ": "_i",
"ħω": "hbar.omega",
"cm²/Vs": "cm2/Vs",
"β²": "b2",
"a₀⁻²": "a^-2",
"cm⁻³": "cm-3",
"–": "-",
"₀": "0",
"₁": "1",
"₂": "2",
"₃": "3",
"₄": "4",
"₅": "5",
"₆": "6",
"₇": "7",
"₈": "8",
"₉": "8",
"\u0305": "-",
"π": "pi",
"ħ": "h",
"ω": "w",
"α": "a",
"β": "b",
"γ": "y",
"°": "deg",
"Å": "angstrom",
}
for initial, final in replacements.items():
text = text.replace(initial, final)
return text
def log_time_taken(t0: float):
logger.info(f" └── time: {time.perf_counter() - t0:.4f} s")
def log_banner(text):
width = output_width - 2
nstars = (width - (len(text) + 2)) / 2
logger.info(
"\n{} {} {}".format("~" * math.ceil(nstars), text, "~" * math.floor(nstars))
)
def log_list(list_strings, prefix=" ", level=logging.INFO):
for i, text in enumerate(list_strings):
if i == len(list_strings) - 1:
pipe = "└"
else:
pipe = "├"
logger.log(level, f"{prefix}{pipe}── {text}")
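# Illustrative example (not part of the original module):
#   log_list(["loading band structure", "interpolating"])
# logs the entries as
#   ├── loading band structure
#   └── interpolating
# (each line prefixed with two spaces by default).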
fancy_logo = """ █████╗ ███╗ ███╗███████╗███████╗████████╗
██╔══██╗████╗ ████║██╔════╝██╔════╝╚══██╔══╝
███████║██╔████╔██║███████╗█████╗ ██║
██╔══██║██║╚██╔╝██║╚════██║██╔══╝ ██║
██║ ██║██║ ╚═╝ ██║███████║███████╗ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝╚══════╝ ╚═╝
"""
simple_logo = r""" /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$
/$$__ $$| $$$ /$$$ /$$__ $$| $$_____/|__ $$__/
| $$ \ $$| $$$$ /$$$$| $$ \__/| $$ | $$
| $$$$$$$$| $$ $$/$$ $$| $$$$$$ | $$$$$ | $$
| $$__ $$| $$ $$$| $$ \____ $$| $$__/ | $$
| $$ | $$| $$\ $ | $$ /$$ \ $$| $$ | $$
| $$ | $$| $$ \/ | $$| $$$$$$/| $$$$$$$$ | $$
|__/ |__/|__/ |__/ \______/ |________/ |__/
"""
|
the-stack_0_17678 | """Represents an invitation returned to the introduction service."""
from marshmallow import EXCLUDE, fields
from .....messaging.agent_message import AgentMessage, AgentMessageSchema
from ....connections.v1_0.messages.connection_invitation import (
ConnectionInvitation,
ConnectionInvitationSchema,
)
from ..message_types import INVITATION, PROTOCOL_PACKAGE
HANDLER_CLASS = f"{PROTOCOL_PACKAGE}.handlers.invitation_handler.InvitationHandler"
class Invitation(AgentMessage):
"""Class representing an invitation returned to the introduction service."""
class Meta:
"""Metadata for an invitation."""
handler_class = HANDLER_CLASS
message_type = INVITATION
schema_class = "InvitationSchema"
def __init__(
self, *, invitation: ConnectionInvitation = None, message: str = None, **kwargs
):
"""
Initialize invitation object.
Args:
invitation: The connection invitation
message: Comments on the introduction
"""
super().__init__(**kwargs)
self.invitation = invitation
self.message = message
class InvitationSchema(AgentMessageSchema):
"""Invitation request schema class."""
class Meta:
"""Invitation request schema metadata."""
model_class = Invitation
unknown = EXCLUDE
invitation = fields.Nested(ConnectionInvitationSchema(), required=True)
message = fields.Str(
required=False,
description="Comments on the introduction",
example="Hello Bob, it's Charlie as Alice mentioned",
allow_none=True,
)
|
the-stack_0_17681 | from abc import ABC, abstractmethod
from data_generators.basic_generator import *
from data_generators.standard_generator import StandardDataGenerator
class Environment(ABC):
"""Environment abstract base class.
    The constructor loads all the basic data from the given JSON source in the given mode."""
def __init__(self, mode='all', bid=None, src='src/basic003.json', generator='basic'):
if generator == 'basic':
self.data_gen = BasicDataGenerator(src)
elif generator == 'standard':
self.data_gen = StandardDataGenerator(src)
else:
raise NotImplementedError
self.bids = self.data_gen.get_bids()
self.prices = self.data_gen.get_prices()
self.margins = self.data_gen.get_margins()
self.n_clicks = self.data_gen.get_daily_clicks(mode=mode)
if bid is not None:
self.cpc = self.data_gen.get_costs_per_click(mode=mode, bid=bid)
self.conv_rates = self.data_gen.get_conversion_rates(mode=mode, bid=bid)
self.tau = self.data_gen.get_future_purchases(mode=mode, bid=bid)
self.features = self.data_gen.get_features()
self.customer_classes = self.data_gen.get_classes()
@abstractmethod
def round(self, pulled_arm):
"""Play a single round of the environment"""
pass
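# Illustrative sketch (not part of the original module): a minimal concrete
# environment. Indexing conv_rates by (customer class, arm) is an assumption
# about the data generator's layout.
class ExampleBernoulliEnvironment(Environment):
    """Toy environment returning a Bernoulli conversion outcome for the pulled price arm."""
    def round(self, pulled_arm):
        import numpy as np  # local import; the wildcard imports above may already provide it
        p = self.conv_rates[0][pulled_arm]  # conversion rate of the first customer class (assumption)
        return np.random.binomial(1, p)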
|
the-stack_0_17682 | import itertools
import numpy as np
import pytest
import matplotlib.pyplot as plt
import cirq
import examples.basic_arithmetic
import examples.bell_inequality
import examples.bernstein_vazirani
import examples.bcs_mean_field
import examples.bristlecone_heatmap_example
import examples.cross_entropy_benchmarking_example
import examples.deutsch
import examples.grover
import examples.hello_qubit
import examples.hhl
import examples.noisy_simulation_example
import examples.phase_estimator
import examples.place_on_bristlecone
import examples.qaoa
import examples.quantum_fourier_transform
import examples.quantum_teleportation
import examples.qubit_characterizations_example
import examples.shor
import examples.superdense_coding
import examples.swap_networks
def test_example_runs_bernstein_vazirani():
examples.bernstein_vazirani.main(qubit_count=3)
# Check empty oracle case. Cover both biases.
a = cirq.NamedQubit('a')
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], False)) == []
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], True)) == [cirq.X(a)]
def test_example_runs_deutsch():
examples.deutsch.main()
def test_example_runs_hello_line():
examples.place_on_bristlecone.main()
def test_example_runs_hello_qubit():
examples.hello_qubit.main()
def test_example_runs_bell_inequality():
examples.bell_inequality.main()
def test_example_runs_quantum_fourier_transform():
examples.quantum_fourier_transform.main()
def test_example_runs_bcs_mean_field():
examples.bcs_mean_field.main()
def test_example_runs_grover():
examples.grover.main()
def test_example_runs_basic_arithmetic():
examples.basic_arithmetic.main(n=2)
def test_example_runs_phase_estimator():
examples.phase_estimator.main(qnums=(2,), repetitions=2)
def test_example_runs_bristlecone_heatmap():
plt.switch_backend('agg')
examples.bristlecone_heatmap_example.main()
def test_example_runs_qaoa():
examples.qaoa.main(repetitions=10, maxiter=5)
def test_example_runs_quantum_teleportation():
expected, teleported = examples.quantum_teleportation.main()
assert np.all(np.isclose(expected, teleported, atol=1e-4))
def test_example_runs_superdense_coding():
examples.superdense_coding.main()
def test_example_runs_hhl():
examples.hhl.main()
def test_example_runs_qubit_characterizations():
examples.qubit_characterizations_example.main()
def test_example_swap_networks():
examples.swap_networks.main()
def test_example_cross_entropy_benchmarking():
examples.cross_entropy_benchmarking_example.main(repetitions=10,
num_circuits=2,
cycles=[2, 3, 4])
def test_example_noisy_simulation():
examples.noisy_simulation_example.main()
def test_example_shor_modular_exp_register_size():
with pytest.raises(ValueError):
_ = examples.shor.ModularExp(target=cirq.LineQubit.range(2),
exponent=cirq.LineQubit.range(2, 5),
base=4,
modulus=5)
def test_example_shor_modular_exp_register_type():
operation = examples.shor.ModularExp(target=cirq.LineQubit.range(3),
exponent=cirq.LineQubit.range(3, 5),
base=4,
modulus=5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3))
with pytest.raises(ValueError):
_ = operation.with_registers(1, cirq.LineQubit.range(3, 6), 4, 5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3),
cirq.LineQubit.range(3, 6),
cirq.LineQubit.range(6, 9), 5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3),
cirq.LineQubit.range(3, 6), 4,
cirq.LineQubit.range(6, 9))
def test_example_shor_modular_exp_registers():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
assert operation.registers() == (target, exponent, 4, 5)
new_target = cirq.LineQubit.range(5, 8)
new_exponent = cirq.LineQubit.range(8, 12)
new_operation = operation.with_registers(new_target, new_exponent, 6, 7)
assert new_operation.registers() == (new_target, new_exponent, 6, 7)
def test_example_shor_modular_exp_diagram():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit, """
0: ───ModularExp(t*4**e % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
│
3: ───e0───────────────────────
│
4: ───e1───────────────────────
""")
operation = operation.with_registers(target, 2, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit, """
0: ───ModularExp(t*4**2 % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
""")
def assert_order(r: int, x: int, n: int) -> None:
"""Assert that r is the order of x modulo n."""
y = x
for _ in range(1, r):
assert y % n != 1
y *= x
assert y % n == 1
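# e.g. assert_order(4, 2, 5) passes: 2**1..3 mod 5 give 2, 4, 3, while 2**4 mod 5 == 1.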
@pytest.mark.parametrize('x, n', ((2, 3), (5, 6), (2, 7), (6, 7), (5, 8),
(6, 11), (6, 49), (7, 810)))
def test_example_shor_naive_order_finder(x, n):
r = examples.shor.naive_order_finder(x, n)
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((2, 3), (5, 6), (2, 7), (6, 7), (5, 8)))
def test_example_shor_quantum_order_finder(x, n):
r = None
for _ in range(15):
r = examples.shor.quantum_order_finder(x, n)
if r is not None:
break
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_naive_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.naive_order_finder(x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_quantum_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.quantum_order_finder(x, n)
@pytest.mark.parametrize('n', (4, 6, 15, 125, 101 * 103, 127 * 127))
def test_example_shor_find_factor_with_composite_n_and_naive_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.naive_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize('n', (4, 6, 15, 125))
def test_example_shor_find_factor_with_composite_n_and_quantum_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.naive_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize(
'n, order_finder',
itertools.product(
(2, 3, 5, 11, 101, 127, 907),
(examples.shor.naive_order_finder, examples.shor.quantum_order_finder)))
def test_example_shor_find_factor_with_prime_n(n, order_finder):
d = examples.shor.find_factor(n, order_finder)
assert d is None
@pytest.mark.parametrize('n', (2, 3, 15, 17, 2**89 - 1))
def test_example_runs_shor_valid(n):
examples.shor.main(n=n)
@pytest.mark.parametrize('n', (-1, 0, 1))
def test_example_runs_shor_invalid(n):
with pytest.raises(ValueError):
examples.shor.main(n=n)
|
the-stack_0_17683 | # coding: utf-8
from django.urls import path,re_path,include
from . import views
app_name = 'reviews.conducting'
urlpatterns = [
re_path(r'^add_source_string/$', views.add_source_string, name='add_source_string'),
re_path(r'^save_source_string/$', views.save_source_string, name='save_source_string'),
re_path(r'^remove_source_string/$', views.remove_source_string, name='remove_source_string'),
re_path(r'^import_base_string/$', views.import_base_string, name='import_base_string'),
re_path(r'^search_scopus/$', views.search_scopus, name='search_scopus'),
re_path(r'^search_science_direct/$', views.search_science_direct, name='search_science_direct'),
re_path(r'^new_article/$', views.new_article, name='new_article'),
re_path(r'^import/bibtex_file/$', views.import_bibtex, name='import_bibtex'),
re_path(r'^import/bibtex_raw_content/$', views.import_bibtex_raw_content, name='import_bibtex_raw_content'),
re_path(r'^source_articles/$', views.source_articles, name='source_articles'),
re_path(r'^article_details/$', views.article_details, name='article_details'),
re_path(r'^find_duplicates/$', views.find_duplicates, name='find_duplicates'),
re_path(r'^resolve_duplicated/$', views.resolve_duplicated, name='resolve_duplicated'),
re_path(r'^export_results/$', views.export_results, name='export_results'),
re_path(r'^resolve_all/$', views.resolve_all, name='resolve_all'),
re_path(r'^save_article_details/$', views.save_article_details, name='save_article_details'),
re_path(r'^save_quality_assessment/$', views.save_quality_assessment, name='save_quality_assessment'),
re_path(r'^quality_assessment_detailed/$', views.quality_assessment_detailed, name='quality_assessment_detailed'),
re_path(r'^quality_assessment_summary/$', views.quality_assessment_summary, name='quality_assessment_summary'),
re_path(r'^multiple_articles_action/remove/$', views.multiple_articles_action_remove, name='multiple_articles_action_remove'),
re_path(r'^multiple_articles_action/accept/$', views.multiple_articles_action_accept, name='multiple_articles_action_accept'),
re_path(r'^multiple_articles_action/reject/$', views.multiple_articles_action_reject, name='multiple_articles_action_reject'),
re_path(r'^multiple_articles_action/duplicated/$', views.multiple_articles_action_duplicated, name='multiple_articles_action_duplicated'),
#re_path(r'^articles/upload/$', 'articles_upload', name='articles_upload'),
re_path(r'^save_data_extraction/$', views.save_data_extraction, name='save_data_extraction'),
re_path(r'^save_data_extraction_status/$', views.save_data_extraction_status, name='save_data_extraction_status'),
re_path(r'^articles_selection_chart/$', views.articles_selection_chart, name='articles_selection_chart'),
re_path(r'^articles_per_year/$', views.articles_per_year, name='articles_per_year'),
re_path(r'^export_data_extraction/$', views.export_data_extraction, name='export_data_extraction')
]
|
the-stack_0_17685 | from logging import getLogger
from drf_yasg.utils import swagger_auto_schema
from hexbytes import HexBytes
from rest_framework import status
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from gnosis.eth.constants import NULL_ADDRESS
from .serializers import (SafeCreation2ResponseSerializer,
SafeCreation2Serializer,
SafeCreationEstimateResponseSerializer,
SafeCreationEstimateV2Serializer)
from .services.safe_creation_service import SafeCreationServiceProvider
logger = getLogger(__name__)
class SafeCreationEstimateView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = SafeCreationEstimateV2Serializer
@swagger_auto_schema(responses={201: SafeCreationEstimateResponseSerializer(),
400: 'Invalid data',
422: 'Cannot process data'})
def post(self, request, *args, **kwargs):
"""
Estimates creation of a Safe
"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
number_owners = serializer.data['number_owners']
safe_creation_estimates = SafeCreationServiceProvider().estimate_safe_creation_for_all_tokens(number_owners)
safe_creation_estimate_response_data = SafeCreationEstimateResponseSerializer(safe_creation_estimates,
many=True)
return Response(status=status.HTTP_200_OK, data=safe_creation_estimate_response_data.data)
else:
return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data=serializer.errors)
class SafeCreationView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = SafeCreation2Serializer
@swagger_auto_schema(responses={201: SafeCreation2ResponseSerializer(),
400: 'Invalid data',
422: 'Cannot process data'})
def post(self, request, *args, **kwargs):
"""
Begins creation of a Safe
"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
salt_nonce, owners, threshold, payment_token = (serializer.data['salt_nonce'], serializer.data['owners'],
serializer.data['threshold'],
serializer.data['payment_token'])
safe_creation_service = SafeCreationServiceProvider()
safe_creation = safe_creation_service.create2_safe_tx(salt_nonce, owners, threshold, payment_token)
safe_creation_response_data = SafeCreation2ResponseSerializer(data={
'safe': safe_creation.safe.address,
'master_copy': safe_creation.master_copy,
'proxy_factory': safe_creation.proxy_factory,
'payment': safe_creation.payment,
'payment_token': safe_creation.payment_token or NULL_ADDRESS,
'payment_receiver': safe_creation.payment_receiver or NULL_ADDRESS,
'setup_data': HexBytes(safe_creation.setup_data).hex(),
'gas_estimated': safe_creation.gas_estimated,
'gas_price_estimated': safe_creation.gas_price_estimated,
})
safe_creation_response_data.is_valid(raise_exception=True)
return Response(status=status.HTTP_201_CREATED, data=safe_creation_response_data.data)
else:
return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY,
data=serializer.errors)
|
the-stack_0_17686 | #!/usr/bin/env python
import os
import sys
import re
# from distutils.core import setup
from setuptools import setup
VERSION = "0.9.8"
if __name__ == "__main__":
if "--format=msi" in sys.argv or "bdist_msi" in sys.argv:
# hack the version name to a format msi doesn't have trouble with
VERSION = VERSION.replace("-alpha", "a")
VERSION = VERSION.replace("-beta", "b")
VERSION = VERSION.replace("-rc", "r")
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
with open(fname, "r") as readme:
long_desc = readme.read()
# Strip out CI badges for PyPI releases
long_desc = re.sub(r"\[!\[Build Status(.*?)\n", "", long_desc)
setupdata = {
"name": "PySDL2",
"version": VERSION,
"description": "Python SDL2 bindings",
"long_description": long_desc,
"long_description_content_type": "text/markdown",
"author": "Marcus von Appen",
"author_email": "[email protected]",
"license": "Public Domain / zlib",
"url": "https://github.com/marcusva/py-sdl2",
"download_url": "https://pypi.python.org/pypi/PySDL2",
"package_dir": {"sdl2.examples": "examples"},
"package_data": {"sdl2.test": ["resources/*.*"],
"sdl2.examples": ["resources/*.*"]},
"packages": ["sdl2",
"sdl2.ext",
"sdl2.test",
"sdl2.examples"
],
"classifiers": [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: Public Domain",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
],
}
setup(**setupdata)
|
the-stack_0_17688 | import torch.nn.functional as F
from torch import nn
class Classifier(nn.Module):
"""
    A class used to build a neural network for classification of MNIST digits
...
Methods
-------
forward()
        Forward pass through the network; returns log-probabilities and penultimate-layer features
"""
def __init__(self):
super().__init__()
# Define fully connected layers
self.fc1 = nn.Linear(28*28, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
# Dropout module with 0.2 drop probability
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
""" Forward pass through the network, returns the output logits """
# Flattening input tensor except for the minibatch dimension
x = x.view(x.shape[0], -1)
# Fully connected layers with dropout
x = self.dropout(F.relu(self.fc1(x)))
x = self.dropout(F.relu(self.fc2(x)))
x = self.dropout(F.relu(self.fc3(x)))
features = x
# Output so no dropout here
x = F.log_softmax(self.fc4(x), dim=1)
return x, features
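# Illustrative usage sketch (not part of the original module); the batch size
# and the MNIST-like input shape are assumptions.
if __name__ == "__main__":
    import torch
    model = Classifier()
    dummy = torch.randn(4, 1, 28, 28)       # a fake batch of four MNIST-sized images
    log_probs, features = model(dummy)
    print(log_probs.shape, features.shape)  # -> torch.Size([4, 10]) torch.Size([4, 64])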
|
the-stack_0_17689 | import argparse
parser = argparse.ArgumentParser()
def setarg(parser, argname, dfl):
parser.add_argument('-'+argname, dest=argname,
action='store_true')
parser.add_argument('-no_'+argname, dest=argname,
action='store_false')
    parser.set_defaults(**{argname: dfl})
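# Example: setarg(parser, 'minmax', False) registers a pair of boolean flags,
#   -minmax    -> args.minmax = True
#   -no_minmax -> args.minmax = False
# with False used as the default when neither flag is passed.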
parser.add_argument('-bs', type=int, default=4)
parser.add_argument('-epoch', type=int, default=8)
parser.add_argument('-lr', type=float, default=5e-5)
# normalize output on the tmean matrix, to have min = 0 and max = 1
setarg(parser, 'minmax',False)
# normalize input point cloud to have every coordinate between 0 and 1
setarg(parser, 'minmax3dimage',False)
# normalize the input point cloud so that it is in the canonical view
setarg(parser, 'normalize',False)
# center the input point cloud so that its center of mass is at the origin
setarg(parser, 'center',False)
# linearly downsample input point cloud
parser.add_argument('-downsample', type=int, default=1)
# use f_n or f that was normalized to the canonical view before processing
setarg(parser, 'classicnorm',False)
# cut the number of maximum SH amplitude to regress
parser.add_argument('-ampl', type=int, default=441)
# center the seed on the input image and crop to this width
parser.add_argument('-cmscrop', type=int, default=0)
parser.add_argument('-cencrop', type=int, default=700)
# rescale input image
parser.add_argument('-rescale', type=int, default=500)
setarg(parser, 'use_adasum',False)
parser.add_argument(
'-gradient_predivide_factor', type=float, default=1.0,
help='apply gradient predivide factor in optimizer (default: 1.0)')
# name of experiment directory
parser.add_argument('-expnum', type=str, default='111')
# hidden_dim - size of appendix FC layers
parser.add_argument(
'-hidden_dim', nargs='+', type=int, default=[5000,2500,1000,441])
parser.add_argument(
'-chidden_dim', nargs='+', type=int, default=[96, 128, 256, 256, 256])
parser.add_argument('-kernel_sizes', nargs='+', default=[7, 3, 3, 3, 3, 3])
# number of input images that will be loaded
parser.add_argument('-num_input_images', type=int, default=1)
# name of standard model
parser.add_argument('-model_name', type=str, default='')
parser.add_argument('-netname', nargs='+', default=['cnet'])
setarg(parser, 'use_pretrained',False)
parser.add_argument('-weight_decay', type=float, default=0)
# used to load images all in parallel, or merge them after output
# "separate" merging order means to get from Dataloader tensor like as for
# color channel, that [15, 3, 1000, 1800], but then reshape this tensor to
# the [45, 1, 1000, 1800] and work with it like with separate data points
parser.add_argument('-merging', type=str,
choices=['color', 'latent', 'batch'], default='batch')
# take the input image from a random angle; if not, the image will
# be taken relative to the horizontal pose
setarg(parser, 'rand_angle',False)
# number of experiment from phenoseeder
parser.add_argument('-specie', type=str, default='598')
# number of sampled directions to make subsampling after f_n
parser.add_argument('-num_sam_points', type=int, default=500)
# the loss is calculated on 'pc', 'f' or 'f_n'
parser.add_argument('-lb', type=str, default='f')
# short description what exactly this job is up for
parser.add_argument('-expdescr', type=str, default='')
# use a csv file with paths to all input files together with
# the horizontal image index
setarg(parser, 'use_existing_csv',True)
setarg(parser, 'use_sep_csv',True)
# instead of input files, noise is generated with random numbers
setarg(parser, 'noise_input',False)
# use convolutional part of the network or not
setarg(parser, 'haf',True)
# type of input data. can be 'img', 'f' or 'pc'
parser.add_argument('-inputt', type=str, default='img')
# normalize to make min = 0 and max = 1 for input f
setarg(parser, 'minmax_f',True)
# criterion to calculate loss
parser.add_argument('-criterion', type=str, default='L1')
# number of GPUs is used in the job
parser.add_argument('-ngpu', type=int, default=4)
# type of parallelization. 'hvd' means horovod, or 't'
parser.add_argument('-parallel', type=str, choices=['horovod', 'torch'],
default='hvd')
# in case loading standard model, it can be use as feature extracting
# (when freezeing all layers except the last one)
setarg(parser, 'feature_extract',False)
# if only one image is loaded as input, it will always be the image with
# index 000_rotation
# if more than one image is loaded, the image indices are spread evenly in
# the range (0, 36)
# if false, images are taken so that the first image in views has the
# horizontal pose
setarg(parser, 'zero_angle',True)
# is used for testing computing time,
# where all needed files including data in one folder
parser.add_argument('-single_folder',
dest='single_folder', action='store_true')
parser.set_defaults(single_folder=False)
parser.add_argument('-noise_output', dest='noise_output',
action='store_true')
parser.set_defaults(noise_output=False)
# only log will be in the output
setarg(parser, 'save_output',True)
# type of data that is loaded for the ground truth (GT). For example,
# single_f_n means that only *f_n files will be used for GT in the
# dataloader, possibly with a single loading of y_n; a separate
# transform_f_n.py is used so that no more than necessary is loaded.
# If the GT is loaded not from the dataloader but from a csv or h5 file,
# there is the option "single_file"
parser.add_argument('-gttype', type=str,
choices=['single_file'],
default='single_file')
# name of csv that will be used for loading GT
# it can be 598csv9 for original pose and 598csv11 for normalized pose
parser.add_argument('-csvname', type=str, default='598csv9')
# name of the csv which will be used for loading data
# choices are : 598frame for full or 598frame_dummy
parser.add_argument('-dfname', type=str, default='598frame')
# factor on which all output point cloud data will be normalized
parser.add_argument('-pscale', type=int, default=100)
# if view_sep = True and more than one image is loaded,
# all input images will be treated as separate data elements
# and a new dataframe will be created
setarg(parser, 'view_sep',False)
# rotate directions together with the angle from which
# the current image was taken
setarg(parser, 'rot_dirs',False)
# for dataloader
parser.add_argument('-num_workers', type=int, default=0)
setarg(parser, 'pin_memory',False)
# manually calculate distance vector F out of point cloud output
setarg(parser, 'man_dist',False)
setarg(parser, 'use_cuda',True)
parser.add_argument('-machine', type=str,
choices=['jureca', 'workstation', 'lenovo', 'huawei'],
default='jureca')
setarg(parser, 'maintain',False)
setarg(parser, 'maintain_line',False)
parser.add_argument('-wandb', type=str, default="")
setarg(parser, 'measure_time',False)
setarg(parser, 'rotate_output',False)
parser.add_argument('-transappendix', type=str, default="_image")
# how often to save intermediate batch output within an epoch
parser.add_argument('-batch_output', type=int, default=2)
# minmax fun for current ground truth preparation before training
parser.add_argument('-minmax_fn', type=str,
choices=['min,max','mean,std', ''], default='')
parser.add_argument('-updateFraction', type=float, default=3)
parser.add_argument('-standardize', nargs='+', default=255)
# parser.add_argument('-standardize', default=(18.31589541, 39.63290785))
# if rmdirname is True, delete dirname content and use this directory again
# for saving output
setarg(parser, 'rmdirname', False)
parser.add_argument('-steplr', nargs='+', type=float, default=(30,1))
parser.add_argument('-outputt', type=str,
choices=['points','pose6', 'eul', 'orient', 'cms'],
default='points')
parser.add_argument('-ufmodel', type=int, default=100000)
parser.add_argument('-framelim', type=int, default=int(1e20))
parser.add_argument('-conTrain', type=str, default='')
# how often to print loss in the log output
parser.add_argument('-print_minibatch', type=int, default=10)
# for orientation there are two valid ground truths, because it is a ray;
# that is why augmentation of the ground truth is needed for evaluation
parser.add_argument('-aug_gt', nargs='+', type=str, default='')
parser.add_argument('-datapath', type=str,
default='C:/cherepashkin1/phenoseed')
# job name is used to create corresponding subdirectory
parser.add_argument('-jobname', type=str, default='')
# real job of the executed sh file. it is needed to copy sh file to the new
# directory
parser.add_argument('-realjobname', type=str, default='')
parser.add_argument('-jobdir', type=str, default='')
setarg(parser, 'loadh5', False)
opt = parser.parse_args() |
the-stack_0_17690 | import random
import matplotlib
import numpy as np
from sklearn.model_selection import KFold
matplotlib.use('Agg') # todo: remove or change if not working
def augment(X):
if X.ndim == 1:
return np.concatenate((X, [1]))
else:
pad = np.ones((1, X.shape[1]))
return np.concatenate((X, pad), axis=0)
def onehot_decode(X, axis):
return np.argmax(X, axis=axis)
def onehot_encode(L, c):
if isinstance(L, int):
L = [L]
n = len(L)
out = np.zeros((c, n))
out[L, range(n)] = 1
return np.squeeze(out)
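# Illustrative example (not part of the original module):
#   onehot_encode([0, 2], 3) -> [[1., 0.], [0., 0.], [0., 1.]]  (one column per sample)
#   onehot_encode(1, 3)      -> [0., 1., 0.]                    (squeezed to a vector)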
# normalize inputs
def normalize(x, axis=1):
"""
    Normalize by rows (axis=1): each row is scaled to zero mean and unit variance, e.g.
x1 - 5 4 68 0
x2 - 8 6 5 0
"""
_avg = x.mean(axis=axis, keepdims=True)
_std = x.std(axis=axis, keepdims=True)
return (x - _avg) / _std
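# Illustrative example (not part of the original module), normalizing each row:
#   normalize(np.array([[1., 3.], [2., 6.]])) -> [[-1., 1.], [-1., 1.]]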
def load_data(path):
"""
Load data, convert classes to ints, split inputs and labels.
:param path: path to data
:return:
"""
letter_map = {
'A': 0,
'B': 1,
'C': 2
}
convert_letter = lambda x: letter_map[x.decode('UTF-8')]
data = np.loadtxt(path, skiprows=1, converters={2: convert_letter}).T
inputs = data[:-1]
labels = data[-1].astype(int)
return inputs, labels
def split_train_test(inputs, labels, ratio=0.8):
"""
Randomly shuffle dataset and split it to training and testing.
:return: tuple with training/testing inputs/labels
"""
count = inputs.shape[1]
ind = np.arange(count)
random.shuffle(ind)
split = int(count * ratio)
train_ind = ind[:split]
test_ind = ind[split:]
train_inputs = inputs[:, train_ind]
train_labels = labels[train_ind]
test_inputs = inputs[:, test_ind]
test_labels = labels[test_ind]
return train_inputs, train_labels, test_inputs, test_labels
def k_fold_cross_validation(clf, inputs, labels, n, verbosity):
kf = KFold(n_splits=n)
i = 1
train_acc, train_rmse = [], []
test_acc, test_rmse = [], []
for train, validate in kf.split(inputs.T):
train_fold_inputs, train_fold_labels = inputs[:, train], labels[train]
validate_fold_inputs, validate_fold_labels = inputs[:, validate], labels[validate]
trainCE, trainRE = clf.train(train_fold_inputs, train_fold_labels)
testCE, testRE = clf.test(validate_fold_inputs, validate_fold_labels)
if verbosity > 1:
print('Fold n.{}: CE = {:6.2%}, RE = {:.5f}'.format(i, testCE, testRE))
train_acc.append(trainCE)
train_rmse.append(trainRE)
test_acc.append(testCE)
test_rmse.append(testRE)
i += 1
# reset weights on classifier for evaluating next fold
clf.init_weights()
if verbosity > 0:
print('After {n}-fold cross-validation'.format(n=n))
print('CEs - AVG - {avg:.5f}, STD - {std:.5f}'.format(avg=np.mean(test_acc),
std=np.std(test_acc)))
print('REs - AVG - {avg:.5f}, STD - {std:.5f}'.format(avg=np.mean(test_rmse),
std=np.std(test_rmse)))
train_acc = np.mean(train_acc, axis=0)
train_rmse = np.mean(train_rmse, axis=0)
return list(train_acc), list(train_rmse), np.mean(test_acc), np.mean(test_rmse)
def save_confusion_matrix(true_labels, predicted_labels, n_classes):
confusion_matrix = np.zeros((n_classes, n_classes))
for g_true, predict in zip(true_labels, predicted_labels):
confusion_matrix[g_true, predict] += 1
with open('results/confusion.txt', 'w') as f:
for row in confusion_matrix:
f.write(str(row) + '\n')
|
the-stack_0_17693 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import html.parser
class ContentParser(html.parser.HTMLParser):
def __init__(self, begin_tag, stop_tag):
html.parser.HTMLParser.__init__(self)
        tag_template = ('type', 'name', 'attrs', 'contains_me')
        self.begin_tag = dict(zip(tag_template, begin_tag))
        self.begin_tag.setdefault('contains_me', False)
        self.stop_tag = dict(zip(tag_template, stop_tag))
self.stop_tag.setdefault('contains_me', False)
def reset(self):
html.parser.HTMLParser.reset(self)
self.switch_flag = False
self.content = ['']
def begin_now(self):
self.switch_flag = True
return
def stop_now(self):
self.switch_flag = False
return
@staticmethod
def tag_process(tag_type, target_tag, target_action, tag, attrs):
def has_attr(match_attrs, source_attrs):
match_dict = dict(match_attrs)
source_dict = dict(source_attrs)
if 'class' in match_dict:
if 'class' in source_dict:
if set(str.split(match_dict.pop('class'))).issubset(set(str.split(source_dict.pop('class')))):
pass
else:
return False
else:
return False
return set(match_dict.items()).issubset(set(source_dict.items()))
if target_tag['type'] == tag_type:
if tag == target_tag['name']:
if target_tag['attrs'] is None or len(target_tag['attrs']) == 0 or tag_type == 'endtag':
target_action()
return True
else:
if len(target_tag['attrs']) > len(attrs):
return False
else:
if has_attr(target_tag['attrs'], attrs):
target_action()
return True
else:
return False
else:
return False
else:
return False
def pre_tag_process(self, tag_type, tag, attrs = None):
def get_tag_text():
if tag_type == 'endtag':
return '</{0}>'.format(tag)
else:
return self.get_starttag_text()
if self.switch_flag == False:
if self.tag_process(tag_type, self.begin_tag, self.begin_now, tag, attrs) == True:
if self.begin_tag['contains_me'] == False:
self.content = []
else:
self.content = [get_tag_text()]
return True
else:
return False
else:
if self.tag_process(tag_type, self.stop_tag, self.stop_now, tag, attrs) == True:
if self.stop_tag['contains_me'] == False:
return False
else:
self.content.append(get_tag_text())
return True
else:
self.content.append(get_tag_text())
return True
def handle_starttag(self, tag, attrs):
self.pre_tag_process('starttag', tag, attrs)
def handle_endtag(self, tag):
self.pre_tag_process('endtag', tag)
def handle_startendtag(self, tag, attrs):
self.pre_tag_process('startendtag', tag, attrs)
def handle_data(self, data):
if self.switch_flag == False:
return False
else:
self.content.append(data)
return True
def main():
page = '<html><h1 id="q" class="a c b">Title</h1><p>Im a paragraph!</p><p>Another paragraph</p></html>'
myparser = ContentParser(['starttag', 'h1', [('class','a b'), ('id', 'q')], True], ['endtag', 'p', None, True])
myparser.feed(page)
print(''.join(myparser.content))
myparser.reset()
print(myparser.content)
if __name__ == '__main__':
main() |
the-stack_0_17694 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('proposals', '0009_auto_20170209_2326'),
]
operations = [
migrations.RenameField('proposalbase', 'submitted', 'submitted_at'),
migrations.AlterField(
model_name='proposalbase',
name='submitted_at',
field=models.DateTimeField(null=True, editable=False, blank=True),
),
migrations.AddField(
model_name='proposalbase',
name='submitted',
field=models.BooleanField(default=False),
),
]
|
the-stack_0_17695 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import base58
import hashlib
import re
from decimal import Decimal
import simplejson
import binascii
from misc import printdbg, epoch2str
import time
def is_valid_moondex_address(address, network='mainnet'):
# Only public key addresses are allowed
# A valid address is a RIPEMD-160 hash which contains 20 bytes
# Prior to base58 encoding 1 version byte is prepended and
# 4 checksum bytes are appended so the total number of
# base58 encoded bytes should be 25. This means the number of characters
# in the encoding should be about 34 ( 25 * log2( 256 ) / log2( 58 ) ).
moondex_version = 140 if network == 'testnet' else 76
# Check length (This is important because the base58 library has problems
# with long addresses (which are invalid anyway).
if ((len(address) < 26) or (len(address) > 35)):
return False
address_version = None
try:
decoded = base58.b58decode_chk(address)
address_version = ord(decoded[0:1])
except:
# rescue from exception, not a valid Moondex address
return False
if (address_version != moondex_version):
return False
return True
def hashit(data):
return int(hashlib.sha256(data.encode('utf-8')).hexdigest(), 16)
# returns the masternode VIN of the elected winner
def elect_mn(**kwargs):
current_block_hash = kwargs['block_hash']
mn_list = kwargs['mnlist']
# filter only enabled MNs
enabled = [mn for mn in mn_list if mn.status == 'ENABLED']
block_hash_hash = hashit(current_block_hash)
candidates = []
for mn in enabled:
mn_vin_hash = hashit(mn.vin)
diff = mn_vin_hash - block_hash_hash
absdiff = abs(diff)
candidates.append({'vin': mn.vin, 'diff': absdiff})
candidates.sort(key=lambda k: k['diff'])
try:
winner = candidates[0]['vin']
except:
winner = None
return winner
def parse_masternode_status_vin(status_vin_string):
    status_vin_string_regex = re.compile(r'CTxIn\(COutPoint\(([0-9a-zA-Z]+),\s*(\d+)\),')
m = status_vin_string_regex.match(status_vin_string)
# To Support additional format of string return from masternode status rpc.
if m is None:
        status_output_string_regex = re.compile(r'([0-9a-zA-Z]+)\-(\d+)')
m = status_output_string_regex.match(status_vin_string)
txid = m.group(1)
index = m.group(2)
vin = txid + '-' + index
if (txid == '0000000000000000000000000000000000000000000000000000000000000000'):
vin = None
return vin
def create_superblock(proposals, event_block_height, budget_max, sb_epoch_time):
from models import Superblock, GovernanceObject, Proposal
from constants import SUPERBLOCK_FUDGE_WINDOW
# don't create an empty superblock
if (len(proposals) == 0):
printdbg("No proposals, cannot create an empty superblock.")
return None
budget_allocated = Decimal(0)
    fudge = SUPERBLOCK_FUDGE_WINDOW  # fudge-factor to allow for slightly incorrect estimates
payments = []
for proposal in proposals:
fmt_string = "name: %s, rank: %4d, hash: %s, amount: %s <= %s"
# skip proposals that are too expensive...
if (budget_allocated + proposal.payment_amount) > budget_max:
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"skipped (blows the budget)",
)
)
continue
# skip proposals if the SB isn't within the Proposal time window...
window_start = proposal.start_epoch - fudge
window_end = proposal.end_epoch + fudge
printdbg("\twindow_start: %s" % epoch2str(window_start))
printdbg("\twindow_end: %s" % epoch2str(window_end))
printdbg("\tsb_epoch_time: %s" % epoch2str(sb_epoch_time))
if (sb_epoch_time < window_start or sb_epoch_time > window_end):
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"skipped (SB time is outside of Proposal window)",
)
)
continue
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"adding",
)
)
# else add proposal and keep track of total budget allocation
budget_allocated += proposal.payment_amount
payment = {'address': proposal.payment_address,
'amount': "{0:.8f}".format(proposal.payment_amount),
'proposal': "{}".format(proposal.object_hash)}
payments.append(payment)
# don't create an empty superblock
if not payments:
printdbg("No proposals made the cut!")
return None
# 'payments' now contains all the proposals for inclusion in the
# Superblock, but needs to be sorted by proposal hash descending
payments.sort(key=lambda k: k['proposal'], reverse=True)
sb = Superblock(
event_block_height=event_block_height,
payment_addresses='|'.join([pd['address'] for pd in payments]),
payment_amounts='|'.join([pd['amount'] for pd in payments]),
proposal_hashes='|'.join([pd['proposal'] for pd in payments]),
)
printdbg("generated superblock: %s" % sb.__dict__)
return sb
# shims 'til we can fix the moondexd side
def SHIM_serialise_for_moondexd(sentinel_hex):
from models import DASHD_GOVOBJ_TYPES
# unpack
obj = deserialise(sentinel_hex)
# shim for moondexd
govtype = obj[0]
# add 'type' attribute
obj[1]['type'] = DASHD_GOVOBJ_TYPES[govtype]
# superblock => "trigger" in moondexd
if govtype == 'superblock':
obj[0] = 'trigger'
# moondexd expects an array (even though there is only a 1:1 relationship between govobj->class)
obj = [obj]
# re-pack
moondexd_hex = serialise(obj)
return moondexd_hex
# shims 'til we can fix the moondexd side
def SHIM_deserialise_from_moondexd(moondexd_hex):
from models import DASHD_GOVOBJ_TYPES
# unpack
obj = deserialise(moondexd_hex)
# shim from moondexd
# only one element in the array...
obj = obj[0]
# extract the govobj type
govtype = obj[0]
# superblock => "trigger" in moondexd
if govtype == 'trigger':
obj[0] = govtype = 'superblock'
# remove redundant 'type' attribute
if 'type' in obj[1]:
del obj[1]['type']
# re-pack
sentinel_hex = serialise(obj)
return sentinel_hex
# convenience
def deserialise(hexdata):
json = binascii.unhexlify(hexdata)
obj = simplejson.loads(json, use_decimal=True)
return obj
def serialise(dikt):
json = simplejson.dumps(dikt, sort_keys=True, use_decimal=True)
hexdata = binascii.hexlify(json.encode('utf-8')).decode('utf-8')
return hexdata
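# Illustrative example (not part of the original module): serialise/deserialise
# are inverses, e.g. deserialise(serialise({'a': 1})) == {'a': 1}.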
def did_we_vote(output):
from bitcoinrpc.authproxy import JSONRPCException
# sentinel
voted = False
err_msg = ''
try:
detail = output.get('detail').get('moondex.conf')
result = detail.get('result')
if 'errorMessage' in detail:
err_msg = detail.get('errorMessage')
except JSONRPCException as e:
result = 'failed'
err_msg = e.message
# success, failed
printdbg("result = [%s]" % result)
if err_msg:
printdbg("err_msg = [%s]" % err_msg)
voted = False
if result == 'success':
voted = True
# in case we spin up a new instance or server, but have already voted
# on the network and network has recorded those votes
m_old = re.match(r'^time between votes is too soon', err_msg)
m_new = re.search(r'Masternode voting too often', err_msg, re.M)
if result == 'failed' and (m_old or m_new):
printdbg("DEBUG: Voting too often, need to sync w/network")
voted = False
return voted
def parse_raw_votes(raw_votes):
votes = []
for v in list(raw_votes.values()):
(outpoint, ntime, outcome, signal) = v.split(':')
signal = signal.lower()
outcome = outcome.lower()
mn_collateral_outpoint = parse_masternode_status_vin(outpoint)
v = {
'mn_collateral_outpoint': mn_collateral_outpoint,
'signal': signal,
'outcome': outcome,
'ntime': ntime,
}
votes.append(v)
return votes
def blocks_to_seconds(blocks):
"""
Return the estimated number of seconds which will transpire for a given
number of blocks.
"""
return blocks * 2.62 * 60
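# e.g. blocks_to_seconds(10) == 1572.0 seconds, i.e. roughly 26 minutes.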
|
the-stack_0_17696 | import os
import numpy as np
from PIL import Image
data_path = '../data/red-lights'
template_imgs_dir = './templates/red-light'
template_img_files = sorted(os.listdir(template_imgs_dir))
template_img_files = [f for f in template_img_files if '.jpg' in f]
DATA_MEAN = 90
DATA_STD = 65
for i, filename in enumerate(template_img_files):
I = Image.open(os.path.join(data_path, filename))
template = Image.open(os.path.join(template_imgs_dir, filename))
I = np.asarray(I)
template = np.asarray(template)
mean = np.mean(I, axis=(0, 1))
std = np.std(I, axis=(0, 1))
template = (template - mean) / std
# template = (template - np.mean(I)) / np.std(I)
    # template = (template - DATA_MEAN) / DATA_STD
print(filename, mean, std, np.mean(template), np.std(template))
np.save(os.path.join(template_imgs_dir, f'template{i}'), # chop off '.jpg'
template)
|
the-stack_0_17697 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# reana documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 23 14:17:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from __future__ import print_function
import os
import sphinx.environment
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ["image.nonlocal_uri"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_click.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "reana"
copyright = "2017-2020 [email protected]"
author = "[email protected]"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join("..", "reana_client", "version.py"), "rt") as fp:
exec(fp.read(), g)
version = g["__version__"]
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"description": """<p>REANA-Client is a component of the <a
href="http://www.reana.io">REANA</a> reusable and
reproducible research data analysis
platform.</p><p>REANA-Client provides a command-line tool
that allows researchers to submit, run, and manage their
computational workflows.</p>""",
"github_user": "reanahub",
"github_repo": "reana-client",
"github_button": False,
"github_banner": True,
"show_powered_by": False,
"extra_nav_links": {
"REANA@DockerHub": "https://hub.docker.com/u/reanahub/",
"REANA@GitHub": "https://github.com/reanahub",
"REANA@Twitter": "https://twitter.com/reanahub",
"REANA@Web": "http://www.reana.io",
},
"nosidebar": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "reanadoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "reana.tex", "reana Documentation", "[email protected]", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "reana", "reana Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"reana",
"reana Documentation",
author,
"reana",
"One line description of project.",
"Miscellaneous",
),
]
|
the-stack_0_17699 | """
Originally a mod of ajpalkovic's plugin https://github.com/ajpalkovic, though it doesn't appear to be available anymore.
Highlights all other instances of the selected word (or optional word under cursor):
```
// Require the word to be selected.
// Disable to highlight word with no selection.
"require_word_select": true,
```
Can be configured to highlight multiple word sets simultaneously with multiple cursors.
When doing multiple cursors, you can highlight each in their own color
(limited by available theme colors):
```
// Define scopes for highlights
// The more you define, the more selections you can do
"highlight_scopes": ["string", "keyword", "constant.language"],
```
Style of highlights can also be controlled:
```
// Highlight style (solid|outline|underline|thin_underline|squiggly|stippled)
"highlight_style": "outline",
```
Optionally can disable highlight if number of selections in view are greater than a certain value:
```
// If selection threshold is greater than
// the specified setting, don't highlight words.
// -1 means no threshold.
"selection_threshold": -1
```
"""
import sublime
import sublime_plugin
from time import time, sleep
import threading
KEY = "HighlightCurrentWord"
SCOPE = 'comment'
reload_flag = False
highlight_word = None
settings = None
if 'hw_thread' not in globals():
hw_thread = None
def debug(s):
"""Debug logging."""
print("HighlightWord: " + s)
def highlight_style(option):
"""Configure style of region based on option."""
style = 0
if option == "outline":
style |= sublime.DRAW_NO_FILL
elif option == "none":
style |= sublime.HIDDEN
elif option == "underline":
style |= sublime.DRAW_EMPTY_AS_OVERWRITE
elif option == "thin_underline":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_SOLID_UNDERLINE
elif option == "squiggly":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_SQUIGGLY_UNDERLINE
elif option == "stippled":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_STIPPLED_UNDERLINE
return style
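# Example (illustrative): highlight_style("thin_underline") returns
# DRAW_NO_FILL | DRAW_NO_OUTLINE | DRAW_SOLID_UNDERLINE, while an option such as
# "solid" matches no branch and returns 0, i.e. Sublime's default filled region.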
def clear_regions(view=None):
"""Clear regions."""
if view is None:
win = sublime.active_window()
if win is not None:
view = win.active_view()
if view is not None:
regions = view.settings().get('highlight_word.regions', 0)
if highlight_word is not None:
for count in range(0, regions):
view.erase_regions(KEY + str(count))
view.settings().set('highlight_word.regions', 0)
def underline(regions):
"""Convert to empty regions."""
new_regions = []
for region in regions:
start = region.begin()
end = region.end()
while start < end:
new_regions.append(sublime.Region(start))
start += 1
return new_regions
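# Example (illustrative): underline([sublime.Region(5, 8)]) returns
# [Region(5), Region(6), Region(7)] -- one empty region per character, which is
# how the one-pixel "underline" style is drawn.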
# The search is performed half a second after the most recent event
# in order to prevent the search happening on every key press.
# Each of the event handlers simply marks the time of the most recent
# event and a timer periodically executes do_search
class HighlightWord(object):
"""HighlightWord."""
def __init__(self):
"""Setup."""
self.previous_region = sublime.Region(0, 0)
self.theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
self.word_select = settings.get('require_word_select', False)
style = settings.get('highlight_style', 'outline')
self.style = highlight_style(style)
self.underline = style == 'underline'
self.max_selections = len(self.theme_selectors)
self.sel_threshold = int(settings.get('selection_threshold', -1))
def do_search(self, view, force=True):
"""Perform the search for the highlighted word."""
global reload_flag
if view is None:
return
if reload_flag:
reload_flag = False
self.theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
self.max_selections = len(self.theme_selectors)
self.word_select = settings.get('require_word_select', False)
style = settings.get('highlight_style', 'outline')
self.style = highlight_style(style)
self.underline = style == 'underline'
self.sel_threshold = int(settings.get('selection_threshold', -1))
force = True
visible_region = view.visible_region()
if not force and self.previous_region == visible_region:
return
clear_regions()
# The default separator does not include whitespace, so I add that here no matter what
separator_string = view.settings().get('word_separators', "") + " \n\r\t"
current_words = []
current_regions = []
good_words = set()
words = []
selections = view.sel()
sel_len = len(selections)
if sel_len > 0 and (self.sel_threshold == -1 or self.sel_threshold >= sel_len):
self.previous_region = visible_region
# Reduce m*n search to just n by mapping each word
# separator character into a dictionary
self.separators = {}
for c in separator_string:
self.separators[c] = True
for selection in selections:
current_regions.append(view.word(selection))
current_words.append(
view.substr(current_regions[-1]).strip(separator_string)
)
count = 0
for word in current_words:
if word not in good_words:
if count != self.max_selections:
good_words.add(word)
words.append((word, current_regions[count]))
count += 1
else:
return
count = 0
for word in words:
key = KEY + str(count)
selector = self.theme_selectors[count]
# See if a word is selected or if you are just in a word
if self.word_select and word[1].size() != selections[count].size():
continue
# remove leading/trailing separator characters just in case
if len(word[0]) == 0:
continue
# ignore the selection if it spans multiple words
abort = False
for c in word[0]:
if c in self.separators:
abort = True
break
if abort:
continue
self.highlight_word(view, key, selector, word[1], word[0])
count += 1
def highlight_word(self, view, key, selector, current_region, current_word):
"""Find and highlight word."""
size = view.size() - 1
search_start = max(0, self.previous_region.begin() - len(current_word))
search_end = min(size, self.previous_region.end() + len(current_word))
valid_regions = []
while True:
found_region = view.find(current_word, search_start, sublime.LITERAL)
if found_region is None:
break
# regions can have reversed start/ends so normalize them
start = max(0, found_region.begin())
end = min(size, found_region.end())
if search_start == end:
search_start += 1
continue
search_start = end
if search_start >= size:
break
if found_region.empty():
break
if found_region.intersects(current_region):
continue
# check if the character before and after the region is a separator character
# if it is not, then the region is part of a larger word and shouldn't match
# this can't be done in a regex because we would be unable to use the word_separators setting string
if start == 0 or view.substr(sublime.Region(start - 1, start)) in self.separators:
if end == size or view.substr(sublime.Region(end, end + 1)) in self.separators:
valid_regions.append(found_region)
if search_start > search_end:
break
view.add_regions(
key,
valid_regions if not self.underline else underline(valid_regions),
selector,
"",
self.style
)
view.settings().set('highlight_word.regions', self.max_selections)
class HighlightWordListenerCommand(sublime_plugin.EventListener):
"""Handle listener events."""
def on_selection_modified(self, view):
"""Handle selection events for highlighting."""
if hw_thread is None or hw_thread.ignore_all:
return
now = time()
hw_thread.modified = True
hw_thread.time = now
class HighlightWordSelectCommand(sublime_plugin.TextCommand):
"""Select all instances of the selected word(s)."""
def run(self, edit):
"""Run the command."""
theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
max_selections = len(theme_selectors)
word_select = settings.get('require_word_select', False)
current_words = []
current_regions = []
good_words = set()
words = []
separator_string = self.view.settings().get('word_separators', "") + " \n\r\t"
selections = self.view.sel()
sel_len = len(selections)
if sel_len > 0:
# Reduce m*n search to just n by mapping each word
# separator character into a dictionary
self.separators = {}
for c in separator_string:
self.separators[c] = True
for selection in selections:
current_regions.append(self.view.word(selection))
current_words.append(
self.view.substr(current_regions[-1]).strip(separator_string)
)
count = 0
for word in current_words:
if word not in good_words:
if count != max_selections:
good_words.add(word)
words.append((word, current_regions[count]))
count += 1
else:
return
count = 0
select_regions = []
for word in words:
key = KEY + str(count)
selector = theme_selectors[count]
# See if a word is selected or if you are just in a word
if word_select and word[1].size() != selections[count].size():
continue
# remove leading/trailing separator characters just in case
if len(word[0]) == 0:
continue
# ignore the selection if it spans multiple words
abort = False
for c in word[0]:
if c in self.separators:
abort = True
break
if abort:
continue
select_regions += self.select_word(key, selector, word[0])
count += 1
if select_regions:
self.view.sel().clear()
self.view.sel().add_all(select_regions)
def select_word(self, key, selector, current_word):
"""Find and highlight word."""
size = self.view.size() - 1
search_start = 0
valid_regions = []
while True:
found_region = self.view.find(current_word, search_start, sublime.LITERAL)
if found_region is None:
break
# regions can have reversed start/ends so normalize them
start = max(0, found_region.begin())
end = min(size, found_region.end())
if search_start == end:
search_start += 1
continue
search_start = end
if search_start >= size:
break
if found_region.empty():
break
# check if the character before and after the region is a separator character
# if it is not, then the region is part of a larger word and shouldn't match
# this can't be done in a regex because we would be unable to use the word_separators setting string
if start == 0 or self.view.substr(sublime.Region(start - 1, start)) in self.separators:
if end == size or self.view.substr(sublime.Region(end, end + 1)) in self.separators:
valid_regions.append(found_region)
return valid_regions
class HwThread(threading.Thread):
"""Load up defaults."""
def __init__(self):
"""Setup the thread."""
self.reset()
threading.Thread.__init__(self)
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.time = time()
self.modified = False
self.ignore_all = False
self.abort = False
def payload(self, force=False):
"""Code to run."""
self.modified = False
# Ignore selection and edit events inside the routine
self.ignore_all = True
if highlight_word is not None:
highlight_word.do_search(sublime.active_window().active_view(), force)
self.ignore_all = False
self.time = time()
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
self.reset()
def run(self):
"""Thread loop."""
while not self.abort:
if self.modified is True and time() - self.time > self.wait_time:
sublime.set_timeout(lambda: self.payload(force=True), 0)
elif not self.modified:
sublime.set_timeout(self.payload, 0)
sleep(0.5)
def set_reload():
"""Set reload events."""
global reload_flag
global settings
reload_flag = True
settings = sublime.load_settings("highlight_word.sublime-settings")
settings.clear_on_change('reload')
settings.add_on_change('reload', set_reload)
def plugin_loaded():
"""Setup plugin."""
global highlight_word
global hw_thread
set_reload()
highlight_word = HighlightWord()
if hw_thread is not None:
hw_thread.kill()
hw_thread = HwThread()
hw_thread.start()
def plugin_unloaded():
"""Kill thread."""
hw_thread.kill()
clear_regions()
|
the-stack_0_17701 | import io
import json
import os
import subprocess
from setuptools import Command
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist as base_sdist
from wagtail import __semver__
class assets_mixin:
def compile_assets(self):
try:
subprocess.check_call(['npm', 'run', 'dist'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error compiling assets: ' + str(e)) # noqa
raise SystemExit(1)
def publish_assets(self):
try:
subprocess.check_call(['npm', 'publish', 'client'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error publishing front-end assets: ' + str(e)) # noqa
raise SystemExit(1)
def bump_client_version(self):
"""
Writes the current Wagtail version number into package.json
"""
path = os.path.join('.', 'client', 'package.json')
        input_file = io.open(path, "r", encoding="utf-8")
        try:
            package = json.loads(input_file.read())
        except (ValueError) as e:
            print('Unable to read ' + path + ': ' + str(e))  # noqa
            raise SystemExit(1)
package['version'] = __semver__
try:
with io.open(path, 'w', encoding='utf-8') as f:
f.write(str(json.dumps(package, indent=2, ensure_ascii=False)))
except (IOError) as e:
print('Error setting the version for front-end assets: ' + str(e)) # noqa
raise SystemExit(1)
class assets(Command, assets_mixin):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.bump_client_version()
self.compile_assets()
self.publish_assets()
class sdist(base_sdist, assets_mixin):
def run(self):
self.compile_assets()
base_sdist.run(self)
class check_bdist_egg(bdist_egg):
# If this file does not exist, warn the user to compile the assets
sentinel_dir = 'wagtail/wagtailadmin/static/'
def run(self):
bdist_egg.run(self)
if not os.path.isdir(self.sentinel_dir):
print("\n".join([ # noqa
"************************************************************",
"The front end assets for Wagtail are missing.",
"To generate the assets, please refer to the documentation in",
"docs/contributing/css_guidelines.rst",
"************************************************************",
]))
|
the-stack_0_17702 | import ast
import functools
import inspect
import re
import sys
import textwrap
import numpy as np
import taichi.lang
from taichi._lib import core as _ti_core
from taichi.lang import impl, runtime_ops, util
from taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,
transform_tree)
from taichi.lang.enums import Layout
from taichi.lang.exception import TaichiSyntaxError
from taichi.lang.expr import Expr
from taichi.lang.matrix import MatrixType
from taichi.lang.shell import _shell_pop_print, oinspect
from taichi.lang.util import to_taichi_type
from taichi.linalg.sparse_matrix import sparse_matrix_builder
from taichi.tools.util import obsolete
from taichi.types import any_arr, primitive_types, template
from taichi import _logging
if util.has_pytorch():
import torch
def func(fn):
"""Marks a function as callable in Taichi-scope.
This decorator transforms a Python function into a Taichi one. Taichi
will JIT compile it into native instructions.
Args:
fn (Callable): The Python function to be decorated
Returns:
Callable: The decorated function
Example::
>>> @ti.func
>>> def foo(x):
>>> return x + 2
>>>
>>> @ti.kernel
>>> def run():
>>> print(foo(40)) # 42
"""
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
def pyfunc(fn):
"""Marks a function as callable in both Taichi and Python scopes.
When called inside the Taichi scope, Taichi will JIT compile it into
native instructions. Otherwise it will be invoked directly as a
Python function.
See also :func:`~taichi.lang.kernel_impl.func`.
Args:
fn (Callable): The Python function to be decorated
Returns:
Callable: The decorated function
"""
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
def _get_tree_and_ctx(self,
excluded_parameters=(),
is_kernel=True,
arg_features=None,
args=None):
file = oinspect.getsourcefile(self.func)
src, start_lineno = oinspect.getsourcelines(self.func)
src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]
tree = ast.parse(textwrap.dedent("\n".join(src)))
func_body = tree.body[0]
func_body.decorator_list = []
global_vars = _get_global_vars(self.func)
for i, arg in enumerate(func_body.args.args):
anno = arg.annotation
if isinstance(anno, ast.Name):
global_vars[anno.id] = self.argument_annotations[i]
if isinstance(func_body.returns, ast.Name):
global_vars[func_body.returns.id] = self.return_type
if is_kernel or impl.get_runtime().experimental_real_function:
# inject template parameters into globals
for i in self.template_slot_locations:
template_var_name = self.argument_names[i]
global_vars[template_var_name] = args[i]
return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,
is_kernel=is_kernel,
func=self,
arg_features=arg_features,
global_vars=global_vars,
argument_data=args,
src=src,
start_lineno=start_lineno,
file=file)
class Func:
function_counter = 0
def __init__(self, _func, _classfunc=False, _pyfunc=False):
self.func = _func
self.func_id = Func.function_counter
Func.function_counter += 1
self.compiled = None
self.classfunc = _classfunc
self.pyfunc = _pyfunc
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
self.taichi_functions = {} # The |Function| class in C++
def __call__(self, *args):
if not impl.inside_kernel():
if not self.pyfunc:
raise TaichiSyntaxError(
"Taichi functions cannot be called from Python-scope."
" Use @ti.pyfunc if you wish to call Taichi functions "
"from both Python-scope and Taichi-scope.")
return self.func(*args)
if impl.get_runtime().experimental_real_function:
if impl.get_runtime().current_kernel.is_grad:
raise TaichiSyntaxError(
"Real function in gradient kernels unsupported.")
instance_id, _ = self.mapper.lookup(args)
key = _ti_core.FunctionKey(self.func.__name__, self.func_id,
instance_id)
if self.compiled is None:
self.compiled = {}
if key.instance_id not in self.compiled:
self.do_compile(key=key, args=args)
return self.func_call_rvalue(key=key, args=args)
tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
ret = transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Function has a return type but does not have a return statement"
)
return ret
def func_call_rvalue(self, key, args):
# Skip the template args, e.g., |self|
assert impl.get_runtime().experimental_real_function
non_template_args = []
for i, anno in enumerate(self.argument_annotations):
if not isinstance(anno, template):
non_template_args.append(args[i])
non_template_args = impl.make_expr_group(non_template_args)
return Expr(
_ti_core.make_func_call_expr(
self.taichi_functions[key.instance_id], non_template_args))
def do_compile(self, key, args):
tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
self.compiled[key.instance_id] = lambda: transform_tree(tree, ctx)
self.taichi_functions[key.instance_id] = _ti_core.create_function(key)
self.taichi_functions[key.instance_id].set_function_body(
self.compiled[key.instance_id])
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise KernelDefError(
'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise KernelDefError(
'Taichi functions do not support variable positional parameters (i.e., *args)'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise KernelDefError(
'Taichi functions do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise KernelDefError(
'Taichi functions only support "positional or keyword" parameters'
)
annotation = param.annotation
if annotation is inspect.Parameter.empty:
if i == 0 and self.classfunc:
annotation = template()
# TODO: pyfunc also need type annotation check when real function is enabled,
# but that has to happen at runtime when we know which scope it's called from.
elif not self.pyfunc and impl.get_runtime(
).experimental_real_function:
raise KernelDefError(
f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'
)
else:
if not id(annotation
) in primitive_types.type_ids and not isinstance(
annotation, template):
raise KernelDefError(
f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
class TaichiCallableTemplateMapper:
def __init__(self, annotations, template_slot_locations):
self.annotations = annotations
self.num_args = len(annotations)
self.template_slot_locations = template_slot_locations
self.mapping = {}
@staticmethod
def extract_arg(arg, anno):
if isinstance(anno, template):
if isinstance(arg, taichi.lang.snode.SNode):
return arg.ptr
if isinstance(arg, taichi.lang.expr.Expr):
return arg.ptr.get_underlying_ptr_address()
if isinstance(arg, _ti_core.Expr):
return arg.get_underlying_ptr_address()
if isinstance(arg, tuple):
return tuple(
TaichiCallableTemplateMapper.extract_arg(item, anno)
for item in arg)
return arg
if isinstance(anno, any_arr):
if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):
anno.check_element_dim(arg, 0)
anno.check_element_shape(())
anno.check_field_dim(len(arg.shape))
return arg.dtype, len(arg.shape), (), Layout.AOS
if isinstance(arg, taichi.lang.matrix.VectorNdarray):
anno.check_element_dim(arg, 1)
anno.check_element_shape((arg.n, ))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout
if isinstance(arg, taichi.lang.matrix.MatrixNdarray):
anno.check_element_dim(arg, 2)
anno.check_element_shape((arg.n, arg.m))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 2, (arg.n,
arg.m), arg.layout
# external arrays
element_dim = 0 if anno.element_dim is None else anno.element_dim
layout = Layout.AOS if anno.layout is None else anno.layout
shape = tuple(arg.shape)
if len(shape) < element_dim:
raise ValueError(
f"Invalid argument into ti.any_arr() - required element_dim={element_dim}, "
f"but the argument has only {len(shape)} dimensions")
            if element_dim == 0:
                element_shape = ()
            elif layout == Layout.SOA:
                element_shape = shape[:element_dim]
            else:
                element_shape = shape[-element_dim:]
return to_taichi_type(arg.dtype), len(shape), element_shape, layout
return type(arg).__name__,
def extract(self, args):
extracted = []
for arg, anno in zip(args, self.annotations):
extracted.append(self.extract_arg(arg, anno))
return tuple(extracted)
def lookup(self, args):
if len(args) != self.num_args:
raise TypeError(
f'{self.num_args} argument(s) needed but {len(args)} provided.'
)
key = self.extract(args)
if key not in self.mapping:
count = len(self.mapping)
self.mapping[key] = count
return self.mapping[key], key
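# Hedged sketch of how the mapper deduplicates instantiations (arguments are
# hypothetical):
#
#   mapper.lookup((field_a,))  # -> (0, key_a): first key, new instance 0
#   mapper.lookup((field_b,))  # -> (1, key_b): unseen key, new instance 1
#   mapper.lookup((field_a,))  # -> (0, key_a): cached, reuses instance 0
#
# Calls whose extracted keys match share one compiled kernel/function body.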
class KernelDefError(Exception):
pass
class KernelArgError(Exception):
def __init__(self, pos, needed, provided):
message = f'Argument {pos} (type={provided}) cannot be converted into required type {needed}'
super().__init__(message)
self.pos = pos
self.needed = needed
self.provided = provided
def _get_global_vars(_func):
closure_vars = inspect.getclosurevars(_func)
return {
**closure_vars.globals,
**closure_vars.nonlocals,
**closure_vars.builtins
}
class Kernel:
counter = 0
def __init__(self, _func, is_grad, _classkernel=False):
self.func = _func
self.kernel_counter = Kernel.counter
Kernel.counter += 1
self.is_grad = is_grad
self.grad = None
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.classkernel = _classkernel
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
impl.get_runtime().kernels.append(self)
self.reset()
self.kernel_cpp = None
def reset(self):
self.runtime = impl.get_runtime()
if self.is_grad:
self.compiled_functions = self.runtime.compiled_grad_functions
else:
self.compiled_functions = self.runtime.compiled_functions
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise KernelDefError(
'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise KernelDefError(
'Taichi kernels do not support variable positional parameters (i.e., *args)'
)
if param.default is not inspect.Parameter.empty:
raise KernelDefError(
'Taichi kernels do not support default values for arguments'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise KernelDefError(
'Taichi kernels do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise KernelDefError(
'Taichi kernels only support "positional or keyword" parameters'
)
annotation = param.annotation
if param.annotation is inspect.Parameter.empty:
if i == 0 and self.classkernel: # The |self| parameter
annotation = template()
else:
                    raise KernelDefError(
                        'Taichi kernel parameters must be type annotated')
else:
if isinstance(annotation, (template, any_arr)):
pass
elif id(annotation) in primitive_types.type_ids:
pass
elif isinstance(annotation, sparse_matrix_builder):
pass
elif isinstance(annotation, MatrixType):
pass
else:
raise KernelDefError(
f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
def materialize(self, key=None, args=None, arg_features=None):
if key is None:
key = (self.func, 0)
self.runtime.materialize()
if key in self.compiled_functions:
return
grad_suffix = ""
if self.is_grad:
grad_suffix = "_grad"
kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}"
_logging.trace(f"Compiling kernel {kernel_name}...")
tree, ctx = _get_tree_and_ctx(
self,
args=args,
excluded_parameters=self.template_slot_locations,
arg_features=arg_features)
if self.is_grad:
KernelSimplicityASTChecker(self.func).visit(tree)
# Do not change the name of 'taichi_ast_generator'
# The warning system needs this identifier to remove unnecessary messages
def taichi_ast_generator():
if self.runtime.inside_kernel:
raise TaichiSyntaxError(
"Kernels cannot call other kernels. I.e., nested kernels are not allowed. "
"Please check if you have direct/indirect invocation of kernels within kernels. "
"Note that some methods provided by the Taichi standard library may invoke kernels, "
"and please move their invocations to Python-scope.")
self.runtime.inside_kernel = True
self.runtime.current_kernel = self
try:
transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Kernel has a return type but does not have a return statement"
)
finally:
self.runtime.inside_kernel = False
self.runtime.current_kernel = None
taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,
kernel_name, self.is_grad)
self.kernel_cpp = taichi_kernel
assert key not in self.compiled_functions
self.compiled_functions[key] = self.get_function_body(taichi_kernel)
def get_function_body(self, t_kernel):
# The actual function body
def func__(*args):
assert len(args) == len(
self.argument_annotations
), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'
tmps = []
callbacks = []
has_external_arrays = False
actual_argument_slot = 0
launch_ctx = t_kernel.make_launch_context()
for i, v in enumerate(args):
needed = self.argument_annotations[i]
if isinstance(needed, template):
continue
provided = type(v)
# Note: do not use sth like "needed == f32". That would be slow.
if id(needed) in primitive_types.real_type_ids:
if not isinstance(v, (float, int)):
raise KernelArgError(i, needed.to_string(), provided)
launch_ctx.set_arg_float(actual_argument_slot, float(v))
elif id(needed) in primitive_types.integer_type_ids:
if not isinstance(v, int):
raise KernelArgError(i, needed.to_string(), provided)
launch_ctx.set_arg_int(actual_argument_slot, int(v))
elif isinstance(needed, sparse_matrix_builder):
# Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument
launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
elif isinstance(needed, any_arr) and (
self.match_ext_arr(v)
or isinstance(v, taichi.lang._ndarray.Ndarray)):
is_ndarray = False
if isinstance(v, taichi.lang._ndarray.Ndarray):
v = v.arr
is_ndarray = True
has_external_arrays = True
ndarray_use_torch = self.runtime.prog.config.ndarray_use_torch
has_torch = util.has_pytorch()
is_numpy = isinstance(v, np.ndarray)
if is_numpy:
tmp = np.ascontiguousarray(v)
# Purpose: DO NOT GC |tmp|!
tmps.append(tmp)
launch_ctx.set_arg_external_array(
actual_argument_slot, int(tmp.ctypes.data),
tmp.nbytes, False)
elif is_ndarray and not ndarray_use_torch:
# Use ndarray's own memory allocator
tmp = v
launch_ctx.set_arg_external_array(
actual_argument_slot,
int(tmp.device_allocation_ptr()),
tmp.element_size() * tmp.nelement(), True)
else:
def get_call_back(u, v):
def call_back():
u.copy_(v)
return call_back
assert has_torch
assert isinstance(v, torch.Tensor)
tmp = v
taichi_arch = self.runtime.prog.config.arch
# Ndarray means its memory is allocated on the specified taichi arch.
# Since torch only supports CPU & CUDA, torch-base ndarray only supports
# taichi cpu/cuda backend as well.
# Note I put x64/arm64/cuda here to be more specific.
assert not is_ndarray or taichi_arch in (
_ti_core.Arch.cuda, _ti_core.Arch.x64,
_ti_core.Arch.arm64
), "Torch-based ndarray is only supported on taichi x64/arm64/cuda backend."
if str(v.device).startswith('cuda'):
# External tensor on cuda
if taichi_arch != _ti_core.Arch.cuda:
# copy data back to cpu
host_v = v.to(device='cpu', copy=True)
tmp = host_v
callbacks.append(get_call_back(v, host_v))
else:
# External tensor on cpu
if taichi_arch == _ti_core.Arch.cuda:
gpu_v = v.cuda()
tmp = gpu_v
callbacks.append(get_call_back(v, gpu_v))
launch_ctx.set_arg_external_array(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), False)
shape = v.shape
max_num_indices = _ti_core.get_max_num_indices()
assert len(
shape
) <= max_num_indices, f"External array cannot have > {max_num_indices} indices"
for ii, s in enumerate(shape):
launch_ctx.set_extra_arg_int(actual_argument_slot, ii,
s)
elif isinstance(needed, MatrixType):
if id(needed.dtype) in primitive_types.real_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], (int, float)):
raise KernelArgError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_float(
actual_argument_slot, float(v[a, b]))
actual_argument_slot += 1
elif id(needed.dtype) in primitive_types.integer_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], int):
raise KernelArgError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_int(actual_argument_slot,
int(v[a, b]))
actual_argument_slot += 1
else:
raise ValueError(
f'Matrix dtype {needed.dtype} is not integer type or real type.'
)
continue
else:
raise ValueError(
f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
)
actual_argument_slot += 1
# Both the class kernels and the plain-function kernels are unified now.
# In both cases, |self.grad| is another Kernel instance that computes the
# gradient. For class kernels, args[0] is always the kernel owner.
if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
self.runtime.target_tape.insert(self, args)
t_kernel(launch_ctx)
ret = None
ret_dt = self.return_type
has_ret = ret_dt is not None
if has_ret or (impl.current_cfg().async_mode
and has_external_arrays):
runtime_ops.sync()
if has_ret:
if id(ret_dt) in primitive_types.integer_type_ids:
ret = t_kernel.get_ret_int(0)
else:
ret = t_kernel.get_ret_float(0)
if callbacks:
for c in callbacks:
c()
return ret
return func__
@staticmethod
def match_ext_arr(v):
has_array = isinstance(v, np.ndarray)
if not has_array and util.has_pytorch():
has_array = isinstance(v, torch.Tensor)
return has_array
def ensure_compiled(self, *args):
instance_id, arg_features = self.mapper.lookup(args)
key = (self.func, instance_id)
self.materialize(key=key, args=args, arg_features=arg_features)
return key
# For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__
# Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)
@_shell_pop_print
def __call__(self, *args, **kwargs):
if self.is_grad and impl.current_cfg().opt_level == 0:
_logging.warn(
"""opt_level = 1 is enforced to enable gradient computation."""
)
impl.current_cfg().opt_level = 1
assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
key = self.ensure_compiled(*args)
return self.compiled_functions[key](*args)
# For a Taichi class definition like below:
#
# @ti.data_oriented
# class X:
# @ti.kernel
# def foo(self):
# ...
#
# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is
# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',
# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class
# inherits, i.e. class X(object):, then in both versions, |code_context| is
# 'class X(object):'...
_KERNEL_CLASS_STACKFRAME_STMT_RES = [
re.compile(r'@(\w+\.)?data_oriented'),
re.compile(r'class '),
]
def _inside_class(level_of_class_stackframe):
try:
maybe_class_frame = sys._getframe(level_of_class_stackframe)
statement_list = inspect.getframeinfo(maybe_class_frame)[3]
        first_statement = statement_list[0].strip()
        for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:
            if pat.match(first_statement):
return True
except:
pass
return False
def _kernel_impl(_func, level_of_class_stackframe, verbose=False):
# Can decorators determine if a function is being defined inside a class?
# https://stackoverflow.com/a/8793684/12003165
is_classkernel = _inside_class(level_of_class_stackframe + 1)
if verbose:
print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')
primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)
adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)
# Having |primal| contains |grad| makes the tape work.
primal.grad = adjoint
if is_classkernel:
# For class kernels, their primal/adjoint callables are constructed
# when the kernel is accessed via the instance inside
# _BoundedDifferentiableMethod.
# This is because we need to bind the kernel or |grad| to the instance
# owning the kernel, which is not known until the kernel is accessed.
#
# See also: _BoundedDifferentiableMethod, data_oriented.
@functools.wraps(_func)
def wrapped(*args, **kwargs):
# If we reach here (we should never), it means the class is not decorated
# with @ti.data_oriented, otherwise getattr would have intercepted the call.
clsobj = type(args[0])
assert not hasattr(clsobj, '_data_oriented')
raise KernelDefError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
else:
@functools.wraps(_func)
def wrapped(*args, **kwargs):
return primal(*args, **kwargs)
wrapped.grad = adjoint
wrapped._is_wrapped_kernel = True
wrapped._is_classkernel = is_classkernel
wrapped._primal = primal
wrapped._adjoint = adjoint
return wrapped
def kernel(fn):
"""Marks a function as a Taichi kernel.
A Taichi kernel is a function written in Python, and gets JIT compiled by
Taichi into native CPU/GPU instructions (e.g. a series of CUDA kernels).
The top-level ``for`` loops are automatically parallelized, and distributed
to either a CPU thread pool or massively parallel GPUs.
Kernel's gradient kernel would be generated automatically by the AutoDiff system.
See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.
Args:
fn (Callable): the Python function to be decorated
Returns:
Callable: The decorated function
Example::
>>> x = ti.field(ti.i32, shape=(4, 8))
>>>
>>> @ti.kernel
>>> def run():
>>> # Assigns all the elements of `x` in parallel.
>>> for i in x:
>>> x[i] = i
"""
return _kernel_impl(fn, level_of_class_stackframe=3)
classfunc = obsolete('@ti.classfunc', '@ti.func directly')
classkernel = obsolete('@ti.classkernel', '@ti.kernel directly')
class _BoundedDifferentiableMethod:
def __init__(self, kernel_owner, wrapped_kernel_func):
clsobj = type(kernel_owner)
if not getattr(clsobj, '_data_oriented', False):
raise KernelDefError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
self._kernel_owner = kernel_owner
self._primal = wrapped_kernel_func._primal
self._adjoint = wrapped_kernel_func._adjoint
self._is_staticmethod = wrapped_kernel_func._is_staticmethod
self.__name__ = None
def __call__(self, *args, **kwargs):
if self._is_staticmethod:
return self._primal(*args, **kwargs)
return self._primal(self._kernel_owner, *args, **kwargs)
def grad(self, *args, **kwargs):
return self._adjoint(self._kernel_owner, *args, **kwargs)
def data_oriented(cls):
"""Marks a class as Taichi compatible.
To allow for modularized code, Taichi provides this decorator so that
Taichi kernels can be defined inside a class.
See also https://docs.taichi.graphics/lang/articles/advanced/odop
Example::
>>> @ti.data_oriented
>>> class TiArray:
>>> def __init__(self, n):
>>> self.x = ti.field(ti.f32, shape=n)
>>>
>>> @ti.kernel
>>> def inc(self):
>>> for i in self.x:
>>> self.x[i] += 1.0
>>>
>>> a = TiArray(32)
>>> a.inc()
Args:
cls (Class): the class to be decorated
Returns:
The decorated class.
"""
def _getattr(self, item):
method = cls.__dict__.get(item, None)
is_property = method.__class__ == property
is_staticmethod = method.__class__ == staticmethod
if is_property:
x = method.fget
else:
x = super(cls, self).__getattribute__(item)
if hasattr(x, '_is_wrapped_kernel'):
if inspect.ismethod(x):
wrapped = x.__func__
else:
wrapped = x
wrapped._is_staticmethod = is_staticmethod
assert inspect.isfunction(wrapped)
if wrapped._is_classkernel:
ret = _BoundedDifferentiableMethod(self, wrapped)
ret.__name__ = wrapped.__name__
if is_property:
return ret()
return ret
if is_property:
return x(self)
return x
cls.__getattribute__ = _getattr
cls._data_oriented = True
return cls
|
the-stack_0_17705 | #!/usr/bin/env python3
# Depth tolerance in km (for determining if top and bottom edges are
# horizontal)
DEPTH_TOL = 0.05
# Maximum ratio of distance off of the plane (relative to edge length) for the
# 4th point to be before being considered non-co-planar and adjusted to
# actually be on the plane?
OFFPLANE_TOLERANCE = 0.05
RAKEDICT = {"SS": 0.0, "NM": -90.0, "RS": 90.0, "ALL": None}
DEFAULT_MECH = "ALL"
DEFAULT_STRIKE = 0.0
DEFAULT_DIP = 90.0
DEFAULT_RAKE = 0.0
DEFAULT_WIDTH = 0.0
DEFAULT_ZTOR = 0.0
ORIGIN_REQUIRED_KEYS = [
"id",
"netid",
"network",
"lat",
"lon",
"depth",
"locstring",
"mag",
"time",
]
# Times can have either integer or floating point (preferred) seconds
TIMEFMT = "%Y-%m-%dT%H:%M:%S.%fZ"
ALT_TIMEFMT = "%Y-%m-%dT%H:%M:%SZ"
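# Illustrative parsing sketch (not part of the original module): try the
# fractional-seconds format first, then fall back to integer seconds.
#
#   from datetime import datetime
#
#   def parse_time(tstr):
#       try:
#           return datetime.strptime(tstr, TIMEFMT)
#       except ValueError:
#           return datetime.strptime(tstr, ALT_TIMEFMT)
#
#   parse_time("2021-03-01T12:34:56.789Z")  # floating point seconds
#   parse_time("2021-03-01T12:34:56Z")      # integer seconds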
|
the-stack_0_17706 | import os
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from mkdocs.structure.nav import Section
from mkdocs.structure.pages import Page
from .utils import flatten
from . import markdown as md
class AddNumberPlugin(BasePlugin):
config_scheme = (
('strict_mode', config_options.Type(bool, default=False)),
('increment_pages', config_options.Type(bool, default=False)),
('increment_topnav', config_options.Type(bool, default=False)),
('excludes', config_options.Type(list, default=[])),
('includes', config_options.Type(list, default=[])),
('order', config_options.Type(int, default=1))
)
def _check_config_params(self):
set_parameters = self.config.keys()
allowed_parameters = dict(self.config_scheme).keys()
if set_parameters != allowed_parameters:
unknown_parameters = [x for x in set_parameters if
x not in allowed_parameters]
raise AssertionError(
"Unknown parameter(s) set: %s" % ", ".join(unknown_parameters))
def on_nav(self, nav, config, files):
"""
The nav event is called after the site navigation is created and
can be used to alter the site navigation.
See:
https://www.mkdocs.org/user-guide/plugins/#on_nav
:param nav: global navigation object
:param config: global configuration object
:param files: global files collection
:return: global navigation object
"""
self._title2index = dict()
is_increment_topnav = self.config.get("increment_topnav", False)
is_increment_pages = self.config.get("increment_pages", False)
index = 0
while index < len(nav.items):
if is_increment_topnav:
nav.items[index].title = str(index + 1) + '. ' + \
nav.items[index].title
# Section(title='Linux')
# Page(title=[blank], url='/linux/epel%E6%BA%90/')
if type(nav.items[index]) == Section:
pages = nav.items[index].children
j = 0
while j < len(pages):
if is_increment_topnav and is_increment_pages:
self._title2index[pages[j].url] = \
str(index + 1) + '.' + str(j + 1) + ' '
elif is_increment_pages:
self._title2index[pages[j].url] = str(j + 1) + '. '
j += 1
index += 1
return nav
def on_files(self, files, config):
"""
The files event is called after the files collection is populated from the docs_dir.
Use this event to add, remove, or alter files in the collection.
See https://www.mkdocs.org/user-guide/plugins/#on_files
Args:
files (list): files: global files collection
config (dict): global configuration object
Returns:
files (list): global files collection
"""
self._check_config_params()
# Use navigation if set,
# (see https://www.mkdocs.org/user-guide/configuration/#nav)
# only these files will be displayed.
nav = config.get('nav', None)
if nav:
files_str = flatten(nav)
# Otherwise, take all source markdown pages
else:
files_str = [
file.src_path for file in files if file.is_documentation_page()
]
# Record excluded files from selection by user
self._excludes = self.config['excludes']
self._exclude_files = [os.path.normpath(file1) for file1 in
self._excludes if not file1.endswith('\\')
and not file1.endswith('/')]
self._exclude_dirs = [os.path.normpath(dir1) for dir1 in self._excludes
if dir1.endswith('\\')
or dir1.endswith('/')]
self._includes = self.config['includes']
self._include_files = [os.path.normpath(file1) for file1 in
self._includes if not file1.endswith('\\')
and not file1.endswith('/')]
self._include_dirs = [os.path.normpath(dir1) for dir1 in self._includes
if dir1.endswith('\\')
or dir1.endswith('/')]
self._order = self.config['order'] - 1
# Remove files excluded from selection by user
files_to_remove = [file for file in files_str if
self._is_exclude(file) and not self._is_include(
file)]
self.files_str = [file for file in files_str if
file not in files_to_remove]
return files
def on_page_markdown(self, markdown, page, config, files):
"""
The page_markdown event is called after the page's markdown is loaded
from file and can be used to alter the Markdown source text.
The meta- data has been stripped off and is available as page.meta
at this point.
See:
https://www.mkdocs.org/user-guide/plugins/#on_page_markdown
Args:
markdown (str): Markdown source text of page as string
page (Page): mkdocs.nav.Page instance
config (dict): global configuration object
files (list): global files collection
Returns:
markdown (str): Markdown source text of page as string
"""
if self.config.get('increment_pages', False):
index_str = self._title2index.get(page.url, None)
if index_str:
page.title = index_str + page.title
if page.file.src_path not in self.files_str:
return markdown
lines = markdown.split('\n')
heading_lines = md.headings(lines)
if len(heading_lines) <= self._order:
return markdown
tmp_lines_values = list(heading_lines.values())
if self.config['strict_mode']:
tmp_lines_values, _ = self._searchN(tmp_lines_values, 1,
self._order, 1, [])
else:
tmp_lines_values = self._ascent(tmp_lines_values, [0], 0, [], 1,
self._order)
# replace the links of current page after numbering the titles
def _format_link_line(line):
line = line.replace(".", "")
new_line = ''
for s in line:
if s.isdigit() or s in (" ", "_") \
or (u'\u0041' <= s <= u'\u005a') \
or (u'\u0061' <= s <= u'\u007a'):
new_line += s.lower()
return '#' + '-'.join(new_line.split())
link_lines = [_format_link_line(v) for v in tmp_lines_values]
link_lines = {'#' + i.split("-", 1)[1]: i for i in link_lines
if i.count('-') > 0}
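        # Illustrative mapping (hypothetical heading): a numbered heading such as
        # "## 1.2 My Heading" slugifies to "#12-my-heading", so link_lines maps the
        # old anchor "#my-heading" to "#12-my-heading"; the loop below rewrites any
        # in-page links accordingly.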
n = 0
while n < len(lines):
for k in link_lines.keys():
line_content = lines[n]
if line_content.count('[') >= 1 \
and line_content.count('(') >= 1:
lines[n] = line_content.replace(k, link_lines[k])
n += 1
# replace these new titles
n = 0
for key in heading_lines.keys():
lines[key] = tmp_lines_values[n]
n += 1
return '\n'.join(lines)
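    # Illustrative before/after for one page (hypothetical headings, default
    # non-strict mode, order=1):
    #
    #   ## Install        ->  ## 1. Install
    #   ### From source   ->  ### 1.1 From source
    #   ### From pip      ->  ### 1.2 From pip
    #
    # In-page links such as (#from-source) are rewritten to (#11-from-source)
    # so anchors keep working after the titles change.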
def _ascent(self, tmp_lines, parent_nums_head, level, args, num, startrow):
"""
Add number to every line.
e.g.
if number from h2, then the level is:
## level=1
### level=2
#### level=3
### level=2
args
|...|
v v
######
^
|
num
        :param tmp_lines: the lines to number
        :param parent_nums_head: heading depths of the parent headings seen so far.
        :param level: level of the current heading
        :param args: the numbers that are joined to build the prefix
        :param num: the last number in the prefix
        :param startrow: the row to start processing from
        :return: lines which have been numbered
"""
if startrow == len(tmp_lines):
return tmp_lines
nums_head = md.heading_depth(tmp_lines[startrow])
parent_nums = parent_nums_head[len(parent_nums_head) - 1]
chang_num = nums_head - parent_nums
# drop one level
if chang_num < 0:
if level != 1:
# for _ in range(-chang_num):
num = args.pop()
level -= 1
parent_nums_head.pop()
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow)
# sibling
if chang_num == 0:
num += 1
tmp_lines[startrow] = self._replace_line(tmp_lines[startrow],
'#' * nums_head + ' ',
'%d.' * len(args) % tuple(
args), num)
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow + 1)
# rise one level
level += 1
if level != 1:
# for _ in range(chang_num):
args.append(num)
parent_nums_head.append(nums_head)
num = 1
tmp_lines[startrow] = self._replace_line(tmp_lines[startrow],
'#' * nums_head + ' ',
'%d.' * len(args) % tuple(
args), num)
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow + 1)
def _replace_line(self, tmp_line, substr, prenum_str, nextnum):
re_str = (substr + "%d. " % nextnum) if (prenum_str == '') else (
substr + "%s%d " % (prenum_str, nextnum))
tmp_line = tmp_line.replace(substr, re_str)
return tmp_line
def _searchN(self, tmp_lines, num, start_row, level, args):
while True:
tmp_lines, start_row, re = self._replace(tmp_lines,
'#' * level + ' ',
'.'.join(('%d.' * (
level - 1)).split()) % tuple(
args),
num, start_row)
if not re:
break
next_num = 1
if level != 6:
args.append(num)
re_lines, start_row = self._searchN(tmp_lines, next_num,
start_row, level + 1, args)
args.pop()
num += 1
return tmp_lines, start_row
def _replace(self, tmp_lines, substr, prenum_str, nextnum, start_row):
if start_row == len(tmp_lines) or not tmp_lines[start_row].startswith(
substr):
return tmp_lines, start_row, False
re_str = (substr + "%d. " % nextnum) if (prenum_str == '') else (
substr + "%s%d " % (prenum_str, nextnum))
tmp_lines[start_row] = tmp_lines[start_row].replace(substr, re_str)
return tmp_lines, start_row + 1, True
def _is_exclude(self, file):
if len(self._excludes) == 0:
return False
url = os.path.normpath(file)
if url in self._exclude_files or '*' in self._exclude_files:
return True
for dir1 in self._exclude_dirs:
if url.find(dir1) != -1:
return True
return False
def _is_include(self, file):
if len(self._includes) == 0:
return False
url = os.path.normpath(file)
if url in self._include_files:
return True
for dir1 in self._include_dirs:
if url.find(dir1) != -1:
return True
return False
|