max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
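A minimal sketch of consuming rows with this schema, assuming the preview comes from a Hugging Face dataset; "user/code-dataset" below is a hypothetical placeholder id, not the real one:

from datasets import load_dataset  # hypothetical consumer script, not part of the dump itself

rows = load_dataset("user/code-dataset", split="train", streaming=True)
for row in rows:
    # each row exposes the columns listed in the header above
    print(row["max_stars_repo_path"], row["max_stars_repo_name"], row["max_stars_count"])
    print(row["content"][:120])  # first 120 characters of the stored source file
    break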
applications/RANSApplication/python_scripts/formulations/fractional_step/fractional_step_k_omega_sst_rans_formulation.py | lkusch/Kratos | 778 | 12679368 |
# import kratos
import KratosMultiphysics as Kratos
import KratosMultiphysics.RANSApplication as KratosRANS
# import formulation interface
from KratosMultiphysics.RANSApplication.formulations.rans_formulation import RansFormulation
# import formulations
from KratosMultiphysics.RANSApplication.formulations.incompressible_potential_flow import IncompressiblePotentialFlowRansFormulation
from KratosMultiphysics.RANSApplication.formulations.turbulence_models.k_omega_sst_rans_formulation import KOmegaSSTRansFormulation
from KratosMultiphysics.RANSApplication.formulations.fractional_step.fractional_step_velocity_pressure_rans_formulation import FractionalStepVelocityPressureRansFormulation
class FractionalStepKOmegaSSTRansFormulation(RansFormulation):
def __init__(self, model_part, settings):
super().__init__(model_part, settings)
default_settings = Kratos.Parameters(r'''
{
"formulation_name": "fractional_step_k_epsilon",
"incompressible_potential_flow_initialization_settings": {},
"fractional_step_flow_solver_settings": {},
"k_omega_sst_solver_settings": {},
"max_iterations": 1
}''')
settings.ValidateAndAssignDefaults(default_settings)
if (not settings["incompressible_potential_flow_initialization_settings"].IsEquivalentTo(
Kratos.Parameters("{}"))):
self.incompressible_potential_flow_formulation = IncompressiblePotentialFlowRansFormulation(model_part, settings["incompressible_potential_flow_initialization_settings"])
self.AddRansFormulation(self.incompressible_potential_flow_formulation)
self.fractional_step_formulation = FractionalStepVelocityPressureRansFormulation(model_part, settings["fractional_step_flow_solver_settings"])
self.AddRansFormulation(self.fractional_step_formulation)
self.k_omega_sst_formulation = KOmegaSSTRansFormulation(model_part, settings["k_omega_sst_solver_settings"])
self.AddRansFormulation(self.k_omega_sst_formulation)
self.SetMaxCouplingIterations(settings["max_iterations"].GetInt())
def SetConstants(self, settings):
self.k_omega_sst_formulation.SetConstants(settings)
def Initialize(self):
super().Initialize()
nut_nodal_update_process = KratosRANS.RansNutNodalUpdateProcess(
self.GetBaseModelPart().GetModel(),
self.GetBaseModelPart().Name,
self.k_omega_sst_formulation.echo_level)
self.k_omega_sst_formulation.AddProcess(nut_nodal_update_process) |
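A minimal usage sketch for the formulation above, assuming a Kratos fluid model part already exists; the model part name, iteration count, and empty sub-solver blocks are illustrative only (a real case would populate the solver settings):

import KratosMultiphysics as Kratos
from KratosMultiphysics.RANSApplication.formulations.fractional_step.fractional_step_k_omega_sst_rans_formulation import FractionalStepKOmegaSSTRansFormulation

model = Kratos.Model()
fluid_model_part = model.CreateModelPart("FluidModelPart")  # illustrative name

# keys mirror the defaults validated in __init__; sub-solver blocks left empty for brevity
settings = Kratos.Parameters(r'''
{
    "incompressible_potential_flow_initialization_settings": {},
    "fractional_step_flow_solver_settings": {},
    "k_omega_sst_solver_settings": {},
    "max_iterations": 10
}''')

formulation = FractionalStepKOmegaSSTRansFormulation(fluid_model_part, settings)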
wouso/interface/top/tests.py | AlexandruGhergut/wouso | 117 | 12679392 |
from wouso.core.tests import WousoTest
from wouso.interface.top.models import TopUser
class TopTest(WousoTest):
def test_challenges(self):
player = self._get_player()
top_player = player.get_extension(TopUser)
self.assertEqual(top_player.won_challenges, 0) |
test/test_signature_combine.py | afermanian/signatory | 156 | 12679413 |
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Tests the functions for combining signatures."""
import gc
import iisignature
import pytest
import random
import torch
from torch import autograd
import weakref
from helpers import helpers as h
from helpers import validation as v
from helpers import reimplementation as r
tests = ['signature_combine', 'multi_signature_combine']
depends = []
signatory = v.validate_tests(tests, depends)
# We have to use the iisignature implementation here, rather than our own, as otherwise we end up with a dependency cycle
# in the tests, between signatory.signature and signatory.signature_combine.
class _IisignatureSignatureFunction(autograd.Function):
@staticmethod
def forward(ctx, path, depth):
ctx.path = path.detach().cpu()
ctx.depth = depth
ctx.device = path.device
ctx.dtype = path.dtype
return torch.tensor(iisignature.sig(ctx.path, ctx.depth), device=ctx.device, dtype=ctx.dtype)
@staticmethod
def backward(ctx, grad):
return torch.tensor(iisignature.sigbackprop(grad.cpu(), ctx.path, ctx.depth), device=ctx.device,
dtype=ctx.dtype), None
def iisignature_signature(path, depth, stream=False, basepoint=False, inverse=False, scalar_term=False):
"""Duplicates signatory.signature's functionality using iisignature, for testing purposes."""
def fn(path, depth):
signature = _IisignatureSignatureFunction.apply(path, depth)
if scalar_term:
out = torch.ones(signature.size(0), 1 + signature.size(1), dtype=signature.dtype,
device=signature.device)
out[:, 1:] = signature
signature = out
return signature
return r.iisignature_signature_or_logsignature(fn, path, depth, stream, basepoint, inverse)
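# A hypothetical, self-contained sketch (not part of the original suite) of the property
# exercised below: combining the signatures of two path pieces, where the second piece is
# signed with the first piece's endpoint as its basepoint, should reproduce the signature
# of the concatenated path.
def _signature_combine_sketch():
    depth, channels = 3, 2
    path1 = torch.rand(1, 5, channels, dtype=torch.double)
    path2 = torch.rand(1, 5, channels, dtype=torch.double)
    sig1 = iisignature_signature(path1, depth)
    sig2 = iisignature_signature(path2, depth, basepoint=path1[:, -1])
    combined = signatory.signature_combine(sig1, sig2, channels, depth)
    full = iisignature_signature(torch.cat([path1, path2], dim=1), depth)
    assert torch.allclose(combined, full)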
def test_forward():
"""Tests that the forward calculation for combing signatures produces the correct values."""
for signature_combine, amount in ((True, 2), (False, 1), (False, 2), (False, 3), (False, 10)):
for signature_grad in (False, True):
for device in h.get_devices():
for batch_size in (1, 2, 5):
input_stream = 2
for input_channels in (1, 2, 6):
for depth in (1, 2, 4, 6):
for inverse in (False, True):
for scalar_term in (False, True):
_test_forward(signature_combine, signature_grad, amount, device, batch_size,
input_stream, input_channels, depth, inverse, scalar_term)
def _test_forward(signature_combine, signature_grad, amount, device, batch_size, input_stream, input_channels, depth,
inverse, scalar_term):
paths = []
for _ in range(amount):
paths.append(torch.rand(batch_size, input_stream, input_channels, device=device, dtype=torch.double))
signatures = []
basepoint = False
for path in paths:
signature = iisignature_signature(path, depth, basepoint=basepoint, inverse=inverse, scalar_term=scalar_term)
if signature_grad:
signature.requires_grad_()
signatures.append(signature)
basepoint = path[:, -1]
if signature_combine:
combined_signatures = signatory.signature_combine(signatures[0], signatures[1], input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
else:
combined_signatures = signatory.multi_signature_combine(signatures, input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
combined_paths = torch.cat(paths, dim=1)
true_combined_signatures = iisignature_signature(combined_paths, depth, inverse=inverse, scalar_term=scalar_term)
h.diff(combined_signatures, true_combined_signatures)
if signature_grad:
ctx = combined_signatures.grad_fn
assert type(ctx).__name__ == '_SignatureCombineFunctionBackward'
ref = weakref.ref(ctx)
del ctx
del combined_signatures
gc.collect()
assert ref() is None
else:
assert combined_signatures.grad_fn is None
def test_backward():
"""Tests that the backwards calculation for combining signatures produces the correct values."""
for signature_combine, amount in ((True, 2), (False, 1), (False, 2), (False, 3), (False, 10)):
for device in h.get_devices():
for batch_size, input_stream, input_channels in h.random_sizes():
for depth in (1, 2, 4, 6):
for scalar_term in (False, True):
inverse = random.choice([False, True])
_test_backward(signature_combine, amount, device, batch_size, input_stream, input_channels,
depth, inverse, scalar_term)
def _test_backward(signature_combine, amount, device, batch_size, input_stream, input_channels, depth, inverse,
scalar_term):
paths = []
for _ in range(amount):
paths.append(torch.rand(batch_size, input_stream, input_channels, device=device, dtype=torch.double,
requires_grad=True))
signatures = []
basepoint = False
for path in paths:
signature = iisignature_signature(path, depth, basepoint=basepoint, inverse=inverse, scalar_term=scalar_term)
signatures.append(signature)
basepoint = path[:, -1]
# This is the test we'd like to run here, but it takes too long.
# Furthermore we'd also prefer to only go backwards through the signature combine, not through the signature, but
# we can't really do that with our faster alternative.
#
# if signature_combine:
# def check_fn(*signatures):
# return signatory.signature_combine(signatures[0], signatures[1], input_channels, depth, inverse=inverse)
# else:
# def check_fn(*signatures):
# return signatory.multi_signature_combine(signatures, input_channels, depth, inverse=inverse)
# try:
# autograd.gradcheck(check_fn, tuple(signatures), atol=2e-05, rtol=0.002)
# except RuntimeError:
# pytest.fail()
if signature_combine:
combined_signatures = signatory.signature_combine(signatures[0], signatures[1], input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
else:
combined_signatures = signatory.multi_signature_combine(signatures, input_channels, depth, inverse=inverse,
scalar_term=scalar_term)
grad = torch.rand_like(combined_signatures)
combined_signatures.backward(grad)
path_grads = [path.grad.clone() for path in paths]
for path in paths:
path.grad.zero_()
true_signature = iisignature_signature(torch.cat(paths, dim=1), depth, inverse=inverse, scalar_term=scalar_term)
true_signature.backward(grad)
for path_grad, path in zip(path_grads, paths):
h.diff(path_grad, path.grad)
def test_no_adjustments():
"""Tests that the calculations for combining signatures don't modify memory they're not supposed to."""
for signature_combine, amount in ((True, 2), (False, 1), (False, 2), (False, 3), (False, 10)):
for signature_grad in (False, True):
for device in h.get_devices():
for batch_size, input_stream, input_channels in h.random_sizes():
for depth in (1, 2, 5):
for inverse in (False, True):
for scalar_term in (False, True):
_test_no_adjustments(signature_combine, amount, device, batch_size, input_stream,
input_channels, depth, inverse, signature_grad, scalar_term)
def _test_no_adjustments(signature_combine, amount, device, batch_size, input_stream, input_channels, depth, inverse,
signature_grad, scalar_term):
paths = []
for _ in range(amount):
paths.append(torch.rand(batch_size, input_stream, input_channels, device=device, dtype=torch.double))
signatures = []
signatures_clone = []
basepoint = False
for path in paths:
signature = iisignature_signature(path, depth, basepoint=basepoint, inverse=inverse, scalar_term=scalar_term)
signatures_clone.append(signature.clone())
if signature_grad:
signature.requires_grad_()
signatures.append(signature)
basepoint = path[:, -1]
if signature_combine:
combined_signatures = signatory.signature_combine(signatures[0], signatures[1], input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
else:
combined_signatures = signatory.multi_signature_combine(signatures, input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
if signature_grad:
grad = torch.rand_like(combined_signatures)
grad_clone = grad.clone()
combined_signatures_clone = combined_signatures.clone()
combined_signatures.backward(grad)
for signature, signature_clone in zip(signatures, signatures_clone):
h.diff(signature, signature_clone)
if signature_grad:
h.diff(grad, grad_clone)
h.diff(combined_signatures, combined_signatures_clone)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
def test_memory_leaks():
"""Checks that there are no memory leaks."""
for signature_combine, amount in ((True, 2), (False, 1), (False, 2), (False, 3), (False, 10)):
for signature_grad in (False, True):
for batch_size, input_stream, input_channels in h.random_sizes():
for depth in (1, 2, 5):
for inverse in (False, True):
for scalar_term in (False, True):
_test_memory_leaks(signature_combine, amount, batch_size, input_stream, input_channels,
depth, inverse, signature_grad, scalar_term)
def _test_memory_leaks(signature_combine, amount, batch_size, input_stream, input_channels, depth, inverse,
signature_grad, scalar_term):
def one_iteration():
gc.collect()
torch.cuda.synchronize()
torch.cuda.reset_max_memory_allocated()
paths = []
for _ in range(amount):
paths.append(torch.rand(batch_size, input_stream, input_channels, device='cuda', dtype=torch.double))
signatures = []
basepoint = False
for path in paths:
signature = iisignature_signature(path, depth, basepoint=basepoint, inverse=inverse,
scalar_term=scalar_term)
if signature_grad:
signature.requires_grad_()
signatures.append(signature)
if signature_combine:
combined_signatures = signatory.signature_combine(signatures[0], signatures[1], input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
else:
combined_signatures = signatory.multi_signature_combine(signatures, input_channels, depth,
inverse=inverse, scalar_term=scalar_term)
if signature_grad:
grad = torch.rand_like(combined_signatures)
combined_signatures.backward(grad)
torch.cuda.synchronize()
return torch.cuda.max_memory_allocated()
memory_used = one_iteration()
for repeat in range(10):
assert one_iteration() <= memory_used
|
scripts/local.datalake.RemoveIamAllowedPrincipals.py | avrios/data-lake-as-code | 106 | 12679416 |
import logging
import boto3
from botocore.exceptions import ClientError
lf = boto3.client('lakeformation')
permissions = []
permissionResp = lf.list_permissions()
permissions.extend(permissionResp['PrincipalResourcePermissions'])
while 'NextToken' in permissionResp:
print(permissionResp)
permissionResp = lf.list_permissions(NextToken=permissionResp['NextToken'])
permissions.extend(permissionResp['PrincipalResourcePermissions'])
progress = 0
for grant in permissions:
print("\r"+str(progress)+"/"+str(len(permissions)), end='')
progress += 1
if(grant['Principal']['DataLakePrincipalIdentifier'] == "IAM_ALLOWED_PRINCIPALS"):
lf.revoke_permissions(
Principal=grant["Principal"],
Resource=grant["Resource"],
Permissions=grant["Permissions"],
PermissionsWithGrantOption=grant["PermissionsWithGrantOption"]
)
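# Hypothetical hardening sketch (not in the original script): ClientError is imported above
# but never used; wrapping the revoke call like this would let one failing grant be logged
# and skipped instead of aborting the whole sweep.
def revoke_quietly(client, grant):
    try:
        client.revoke_permissions(
            Principal=grant["Principal"],
            Resource=grant["Resource"],
            Permissions=grant["Permissions"],
            PermissionsWithGrantOption=grant["PermissionsWithGrantOption"]
        )
    except ClientError as err:
        logging.warning("could not revoke grant for %s: %s",
                        grant["Principal"]["DataLakePrincipalIdentifier"], err)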
|
tests/test_restfulgit.py | msuszko/restfulgit | 103 | 12679417 |
# coding=utf-8
import unittest
from hashlib import sha512
import os
import os.path
import io
from base64 import b64decode
from contextlib import contextmanager
from datetime import timedelta
from tempfile import mkdtemp, mkstemp
from shutil import rmtree
from subprocess import check_call
from json import load as load_json_file
from time import time as time_now
from flask_testing import TestCase as _FlaskTestCase
import pygit2
from restfulgit.app_factory import create_app
RESTFULGIT_REPO = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
PARENT_DIR_OF_RESTFULGIT_REPO = os.path.abspath(os.path.join(RESTFULGIT_REPO, '..'))
CONFIG_FILE = os.path.join(RESTFULGIT_REPO, 'example_config.py')
TEST_SUBDIR = os.path.join(RESTFULGIT_REPO, 'tests')
FIXTURES_DIR = os.path.join(TEST_SUBDIR, 'fixtures')
GIT_MIRROR_DESCRIPTION_FILEPATH = os.path.join(RESTFULGIT_REPO, 'description')
NORMAL_CLONE_DESCRIPTION_FILEPATH = os.path.join(RESTFULGIT_REPO, '.git', 'description')
FIRST_COMMIT = "07b9bf1540305153ceeb4519a50b588c35a35464"
TREE_OF_FIRST_COMMIT = "6ca22167185c31554aa6157306e68dfd612d6345"
BLOB_FROM_FIRST_COMMIT = "ae9d90706c632c26023ce599ac96cb152673da7c"
TAG_FOR_FIRST_COMMIT = "1dffc031c9beda43ff94c526cbc00a30d231c079"
FIFTH_COMMIT = "c04112733fe2db2cb2f179fca1a19365cf15fef5"
EMPTY_COMMIT = "c8ec343d7260ba9577045a05bccd931867644f28"
IMPROBABLE_SHA = "f" * 40
def delete_file_quietly(filepath):
try:
os.remove(filepath)
except EnvironmentError as err:
pass
class _RestfulGitTestCase(_FlaskTestCase):
def create_app(self):
app = create_app()
app.config.from_pyfile(CONFIG_FILE)
app.config['RESTFULGIT_REPO_BASE_PATH'] = PARENT_DIR_OF_RESTFULGIT_REPO
return app
def assertJsonError(self, resp):
json = resp.json
self.assertIsInstance(json, dict)
self.assertIsInstance(json.get('error'), str)
def assertJson400(self, resp):
self.assert400(resp)
self.assertJsonError(resp)
def assertJson404(self, resp):
self.assert404(resp)
self.assertJsonError(resp)
def assertContentTypeIsDiff(self, resp):
self.assertEqual(resp.headers.get_all('Content-Type'), ['text/x-diff; charset=utf-8'])
@contextmanager
def config_override(self, key, val):
orig_val = self.app.config[key]
self.app.config[key] = val
try:
yield
finally:
self.app.config[key] = orig_val
def get_fixture_path(self, filename):
return os.path.join(FIXTURES_DIR, filename)
def _get_fixture_bytes(self, filename):
filepath = self.get_fixture_path(filename)
with open(filepath, 'rb') as fixture_file:
content = fixture_file.read()
return content
def assertBytesEqualFixture(self, text, fixture):
self.assertEqual(text, self._get_fixture_bytes(fixture))
@contextmanager
def temporary_file(self, suffix=''):
file_descriptor, filepath = mkstemp(suffix=suffix)
file_obj = os.fdopen(file_descriptor, 'wb')
try:
yield file_obj, filepath
finally:
if not file_obj.closed:
file_obj.close()
delete_file_quietly(filepath)
@contextmanager
def temporary_directory(self, suffix=''):
temp_dir = mkdtemp(suffix=suffix)
try:
yield temp_dir
finally:
rmtree(temp_dir)
def make_nested_dir(self, extant_parent, new_child):
new_dir = os.path.join(extant_parent, new_child)
os.mkdir(new_dir)
return new_dir
_MINUTE = 60
@property
def _author(self):
sig = pygit2.Signature('Alien Celebrity', '<EMAIL>', time=self._time, offset=0)
self._time += self._MINUTE
return sig
def _tree(self, repo, name):
blob_oid = repo.create_blob(name)
tree_builder = repo.TreeBuilder()
tree_builder.insert(name, blob_oid, pygit2.GIT_FILEMODE_BLOB)
tree_oid = tree_builder.write()
return tree_oid
def _commit(self, repo, name, parents=(), with_branch=False):
ref_name = None
commit_oid = repo.create_commit(ref_name, self._author, self._author, name, self._tree(repo, name), list(parents))
if with_branch:
repo.create_branch(name, repo[commit_oid])
return commit_oid
@property
@contextmanager
def _empty_repo(self):
with self.temporary_directory(suffix='.restfulgit') as temp_repos_dir:
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = temp_repos_dir
repo_dir = os.path.join(temp_repos_dir, 'example')
os.mkdir(repo_dir)
repo = pygit2.init_repository(repo_dir, False)
yield repo
@property
@contextmanager
def _base_repo_and_commit(self):
self._time = 0
with self._empty_repo as repo:
# first commit A
a = self._commit(repo, "A", with_branch=True)
yield repo, a
@contextmanager
def _example_repo(self, b_before_e=True):
"""
Sets up an example repo with the following commits:
[A]--B--C--D--[I aka J]
\--E--F--G--/
\---[H]
[X]s denote commits that are branch tips
"""
with self._base_repo_and_commit as pair:
repo, a = pair
def make_bcd():
b = self._commit(repo, "B", [a])
c = self._commit(repo, "C", [b])
d = self._commit(repo, "D", [c])
                return b, c, d
def make_efg():
e = self._commit(repo, "E", [a])
f = self._commit(repo, "F", [e])
g = self._commit(repo, "G", [f])
return e, f, g
if b_before_e:
b, c, d = make_bcd()
e, f, g = make_efg()
else:
e, f, g = make_efg()
b, c, d = make_bcd()
# H branch
h = self._commit(repo, "H", [e], with_branch=True)
# I branch, from D & G
i = self._commit(repo, "I", [d, g], with_branch=True)
yield dict(locals())
class RepoKeyTestCase(_RestfulGitTestCase):
def test_nonexistent_directory(self):
resp = self.client.get('/repos/this-directory-does-not-exist/git/commits/')
self.assertJson404(resp)
def test_directory_is_not_git_repo(self):
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = RESTFULGIT_REPO
resp = self.client.get('/repos/test/git/commits/')
self.assertJson404(resp)
def test_dot_dot_disallowed(self):
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = TEST_SUBDIR
resp = self.client.get('/repos/../git/commits/')
self.assertJson404(resp)
def test_list_repos(self):
resp = self.client.get('/repos/')
self.assert200(resp)
result = resp.json
self.assertIsInstance(result, list)
repo_list = [repo['name'] for repo in result]
self.assertIn('restfulgit', repo_list)
for repo in result:
if repo['name'] == 'restfulgit':
self.assertEqual(
repo,
{
"name": 'restfulgit',
"full_name": 'restfulgit',
"description": None,
"url": 'http://localhost/repos/restfulgit/',
"branches_url": "http://localhost/repos/restfulgit/branches{/branch}",
"tags_url": "http://localhost/repos/restfulgit/tags/",
"blobs_url": "http://localhost/repos/restfulgit/git/blobs{/sha}",
"git_tags_url": "http://localhost/repos/restfulgit/git/tags{/sha}",
"git_refs_url": "http://localhost/repos/restfulgit/git/refs{/sha}",
"trees_url": "http://localhost/repos/restfulgit/git/trees{/sha}",
# "compare_url": "http://localhost/repos/restfulgit/compare/{base}...{head}",
# "contributors_url": "http://localhost/repos/restfulgit/contributors",
# "contents_url": "http://localhost/repos/restfulgit/contents/{+path}",
"commits_url": "http://localhost/repos/restfulgit/commits{/sha}",
"git_commits_url": "http://localhost/repos/restfulgit/git/commits{/sha}",
# "size": N (in what units?)
# "updated_at": "some timestamp"
}
)
def test_deepdir_repos(self):
with self.temporary_directory(suffix='.restfulgit') as temp_repos_dir:
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = temp_repos_dir
pygit2.init_repository(os.path.join(temp_repos_dir, 'onedir/bare.git'), bare=True)
pygit2.init_repository(os.path.join(temp_repos_dir, 'second/more/nested/repo'))
resp = self.client.get('/repos/')
repo_names = {repo['name'] for repo in resp.json}
self.assertEquals(repo_names, {
'onedir/bare.git', 'second/more/nested/repo'})
class SHAConverterTestCase(_RestfulGitTestCase):
def test_empty_sha_rejected(self):
resp = self.client.get('/repos/restfulgit/git/trees/')
self.assertJson404(resp)
def test_too_long_sha_rejected(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}0/'.format(TREE_OF_FIRST_COMMIT))
self.assertJson404(resp)
def test_malformed_sha_rejected(self):
resp = self.client.get('/repos/restfulgit/git/trees/0123456789abcdefghijklmnopqrstuvwxyzABCD/')
self.assertJson404(resp)
def test_full_sha_accepted(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT))
self.assert200(resp)
def test_partial_sha_accepted(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT[:35]))
self.assert200(resp)
class CommitsTestCase(_RestfulGitTestCase):
"""Tests the "commits" endpoint."""
def test_empty_repo(self):
with self._empty_repo:
resp = self.client.get('/repos/example/git/commits/')
self.assert200(resp)
self.assertEqual(resp.json, [])
def test_nonexistent_start_sha(self):
resp = self.client.get('/repos/restfulgit/git/commits/?start_sha=1234567890abcdef')
self.assertJson404(resp)
def test_non_commit_start_sha(self):
resp = self.client.get('/repos/restfulgit/git/commits/?start_sha={}'.format(TREE_OF_FIRST_COMMIT))
self.assertJson400(resp)
def test_malformed_start_sha(self):
resp = self.client.get('/repos/restfulgit/git/commits/?start_sha=thisIsNotHexHash')
self.assertJson400(resp)
def test_start_sha_works_basic(self):
resp = self.client.get('/repos/restfulgit/git/commits?start_sha={}'.format(FIRST_COMMIT), follow_redirects=True)
self.assert200(resp)
def test_nonexistent_ref_name(self):
resp = self.client.get('/repos/restfulgit/git/commits/?ref_name=doesNotExist')
self.assertJson404(resp)
def test_ref_name_works(self):
resp = self.client.get('/repos/restfulgit/git/commits?ref_name=master', follow_redirects=True)
self.assert200(resp)
# FIXME: should be more thorough
def test_non_integer_limit_rejected(self):
resp = self.client.get('/repos/restfulgit/git/commits/?limit=abc123')
self.assertJson400(resp)
def test_negative_limit_rejected(self):
resp = self.client.get('/repos/restfulgit/git/commits/?limit=-1')
self.assertJson400(resp)
def test_limit_works_basic(self):
resp = self.client.get('/repos/restfulgit/git/commits?limit=3', follow_redirects=True)
self.assert200(resp)
def test_limit_and_start_sha_work_full(self):
resp = self.client.get('/repos/restfulgit/git/commits?limit=3&start_sha={}'.format(FIFTH_COMMIT), follow_redirects=True)
self.assert200(resp)
self.assertEqual(
resp.json,
[
{
'author': {
'date': '2013-02-27T03:14:13Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-27T03:14:13Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'add file mode',
'parents': [{
'sha': '326d80cd68ec3413fe6eaca99c52c59ca428a0d0',
'url': 'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'
}],
'sha': 'c04112733fe2db2cb2f179fca1a19365cf15fef5',
'tree': {
'sha': '3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb',
'url': 'http://localhost/repos/restfulgit/git/trees/3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/c04112733fe2db2cb2f179fca1a19365cf15fef5/'
},
{
'author': {
'date': '2013-02-26T09:15:35Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-26T09:15:35Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'Now using a jsonify decorator which returns the correct content-type',
'parents': [{
'sha': '1f51b91ac383806df9d322ae67bbad3364f50811',
'url': 'http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/'
}],
'sha': '326d80cd68ec3413fe6eaca99c52c59ca428a0d0',
'tree': {
'sha': '3f4b1282d80af3f8a51000993968897330635e4f',
'url': 'http://localhost/repos/restfulgit/git/trees/3f4b1282d80af3f8a51000993968897330635e4f/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'
},
{
'author': {
'date': '2013-02-25T12:35:29Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-25T12:35:29Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'Support submodule in tree-listings',
'parents': [{
'sha': 'ff6405b71273b5c2c50d5c33d5cf962af5390542',
'url': 'http://localhost/repos/restfulgit/git/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/'
}],
'sha': '1f51b91ac383806df9d322ae67bbad3364f50811',
'tree': {
'sha': '1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09',
'url': 'http://localhost/repos/restfulgit/git/trees/1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/'
}
]
)
#FIXME: test combos
class MergeBaseTestCase(_RestfulGitTestCase): # NOTE: RestfulGit extension
_INITIAL_COMMIT_JSON = {
'author': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'Initial support for read-only REST api for Git plumbing',
'parents': [],
'sha': '07b9bf1540305153ceeb4519a50b588c35a35464',
'tree': {
'sha': '6ca22167185c31554aa6157306e68dfd612d6345',
'url': 'http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'
}
def _make_another_initial_commit(self):
repo = pygit2.Repository(RESTFULGIT_REPO)
blob_oid = repo.create_blob("First post!")
tree_builder = repo.TreeBuilder()
tree_builder.insert("FirstPost.txt", blob_oid, pygit2.GIT_FILEMODE_BLOB)
tree_oid = tree_builder.write()
author = pygit2.Signature('Alien Celebrity', '<EMAIL>', time=int(time_now()), offset=0)
ref_name = None
parents = []
evil_twin_genesis_commit_oid = repo.create_commit(ref_name, author, author, "Other initial commit", tree_oid, parents)
return evil_twin_genesis_commit_oid
def test_nonexistent_sha_404s(self):
resp = self.client.get('/repos/restfulgit/git/commits/{0}/merge-base/{0}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_unrelateds_is_200_but_null(self):
other_unrelated_initial_commit_oid = self._make_another_initial_commit()
resp = self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIRST_COMMIT, str(other_unrelated_initial_commit_oid)))
self.assert200(resp)
self.assertEqual(resp.json, None)
def test_left(self):
resp = self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIRST_COMMIT, FIFTH_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, self._INITIAL_COMMIT_JSON)
def test_right(self):
resp = self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIFTH_COMMIT, FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, self._INITIAL_COMMIT_JSON)
def test_branch_siblings(self):
with self._example_repo() as commits:
d = str(commits['d'])
g = str(commits['g'])
resp = self.client.get('/repos/example/git/commits/{}/merge-base/{}/'.format(d, g))
self.assert200(resp)
self.assertEqual(resp.json, {
'author': {
'date': '1970-01-01T00:00:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:01:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'A',
'parents': [],
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'tree': {
'sha': '617601c79811cbbae338512798318b4e5b70c9ac',
'url': 'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'
},
'url': 'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
})
def test_same_commit_twice_results_in_same(self):
resp = self.client.get('/repos/restfulgit/git/commits/{0}/merge-base/{0}/'.format(FIFTH_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, {
'author': {
'date': '2013-02-27T03:14:13Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-27T03:14:13Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'add file mode',
'parents': [{
'sha': '326d80cd68ec3413fe6eaca99c52c59ca428a0d0',
'url': 'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'
}],
'sha': 'c04112733fe2db2cb2f179fca1a19365cf15fef5',
'tree': {
'sha': '3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb',
'url': 'http://localhost/repos/restfulgit/git/trees/3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/c04112733fe2db2cb2f179fca1a19365cf15fef5/'
})
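# Hypothetical standalone sketch (not part of the test suite): the same commit endpoint the
# tests below exercise, queried over HTTP against an assumed locally running restfulgit
# instance; the base URL is illustrative.
def _http_client_sketch():
    import requests  # assumed available; not a dependency of this test module
    base = "http://localhost:5000"
    resp = requests.get(base + "/repos/restfulgit/git/commits/{}/".format(FIRST_COMMIT))
    resp.raise_for_status()
    commit = resp.json()
    print(commit["message"], commit["tree"]["sha"])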
class SimpleSHATestCase(_RestfulGitTestCase):
_INITIAL_COMMIT_TREE_JSON = {
"sha": "6ca22167185c31554aa6157306e68dfd612d6345",
"url": "http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/",
"tree": [
{
"mode": "100644",
"type": "blob",
"sha": "ae9d90706c632c26023ce599ac96cb152673da7c",
"path": "api.py",
"size": 5543,
"url": "http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/"
}
]
}
def test_get_commit_with_non_commit_sha(self):
resp = self.client.get('/repos/restfulgit/git/commits/{}/'.format(BLOB_FROM_FIRST_COMMIT))
self.assertJson404(resp)
def test_get_tree_with_blob_sha(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(BLOB_FROM_FIRST_COMMIT))
self.assertJson404(resp)
def test_get_tree_with_commit_sha(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, self._INITIAL_COMMIT_TREE_JSON)
def test_get_tree_with_tag_sha(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(TAG_FOR_FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, self._INITIAL_COMMIT_TREE_JSON)
def test_get_blob_with_non_blob_sha(self):
resp = self.client.get('/repos/restfulgit/git/blobs/{}/'.format(FIRST_COMMIT))
self.assertJson404(resp)
def test_get_tag_with_non_tag_sha(self):
resp = self.client.get('/repos/restfulgit/git/tags/{}/'.format(BLOB_FROM_FIRST_COMMIT))
self.assertJson404(resp)
def test_get_commit_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/git/commits/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_tree_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_blob_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/git/blobs/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_tag_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/git/tags/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_git_commit_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/commits/{}/'.format(FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(
resp.json,
{
"sha": "07b9bf1540305153ceeb4519a50b588c35a35464",
"url": "http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-24T13:25:46Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-24T13:25:46Z"
},
"tree": {
"sha": "6ca22167185c31554aa6157306e68dfd612d6345",
"url": "http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/"
},
"message": "Initial support for read-only REST api for Git plumbing",
"parents": []
}
)
def test_get_empty_git_commit_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/commits/{}/'.format(EMPTY_COMMIT))
self.assert200(resp)
self.assertEqual(
resp.json,
{
"sha": "c8ec343d7260ba9577045a05bccd931867644f28",
"url": "http://localhost/repos/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"tree": {
"sha": "9268fd675df04e7c09bceddaf9dfc38fb78787d2",
"url": "http://localhost/repos/restfulgit/git/trees/9268fd675df04e7c09bceddaf9dfc38fb78787d2/"
},
"message": "Merge pull request #96 from hulu/empty-commit\n\nAdd deliberately empty commit for testing purposes",
"parents": [
{
"sha": "4fb38539d25983c9b9b99588901a1025658d05d4",
"url": "http://localhost/repos/restfulgit/git/commits/4fb38539d25983c9b9b99588901a1025658d05d4/",
},
{
"sha": "6f4fa9af844f69137bfee3c247feec0fb03a3913",
"url": "http://localhost/repos/restfulgit/git/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/",
}
]
}
)
def test_get_tree_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(resp.json, self._INITIAL_COMMIT_TREE_JSON)
def test_get_nested_tree_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e/')
self.assert200(resp)
self.assertEqual(
resp.json,
{
"sha": "fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e",
"url": "http://localhost/repos/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e/",
"tree": [
{
"mode": "100644",
"type": "blob",
"sha": "b5d2ce6a7246f37aaa41e7ce3403b5acd6369914",
"path": ".coveragerc",
"size": 65,
"url": "http://localhost/repos/restfulgit/git/blobs/b5d2ce6a7246f37aaa41e7ce3403b5acd6369914/"
},
{
"mode": "100644",
"type": "blob",
"sha": "cae6643e19e7a8198a26a449f556db6d1909aec8",
"path": ".gitignore",
"size": 22,
"url": "http://localhost/repos/restfulgit/git/blobs/cae6643e19e7a8198a26a449f556db6d1909aec8/"
},
{
"mode": "100644",
"type": "blob",
"sha": "f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3",
"path": ".pep8",
"size": 19,
"url": "http://localhost/repos/restfulgit/git/blobs/f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3/"
},
{
"mode": "100644",
"type": "blob",
"sha": "14e6bf5b229127a5495d9c176f50e3ef1922f0f2",
"path": ".travis.yml",
"size": 985,
"url": "http://localhost/repos/restfulgit/git/blobs/14e6bf5b229127a5495d9c176f50e3ef1922f0f2/"
},
{
"mode": "100644",
"type": "blob",
"sha": "bb27aa0a502f73c19837b96d1bd514ba95e0d404",
"path": "LICENSE.md",
"size": 1056,
"url": "http://localhost/repos/restfulgit/git/blobs/bb27aa0a502f73c19837b96d1bd514ba95e0d404/"
},
{
"mode": "100644",
"type": "blob",
"sha": "342f0ffead9243f5a3514505b83b918e61247ae2",
"path": "README.md",
"size": 5655,
"url": "http://localhost/repos/restfulgit/git/blobs/342f0ffead9243f5a3514505b83b918e61247ae2/"
},
{
"mode": "100644",
"type": "blob",
"sha": "20ff5b895391daa7335cc55be7e3a4da601982da",
"path": "config.conf",
"size": 398,
"url": "http://localhost/repos/restfulgit/git/blobs/20ff5b895391daa7335cc55be7e3a4da601982da/"
},
{
"mode": "100644",
"type": "blob",
"sha": "3e4025298468787af1123191bdddfb72df19061a",
"path": "pylint.rc",
"size": 8529,
"url": "http://localhost/repos/restfulgit/git/blobs/3e4025298468787af1123191bdddfb72df19061a/"
},
{
"mode": "100644",
"type": "blob",
"sha": "77b71e4967983b090aef88ba358724ef4703b01b",
"path": "requirements.txt",
"size": 29,
"url": "http://localhost/repos/restfulgit/git/blobs/77b71e4967983b090aef88ba358724ef4703b01b/"
},
{
"mode": "040000",
"type": "tree",
"sha": "dd8a3571820936595e553c9ba9f776a5c77b1a53",
"path": "restfulgit",
"url": "http://localhost/repos/restfulgit/git/trees/dd8a3571820936595e553c9ba9f776a5c77b1a53/"
},
{
"mode": "040000",
"type": "tree",
"sha": "bdcb3627ba5b29da20f01d9c4571b0ebc6a8b2bd",
"path": "tests",
"url": "http://localhost/repos/restfulgit/git/trees/bdcb3627ba5b29da20f01d9c4571b0ebc6a8b2bd/"
}
]
}
)
def test_get_recursive_tree_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a?recursive=1 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a/?recursive=1')
self.assert200(resp)
self.assertEqual(
resp.json,
{
"sha": "fc36ceb418b0b9e945ffd3706dd8544dd988500a",
"url": "http://localhost/repos/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a/",
"tree": [
{
"mode": "100644",
"type": "blob",
"sha": "b5d2ce6a7246f37aaa41e7ce3403b5acd6369914",
"path": ".coveragerc",
"size": 65,
"url": "http://localhost/repos/restfulgit/git/blobs/b5d2ce6a7246f37aaa41e7ce3403b5acd6369914/"
},
{
"mode": "100644",
"type": "blob",
"sha": "cae6643e19e7a8198a26a449f556db6d1909aec8",
"path": ".gitignore",
"size": 22,
"url": "http://localhost/repos/restfulgit/git/blobs/cae6643e19e7a8198a26a449f556db6d1909aec8/"
},
{
"mode": "100644",
"type": "blob",
"sha": "f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3",
"path": ".pep8",
"size": 19,
"url": "http://localhost/repos/restfulgit/git/blobs/f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3/"
},
{
"mode": "100644",
"type": "blob",
"sha": "b3e1e0f2b569fef46e7413cadb6778504c19c87f",
"path": ".travis.yml",
"size": 1008,
"url": "http://localhost/repos/restfulgit/git/blobs/b3e1e0f2b569fef46e7413cadb6778504c19c87f/"
},
{
"mode": "100644",
"type": "blob",
"sha": "bb27aa0a502f73c19837b96d1bd514ba95e0d404",
"path": "LICENSE.md",
"size": 1056,
"url": "http://localhost/repos/restfulgit/git/blobs/bb27aa0a502f73c19837b96d1bd514ba95e0d404/"
},
{
"mode": "100644",
"type": "blob",
"sha": "ee655c4baa251fad0a67dd74b2c390b4a4f9ac53",
"path": "README.md",
"size": 7855,
"url": "http://localhost/repos/restfulgit/git/blobs/ee655c4baa251fad0a67dd74b2c390b4a4f9ac53/"
},
{
"mode": "100644",
"type": "blob",
"sha": "7186d8fab5c4bb492cbcfe1383b2270651e13c2e",
"path": "example_config.py",
"size": 489,
"url": "http://localhost/repos/restfulgit/git/blobs/7186d8fab5c4bb492cbcfe1383b2270651e13c2e/"
},
{
"mode": "100644",
"type": "blob",
"sha": "abb1a23bc0fad8f7fe1dc5996a8e4c7c4cb9903e",
"path": "pylint.rc",
"size": 8517,
"url": "http://localhost/repos/restfulgit/git/blobs/abb1a23bc0fad8f7fe1dc5996a8e4c7c4cb9903e/"
},
{
"mode": "100644",
"type": "blob",
"sha": "77b71e4967983b090aef88ba358724ef4703b01b",
"path": "requirements.txt",
"size": 29,
"url": "http://localhost/repos/restfulgit/git/blobs/77b71e4967983b090aef88ba358724ef4703b01b/"
},
{
"mode": "040000",
"type": "tree",
"sha": "c0dcf8f58a3c5bf42f07e880d5e442ef124c9370",
"path": "restfulgit",
"url": "http://localhost/repos/restfulgit/git/trees/c0dcf8f58a3c5bf42f07e880d5e442ef124c9370/"
},
{
"mode": "100644",
"type": "blob",
"sha": "7fe178c5687eae1e2c04d9d21b6a429c93a28e6a",
"path": "restfulgit/__init__.py",
"size": 15986,
"url": "http://localhost/repos/restfulgit/git/blobs/7fe178c5687eae1e2c04d9d21b6a429c93a28e6a/"
},
{
"mode": "100644",
"type": "blob",
"sha": "e067d7f361bd3b0f227ba1914c227ebf9539f59d",
"path": "restfulgit/__main__.py",
"size": 110,
"url": "http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/"
},
{
"mode": "040000",
"type": "tree",
"sha": "803c8592dd96cb0a6fc041ebb6af71fbf1f7551c",
"path": "tests",
"url": "http://localhost/repos/restfulgit/git/trees/803c8592dd96cb0a6fc041ebb6af71fbf1f7551c/"
},
{
"mode": "100644",
"type": "blob",
"sha": "2d500fea50b6c1a38d972c1a22b5cb5b5673167a",
"path": "tests/test_restfulgit.py",
"size": 26725,
"url": "http://localhost/repos/restfulgit/git/blobs/2d500fea50b6c1a38d972c1a22b5cb5b5673167a/"
}
]
}
)
def test_get_blob_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/blobs/{}/'.format(BLOB_FROM_FIRST_COMMIT))
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, dict)
self.assertIn("content", json)
self.assertEqual(
sha512(json["content"].encode()).hexdigest(),
'1c846bb4d44c08073c487316a7dc02d97d825aecf50546caf9bf10277c01d17e19860d5f86de877268dd969bd081c7595991c325e0ab492374b956e3a6c9967f'
)
del json["content"]
self.assertEqual(
json,
{
"url": "http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/",
"sha": "ae9d90706c632c26023ce599ac96cb152673da7c",
"encoding": "utf-8", # NOTE: RestfulGit extension
"size": 5543
}
)
def test_get_binary_blob_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283/')
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, dict)
self.assertIn('content', json)
content = json['content']
del json['content']
self.assertBytesEqualFixture(b64decode(content), 'example.png')
self.assertEqual(
json,
{
"sha": "79fbf74e9d9f752c901c956e958845a308c44283",
"size": 1185,
"url": "http://localhost/repos/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283/",
"encoding": "base64"
}
)
def test_get_tag_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079 with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/tags/{}/'.format(TAG_FOR_FIRST_COMMIT))
self.assert200(resp)
self.assertEqual(
resp.json,
{
"sha": "1dffc031c9beda43ff94c526cbc00a30d231c079",
"url": "http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/",
"tagger": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-09-28T01:14:09Z"
},
"object": {
"sha": "07b9bf1540305153ceeb4519a50b588c35a35464",
"type": "commit",
"url": "http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/"
},
"tag": "initial",
"message": "initial commit\n"
}
)
def test_get_repos_tag_works(self): # NOTE: RestfulGit extension
resp = self.client.get('/repos/restfulgit/tags/initial/')
self.assert200(resp)
self.assertEqual(resp.json, {
'commit': {
'author': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'Initial support for read-only REST api for Git plumbing',
'parents': [],
'sha': '07b9bf1540305153ceeb4519a50b588c35a35464',
'tree': {
'sha': '6ca22167185c31554aa6157306e68dfd612d6345',
'url': 'http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'
},
'committer': {
'date': '2013-02-24T13:25:46Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [],
'sha': '07b9bf1540305153ceeb4519a50b588c35a35464',
'url': 'http://localhost/repos/restfulgit/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'
},
'name': 'initial',
'tag': {
'message': 'initial commit\n',
'object': {
'sha': '07b9bf1540305153ceeb4519a50b588c35a35464',
'type': 'commit',
'url': 'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'
},
'sha': '1dffc031c9beda43ff94c526cbc00a30d231c079',
'tag': 'initial',
'tagger': {
'date': '2013-09-28T01:14:09Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'url': 'http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/'
},
'url': 'http://localhost/repos/restfulgit/tags/initial/'
})
def test_get_repos_tag_with_nonexistent_tag(self): # NOTE: RestfulGit extension
resp = self.client.get('/repos/restfulgit/tags/this-tag-does-not-exist/')
self.assertJson404(resp)
def test_get_repo_tags_works(self):
# From https://api.github.com/repos/hulu/restfulgit/tags with necessary adjustments
reference_tag = {
"name": "initial",
"commit": {
"sha": "07b9bf1540305153ceeb4519a50b588c35a35464",
"url": "http://localhost/repos/restfulgit/commits/07b9bf1540305153ceeb4519a50b588c35a35464/"
},
"url": "http://localhost/repos/restfulgit/tags/initial/", # NOTE: RestfulGit extension
}
resp = self.client.get('/repos/restfulgit/tags/')
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, list)
for tag in json:
self.assertIsInstance(tag, dict)
self.assertIn('name', tag)
initial_tags = [tag for tag in json if tag['name'] == 'initial']
self.assertEqual(len(initial_tags), 1)
initial_tag = initial_tags[0]
self.assertEqual(reference_tag, initial_tag)
def test_get_repo_tags_with_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/tags/')
self.assertJson404(resp)
def test_get_repo_tags_with_empty_repo(self):
with self._empty_repo:
resp = self.client.get('/repos/example/tags/')
self.assert200(resp)
self.assertEqual(resp.json, [])
def test_get_repo_branches_works(self):
# From https://api.github.com/repos/hulu/restfulgit/branches with necessary adjustments
reference_branch = {
"name": "ambiguous",
"commit": {
"sha": "1f51b91ac383806df9d322ae67bbad3364f50811",
"url": "http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/"
}
}
resp = self.client.get('/repos/restfulgit/branches/')
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, list)
for branch in json:
self.assertIsInstance(branch, dict)
self.assertIn('name', branch)
ambiguous_branches = [branch for branch in json if branch['name'] == 'ambiguous']
self.assertEqual(len(ambiguous_branches), 1)
ambiguous_branch = ambiguous_branches[0]
self.assertEqual(reference_branch, ambiguous_branch)
def test_get_repo_branches_with_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/branches/')
self.assertJson404(resp)
def test_get_repo_branches_with_empty_repo(self):
with self._empty_repo:
resp = self.client.get('/repos/example/branches/')
self.assert200(resp)
self.assertEqual(resp.json, [])
def test_get_repo_branch_works(self):
# From https://api.github.com/repos/hulu/restfulgit/branches/ambiguous with necessary adjustments
reference = {
"name": "ambiguous",
"commit": {
"sha": "1f51b91ac383806df9d322ae67bbad3364f50811",
"commit": {
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-25T12:35:29Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-25T12:35:29Z"
},
"message": "Support submodule in tree-listings",
"tree": {
"sha": "1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09",
"url": "http://localhost/repos/restfulgit/git/trees/1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09/"
},
"url": "http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/",
"sha": "1f51b91ac383806df9d322ae67bbad3364f50811", # NOTE: RestfulGit extension
"parents": [ # NOTE: RestfulGit extension
{
"sha": "ff6405b71273b5c2c50d5c33d5cf962af5390542",
"url": "http://localhost/repos/restfulgit/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/",
}
]
},
"url": "http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-25T12:35:29Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-02-25T12:35:29Z"
},
"parents": [
{
"sha": "ff6405b71273b5c2c50d5c33d5cf962af5390542",
"url": "http://localhost/repos/restfulgit/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/",
}
]
},
"_links": {
"self": "http://localhost/repos/restfulgit/branches/ambiguous/",
},
'url': 'http://localhost/repos/restfulgit/branches/ambiguous/'
}
resp = self.client.get('/repos/restfulgit/branches/ambiguous/')
self.assert200(resp)
json = resp.json
self.assertEqual(reference, json)
def test_get_repo_branch_with_nonexistent_branch(self):
resp = self.client.get('/repos/restfulgit/branches/this-branch-does-not-exist/')
self.assertJson404(resp)
def test_get_merged_branches_inclusion(self):
resp = self.client.get('/repos/restfulgit/branches/master/merged/')
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, list)
for item in json:
self.assertIsInstance(item, dict)
self.assertIn('name', item)
branch_names = {item['name'] for item in json}
self.assertIn('ambiguous', branch_names)
def test_get_merged_branches_format(self):
resp = self.client.get('/repos/restfulgit/branches/master/merged/')
self.assert200(resp)
json = resp.json
self.assertIsInstance(json, list)
for item in json:
self.assertIsInstance(item, dict)
self.assertIn('name', item)
name_to_branch = {item['name']: item for item in json}
reference = {
"name": "ambiguous",
"commit": {
"sha": "1f51b91ac383806df9d322ae67bbad3364f50811",
"url": "http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/",
}
}
self.assertEqual(reference, name_to_branch.get('ambiguous'))
def test_get_merged_branches_exclusion(self):
resp = self.client.get('/repos/restfulgit/branches/ambiguous/merged/')
self.assert200(resp)
branches = {branch['name'] for branch in resp.json}
self.assertNotIn('master', branches)
def test_get_merged_branches_with_nonexistent_branch(self):
resp = self.client.get('/repos/restfulgit/branches/this-branch-does-not-exist/merged/')
self.assertJson404(resp)
def test_get_repo_commit_works(self):
# From https://api.github.com/repos/hulu/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16 with necessary adjustments
reference = {
"sha": "d408fc2428bc6444cabd7f7b46edbe70b6992b16",
"commit": {
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-04-21T11:20:14Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-04-21T11:20:14Z"
},
"message": "Added requirements.txt + more README",
"tree": {
"sha": "e49e456564f8d852f430c1d0028a9d6560e3f3e9",
"url": "http://localhost/repos/restfulgit/git/trees/e49e456564f8d852f430c1d0028a9d6560e3f3e9/"
},
"url": "http://localhost/repos/restfulgit/git/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/",
"sha": "d408fc2428bc6444cabd7f7b46edbe70b6992b16", # NOTE: RestfulGit extension
"parents": [ # NOTE: RestfulGit extension
{
"sha": "c92de24597eff312bbdd5a70059665a2e3000590",
"url": "http://localhost/repos/restfulgit/commits/c92de24597eff312bbdd5a70059665a2e3000590/",
}
],
},
"url": "http://localhost/repos/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-04-21T11:20:14Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2013-04-21T11:20:14Z"
},
"parents": [
{
"sha": "c92de24597eff312bbdd5a70059665a2e3000590",
"url": "http://localhost/repos/restfulgit/commits/c92de24597eff312bbdd5a70059665a2e3000590/",
}
],
"stats": {
"total": 10,
"additions": 10,
"deletions": 0
},
"files": [
{
"sha": "c65dc8c22cc3dc5d37a1c39e5a9f336f1dd6fe34",
"filename": "README.md",
"old_filename": "README.md", # NOTE: RestfulGit extension
"status": "modified",
"additions": 5,
"deletions": 0,
"changes": 5,
"raw_url": "http://localhost/repos/restfulgit/raw/d408fc2428bc6444cabd7f7b46edbe70b6992b16/README.md",
"contents_url": "http://localhost/repos/restfulgit/contents/README.md?ref=d408fc2428bc6444cabd7f7b46edbe70b6992b16",
"patch": "@@ -4,6 +4,11 @@ REST API for Git data\n Provides a read-only restful interface for accessing data from Git repositories (local to the server).\n Modeled off the GitHub Git DB API for compatibility (see http://developer.github.com/v3/git/).\n \n+Requires: flask, pygit2 (>= 0.18.1), libgit2 (>= 0.18).\n+Must modify: REPO_BASE (root path for repositories, note only repositories immediately under this path are currently supported).\n+\n+api.py is a valid WSGI application.\n+\n --\n \n All of these routes return JSON unless otherwise specified."
},
{
"sha": "da23f6c1cf961369faa90c8c4f4c242a09205ce6",
"filename": "requirements.txt",
"old_filename": "requirements.txt", # NOTE: RestfulGit extension
"status": "added",
"additions": 5,
"deletions": 0,
"changes": 5,
"raw_url": "http://localhost/repos/restfulgit/raw/d408fc2428bc6444cabd7f7b46edbe70b6992b16/requirements.txt",
"contents_url": "http://localhost/repos/restfulgit/contents/requirements.txt?ref=d408fc2428bc6444cabd7f7b46edbe70b6992b16",
"patch": "@@ -0,0 +1,5 @@\n+Flask==0.9\n+Jinja2==2.6\n+Werkzeug==0.8.3\n+pygit2==0.18.1\n+wsgiref==0.1.2"
}
]
}
resp = self.client.get('/repos/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/')
self.assert200(resp)
self.assertEqual(reference, resp.json)
def test_get_empty_repo_commit(self):
# From https://api.github.com/repos/hulu/restfulgit/commits/c8ec343d7260ba9577045a05bccd931867644f28 with necessary adjustments
reference = {
"sha": "c8ec343d7260ba9577045a05bccd931867644f28",
"commit": {
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"message": "Merge pull request #96 from hulu/empty-commit\n\nAdd deliberately empty commit for testing purposes",
"parents": [
{
"sha": "4fb38539d25983c9b9b99588901a1025658d05d4",
"url": "http://localhost/repos/restfulgit/commits/4fb38539d25983c9b9b99588901a1025658d05d4/"
},
{
"sha": "6f4fa9af844f69137bfee3c247feec0fb03a3913",
"url": "http://localhost/repos/restfulgit/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/"
}
],
"sha": "c8ec343d7260ba9577045a05bccd931867644f28",
"tree": {
"sha": "9268fd675df04e7c09bceddaf9dfc38fb78787d2",
"url": "http://localhost/repos/restfulgit/git/trees/9268fd675df04e7c09bceddaf9dfc38fb78787d2/"
},
"url": "http://localhost/repos/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28/",
},
"url": "http://localhost/repos/restfulgit/commits/c8ec343d7260ba9577045a05bccd931867644f28/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2015-01-07T20:15:08Z"
},
"parents": [
{
"sha": "4fb38539d25983c9b9b99588901a1025658d05d4",
"url": "http://localhost/repos/restfulgit/commits/4fb38539d25983c9b9b99588901a1025658d05d4/",
},
{
"sha": "6f4fa9af844f69137bfee3c247feec0fb03a3913",
"url": "http://localhost/repos/restfulgit/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/",
}
],
"stats": {
"total": 0,
"additions": 0,
"deletions": 0
},
"files": []
}
resp = self.client.get('/repos/restfulgit/commits/{}/'.format(EMPTY_COMMIT))
self.assert200(resp)
self.assertEqual(reference, resp.json)
def test_get_repo_commit_involving_file_rename_works(self):
self.maxDiff = None
# From https://api.github.com/repos/hulu/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e with necessary adjustments
reference = {
"sha": "d3ebb7b3eec6ce13fbe77025c8b0e0240031379e",
"commit": {
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2014-06-27T22:39:07Z"
},
"committer": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2014-06-27T22:39:07Z"
},
"message": "somewhat arbitrarily rename one of the test fixtures",
'sha': 'd3ebb7b3eec6ce13fbe77025c8b0e0240031379e',
'parents': [{
'sha': 'e8617a0c479f44e0b677481c2223995b5a8fa623',
'url': 'http://localhost/repos/restfulgit/commits/e8617a0c479f44e0b677481c2223995b5a8fa623/'
}],
"tree": {
"sha": "fffee3c6675060068f95c1c61ca5fa4db8595c0e",
"url": "http://localhost/repos/restfulgit/git/trees/fffee3c6675060068f95c1c61ca5fa4db8595c0e/"
},
"url": "http://localhost/repos/restfulgit/git/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/",
},
"url": "http://localhost/repos/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/",
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": "2014-06-27T22:39:07Z"
},
"committer": {
'date': '2014-06-27T22:39:07Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
"parents": [{
"sha": "e8617a0c479f44e0b677481c2223995b5a8fa623",
"url": "http://localhost/repos/restfulgit/commits/e8617a0c479f44e0b677481c2223995b5a8fa623/",
}],
"stats": {
"total": 2,
"additions": 1,
"deletions": 1
},
"files": [
{
"sha": "45a751524f43f703d5e776d48a1c495ae9e34b3e",
"filename": "tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff",
'old_filename': 'tests/fixtures/initial_c04112733fe2db2cb2f179fca1a19365cf15fef5_context_1.diff', # NOTE: RestfulGit extension
"status": "renamed",
"additions": 0,
"deletions": 0,
"changes": 0,
"raw_url": "http://localhost/repos/restfulgit/raw/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff",
"contents_url": "http://localhost/repos/restfulgit/contents/tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff?ref=d3ebb7b3eec6ce13fbe77025c8b0e0240031379e"
},
{
"sha": "d6d92aa58b97f090596c2b5afe30ac40e4f8e0b3",
"filename": "tests/test_restfulgit.py",
"old_filename": "tests/test_restfulgit.py", # NOTE: RestfulGit extension
"status": "modified",
"additions": 1,
"deletions": 1,
"changes": 2,
"raw_url": "http://localhost/repos/restfulgit/raw/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/tests/test_restfulgit.py",
"contents_url": "http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=d3ebb7b3eec6ce13fbe77025c8b0e0240031379e",
"patch": "@@ -2274,7 +2274,7 @@ class CompareTestCase(_RestfulGitTestCase):\n resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=1'.format('initial', FIFTH_COMMIT))\n self.assert200(resp)\n self.assertContentTypeIsDiff(resp)\n- self.assertBytesEqualFixture(resp.get_data(), 'initial_c04112733fe2db2cb2f179fca1a19365cf15fef5_context_1.diff')\n+ self.assertBytesEqualFixture(resp.get_data(), 'initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff')\n \n \n class ContributorsTestCase(_RestfulGitTestCase):"
}
]
}
resp = self.client.get('/repos/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/')
self.assert200(resp)
self.assertEqual(reference, resp.json)
def test_get_repo_commit_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/commits/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_diff_works(self):
resp = self.client.get('/repos/restfulgit/commit/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff')
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertBytesEqualFixture(resp.get_data(), 'd408fc2428bc6444cabd7f7b46edbe70b6992b16.diff')
def test_get_diff_of_empty_commit(self):
resp = self.client.get('/repos/restfulgit/commit/{}.diff'.format(EMPTY_COMMIT))
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
# Verified against https://github.com/hulu/restfulgit/commit/c8ec343d7260ba9577045a05bccd931867644f28.diff
self.assertEqual(resp.get_data(), b'')
def test_get_diff_with_parentless_commit(self): # NOTE: RestfulGit extension; GitHub gives a 404 in this case
resp = self.client.get('/repos/restfulgit/commit/07b9bf1540305153ceeb4519a50b588c35a35464.diff')
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertBytesEqualFixture(resp.get_data(), '07b9bf1540305153ceeb4519a50b588c35a35464.diff')
def test_get_diff_with_nonexistent_sha(self):
resp = self.client.get('/repos/restfulgit/commit/{}.diff'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_get_diff_involving_binary_file(self):
# From https://github.com/hulu/restfulgit/commit/88edac1a3a55c04646ccc963fdada0e194ed5926.diff
resp = self.client.get('/repos/restfulgit/commit/88edac1a3a55c04646ccc963fdada0e194ed5926.diff')
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertBytesEqualFixture(resp.get_data(), '88edac1a3a55c04646ccc963fdada0e194ed5926.diff')
def test_get_diff_with_merge_commit(self):
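# FIXME: implement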
pass
class RefsTestCase(_RestfulGitTestCase):
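# Tests for the ref listing endpoints under /repos/<repo>/git/refs/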
def test_get_refs_works(self):
# From https://api.github.com/repos/hulu/restfulgit/git/refs with necessary adjustments
reference_initial_tag_ref = {
"ref": "refs/tags/initial",
"url": "http://localhost/repos/restfulgit/git/refs/tags/initial",
"object": {
"sha": "1dffc031c9beda43ff94c526cbc00a30d231c079",
"type": "tag",
"url": "http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/"
}
}
reference_ambiguous_branch_ref = {
"ref": "refs/heads/ambiguous",
"url": "http://localhost/repos/restfulgit/git/refs/heads/ambiguous",
"object": {
"sha": "1f51b91ac383806df9d322ae67bbad3364f50811",
"type": "commit",
"url": "http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/"
}
}
resp = self.client.get('/repos/restfulgit/git/refs/')
self.assert200(resp)
ref_list = resp.json
self.assertIsInstance(ref_list, list)
self.assertIn(reference_initial_tag_ref, ref_list)
self.assertIn(reference_ambiguous_branch_ref, ref_list)
def test_empty_repo(self):
with self._empty_repo:
resp = self.client.get('/repos/example/git/refs/')
self.assert200(resp)
self.assertEqual(resp.json, [])
def test_invalid_ref_path(self):
resp = self.client.get('/repos/restfulgit/git/refs/this_ref/path_does/not_exist')
self.assert200(resp)
self.assertEqual([], resp.json)
def test_valid_specific_ref_path(self):
# From https://api.github.com/repos/hulu/restfulgit/git/refs/tags/initial with necessary adjustments
resp = self.client.get('/repos/restfulgit/git/refs/tags/initial')
self.assert200(resp)
self.assertEqual(
resp.json,
{
"url": "http://localhost/repos/restfulgit/git/refs/tags/initial",
"object": {
"url": "http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/",
"sha": "1dffc031c9beda43ff94c526cbc00a30d231c079",
"type": "tag"
},
"ref": "refs/tags/initial"
}
)
class RawFileTestCase(_RestfulGitTestCase):
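# Tests for raw file retrieval via /repos/<repo>/raw/<ref>/<path> by branch, tag, or commit SHA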
def test_nonexistent_branch(self):
resp = self.client.get('/repos/restfulgit/raw/this-branch-does-not-exist/LICENSE.md')
self.assertJson404(resp)
def test_nonexistent_file_path(self):
resp = self.client.get('/repos/restfulgit/raw/master/this_path/does_not/exist.txt')
self.assertJson404(resp)
def test_mime_type_logic(self):
# FIXME: implement
pass
def test_tags_trump_branches(self):
# branch "ambiguous" = commit 1f51b91
# api.py's SHA-512 = e948e8d0b0d0703d972279382a002c90040ff19d636e96927262d63e1f1429526539ea781744d2f3a65a5938b59e0c5f57adadc26f797480efcfc6f7dcff3d81
# tag "ambiguous" = commit ff6405b
# api.py's SHA-512 = a50e02753d282c0e35630bbbc16a525ea4e0b2e2af668135b603c8e1467c7269bcbe9075886baf3f08ce195a7eab1e0b8179080af08a2c0f3eda3b9518650fa1
resp = self.client.get("/repos/restfulgit/raw/ambiguous/api.py")
self.assert200(resp)
self.assertEqual(
'a50e02753d282c0e35630bbbc16a525ea4e0b2e2af668135b603c8e1467c7269bcbe9075886baf3f08ce195a7eab1e0b8179080af08a2c0f3eda3b9518650fa1',
sha512(resp.data).hexdigest()
)
def test_sha_works(self):
resp = self.client.get('/repos/restfulgit/raw/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/api.py')
self.assert200(resp)
self.assertEqual(
'0229e0a11f6a3c8c9b84c50ecbd54d476edf5c0767137e37526d1961210530aa6bd93f67a70bd4ea1998d65cdbe74c7fd8b90482ef5cbdf244cc697e3135e497',
sha512(resp.data).hexdigest()
)
def test_tag_works(self):
resp = self.client.get('/repos/restfulgit/raw/initial/api.py')
self.assert200(resp)
self.assertEqual(
'1c846bb4d44c08073c487316a7dc02d97d825aecf50546caf9bf10277c01d17e19860d5f86de877268dd969bd081c7595991c325e0ab492374b956e3a6c9967f',
sha512(resp.data).hexdigest()
)
def test_branch_works(self):
resp = self.client.get('/repos/restfulgit/raw/master/LICENSE.md')
self.assert200(resp)
self.assertEqual(
'7201955547d83fb4e740adf52d95c3044591ec8b60e4a136f5486a05d1dfaac2bd44d4546830cf0f32d05b40ce5928d0b3f71e0b2628488ea0db1427a6dd2988',
sha512(resp.data).hexdigest()
)
class RepositoryInfoCase(_RestfulGitTestCase):
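# Tests for the repository info endpoint /repos/<repo>/, including handling of the git 'description' file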
def test_no_description_file(self):
delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)
delete_file_quietly(GIT_MIRROR_DESCRIPTION_FILEPATH)
resp = self.client.get('/repos/restfulgit/')
self.assert200(resp)
self.assertEqual(
resp.json,
{
'blobs_url': 'http://localhost/repos/restfulgit/git/blobs{/sha}',
'branches_url': 'http://localhost/repos/restfulgit/branches{/branch}',
'commits_url': 'http://localhost/repos/restfulgit/commits{/sha}',
'description': None,
'full_name': 'restfulgit',
'git_commits_url': 'http://localhost/repos/restfulgit/git/commits{/sha}',
'git_refs_url': 'http://localhost/repos/restfulgit/git/refs{/sha}',
'git_tags_url': 'http://localhost/repos/restfulgit/git/tags{/sha}',
'name': 'restfulgit',
'tags_url': 'http://localhost/repos/restfulgit/tags/',
'trees_url': 'http://localhost/repos/restfulgit/git/trees{/sha}',
'url': 'http://localhost/repos/restfulgit/',
}
)
def test_default_description_file(self):
with io.open(NORMAL_CLONE_DESCRIPTION_FILEPATH, mode='wt', encoding='utf-8') as description_file:
description_file.write("Unnamed repository; edit this file 'description' to name the repository.\n")
try:
resp = self.client.get('/repos/restfulgit/')
self.assert200(resp)
self.assertEqual(
resp.json,
{
'blobs_url': 'http://localhost/repos/restfulgit/git/blobs{/sha}',
'branches_url': 'http://localhost/repos/restfulgit/branches{/branch}',
'commits_url': 'http://localhost/repos/restfulgit/commits{/sha}',
'description': None,
'full_name': 'restfulgit',
'git_commits_url': 'http://localhost/repos/restfulgit/git/commits{/sha}',
'git_refs_url': 'http://localhost/repos/restfulgit/git/refs{/sha}',
'git_tags_url': 'http://localhost/repos/restfulgit/git/tags{/sha}',
'name': 'restfulgit',
'tags_url': 'http://localhost/repos/restfulgit/tags/',
'trees_url': 'http://localhost/repos/restfulgit/git/trees{/sha}',
'url': 'http://localhost/repos/restfulgit/',
}
)
finally:
delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)
def test_dot_dot_disallowed(self):
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = TEST_SUBDIR
resp = self.client.get('/repos/../')
self.assertJson404(resp)
def test_nonexistent_repo(self):
self.app.config['RESTFULGIT_REPO_BASE_PATH'] = RESTFULGIT_REPO
resp = self.client.get('/repos/test/')
self.assertJson404(resp)
def test_works_normal_clone(self):
description = "REST API for Git data\n"
with io.open(NORMAL_CLONE_DESCRIPTION_FILEPATH, mode='wt', encoding='utf-8') as description_file:
description_file.write(description)
try:
resp = self.client.get('/repos/restfulgit/')
self.assertEqual(
resp.json,
{
'blobs_url': 'http://localhost/repos/restfulgit/git/blobs{/sha}',
'branches_url': 'http://localhost/repos/restfulgit/branches{/branch}',
'commits_url': 'http://localhost/repos/restfulgit/commits{/sha}',
'description': description,
'full_name': 'restfulgit',
'git_commits_url': 'http://localhost/repos/restfulgit/git/commits{/sha}',
'git_refs_url': 'http://localhost/repos/restfulgit/git/refs{/sha}',
'git_tags_url': 'http://localhost/repos/restfulgit/git/tags{/sha}',
'name': 'restfulgit',
'tags_url': 'http://localhost/repos/restfulgit/tags/',
'trees_url': 'http://localhost/repos/restfulgit/git/trees{/sha}',
'url': 'http://localhost/repos/restfulgit/',
}
)
finally:
delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)
def test_works_git_mirror(self):
description = "REST API for Git data\n"
with io.open(GIT_MIRROR_DESCRIPTION_FILEPATH, mode='wt', encoding='utf-8') as description_file:
description_file.write(description)
try:
resp = self.client.get('/repos/restfulgit/')
self.assertEqual(
resp.json,
{
'blobs_url': 'http://localhost/repos/restfulgit/git/blobs{/sha}',
'branches_url': 'http://localhost/repos/restfulgit/branches{/branch}',
'commits_url': 'http://localhost/repos/restfulgit/commits{/sha}',
'description': description,
'full_name': 'restfulgit',
'git_commits_url': 'http://localhost/repos/restfulgit/git/commits{/sha}',
'git_refs_url': 'http://localhost/repos/restfulgit/git/refs{/sha}',
'git_tags_url': 'http://localhost/repos/restfulgit/git/tags{/sha}',
'name': 'restfulgit',
'tags_url': 'http://localhost/repos/restfulgit/tags/',
'trees_url': 'http://localhost/repos/restfulgit/git/trees{/sha}',
'url': 'http://localhost/repos/restfulgit/',
}
)
finally:
delete_file_quietly(GIT_MIRROR_DESCRIPTION_FILEPATH)
class CorsTestCase(_RestfulGitTestCase):
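# Tests for CORS headers, controlled by RESTFULGIT_ENABLE_CORS and the related RESTFULGIT_CORS_* settings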
@property
@contextmanager
def cors_enabled(self):
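# context manager that temporarily turns on RESTFULGIT_ENABLE_CORS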
with self.config_override('RESTFULGIT_ENABLE_CORS', True):
yield
@property
def arbitrary_response(self):
resp = self.client.get('/repos/restfulgit/raw/master/LICENSE.md')
self.assert200(resp)
return resp
def assert_header_equal(self, header, value):
resp = self.arbitrary_response
headers = resp.headers
self.assertIn(header, headers)
if header == 'Access-Control-Allow-Methods':
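# method order in the header is not significant, so compare as sets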
expected_methods = set(value.split(', '))
actual_methods = set(headers[header].split(', '))
self.assertEqual(actual_methods, expected_methods)
else:
self.assertEqual(headers[header], value)
def assert_cors_enabled_for(self, resp):
self.assertIn('Access-Control-Allow-Methods', resp.headers)
self.assertIn('Access-Control-Allow-Origin', resp.headers)
self.assertIn('Access-Control-Allow-Credentials', resp.headers)
def assert_cors_disabled_for(self, resp):
for header in list(resp.headers.keys()):
self.assertFalse(header.lower().startswith('access-control'), msg="CORS-related header present")
def test_disabled_really_disables(self):
with self.config_override('RESTFULGIT_ENABLE_CORS', False):
self.assert_cors_disabled_for(self.arbitrary_response)
def test_enabled_really_enables(self):
with self.config_override('RESTFULGIT_ENABLE_CORS', True):
self.assert_cors_enabled_for(self.arbitrary_response)
def test_disabled_disables_preflight(self):
with self.config_override('RESTFULGIT_ENABLE_CORS', False):
resp = self.client.options('/repos/restfulgit/raw/master/LICENSE.md')
self.assert200(resp)
self.assert_cors_disabled_for(resp)
def test_enabled_enables_preflight(self):
with self.config_override('RESTFULGIT_ENABLE_CORS', True):
resp = self.client.options('/repos/restfulgit/raw/master/LICENSE.md')
self.assert200(resp)
self.assert_cors_enabled_for(resp)
def test_specific_allowed_origin_honored(self):
origin = 'https://foo.bar.baz:90'
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_ALLOWED_ORIGIN', origin):
self.assert_header_equal('Access-Control-Allow-Origin', origin)
def test_star_allowed_origin_honored(self):
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_ALLOWED_ORIGIN', '*'):
self.assert_header_equal('Access-Control-Allow-Origin', '*')
def test_max_age_honored(self):
max_age = timedelta(minutes=427)
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_MAX_AGE', max_age):
self.assert_header_equal('Access-Control-Max-Age', str(int(max_age.total_seconds())))
def test_enabled_allow_credentials_honored(self):
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_ALLOW_CREDENTIALS', True):
self.assert_header_equal('Access-Control-Allow-Credentials', 'true')
def test_disabled_allow_credentials_honored(self):
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_ALLOW_CREDENTIALS', False):
self.assert_header_equal('Access-Control-Allow-Credentials', 'false')
def test_allowed_headers_honored(self):
with self.cors_enabled:
with self.config_override('RESTFULGIT_CORS_ALLOWED_HEADERS', ['X-Foo', 'X-Bar']):
self.assert_header_equal('Access-Control-Allow-Headers', "X-Foo, X-Bar")
def test_allowed_methods(self):
with self.cors_enabled:
self.assert_header_equal('Access-Control-Allow-Methods', 'HEAD, OPTIONS, GET')
class ArchiveDownloadTestCase(_RestfulGitTestCase):
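# Tests for the zipball and tarball archive download endpoints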
def run_command_quietly(self, args):
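# run an external command, discarding its stdout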
with open(os.devnull, 'wb') as blackhole:
check_call(args, stdout=blackhole)
def _only_subdirectory_in(self, directory):
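# assert that the given directory contains exactly one entry, a subdirectory, and return its path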
names = os.listdir(directory)
self.assertEqual(len(names), 1)
subdir = os.path.join(directory, names[0])
self.assertTrue(os.path.isdir(subdir))
return subdir
def assertFilesEqual(self, filepath_one, filepath_two, including_permissions=False):
if including_permissions:
self.assertEqualPermissions(filepath_one, filepath_two)
with open(filepath_one, 'rb') as file_one, open(filepath_two, 'rb') as file_two:
self.assertEqual(file_one.read(), file_two.read())
def assertEqualPermissions(self, path_one, path_two):
stat_one = os.stat(path_one)
stat_two = os.stat(path_two)
self.assertEqual(stat_one.st_mode, stat_two.st_mode)
self.assertEqual(stat_one.st_uid, stat_two.st_uid)
self.assertEqual(stat_one.st_gid, stat_two.st_gid)
def assertDirectoriesEqual(self, dir_one, dir_two, including_permissions=False):
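# require dir_two to mirror dir_one's directory structure and file contents (and, optionally, permissions)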
for dirpath_one, dirnames_one, filenames_one in os.walk(dir_one):
dirnames_one = frozenset(dirnames_one)
filenames_one = frozenset(filenames_one)
dirpath_two = dirpath_one.replace(dir_one, dir_two, 1)
self.assertTrue(os.path.isdir(dirpath_two))
children_two = os.listdir(dirpath_two)
dirnames_two = frozenset(name for name in children_two if os.path.isdir(os.path.join(dirpath_two, name)))
filenames_two = frozenset(name for name in children_two if os.path.isfile(os.path.join(dirpath_two, name)))
if including_permissions:
self.assertEqualPermissions(dirpath_one, dirpath_two)
self.assertEqual(dirnames_one, dirnames_two)
self.assertEqual(filenames_one, filenames_two)
for filename in filenames_one:
filepath_one = os.path.join(dirpath_one, filename)
filepath_two = os.path.join(dirpath_two, filename)
self.assertFilesEqual(filepath_one, filepath_two, including_permissions=including_permissions)
def assertIsAttachment(self, resp):
self.assertTrue(resp.headers.get('Content-Disposition', '').startswith('attachment;'))
def test_zipball_contents(self):
commit = '<PASSWORD>' # 1st commit in the repo that has multiple levels of subdirectories
with self.temporary_directory(suffix='.restfulgit') as temp_dir:
actual_dir = self.make_nested_dir(temp_dir, 'actual')
reference_dir = self.make_nested_dir(temp_dir, 'reference')
self.run_command_quietly(['unzip', self.get_fixture_path('{}.zip'.format(commit)), '-d', reference_dir])
with self.temporary_file(suffix='restfulgit_actual_zipball.zip') as pair:
actual_zip_file, actual_zip_filepath = pair
with actual_zip_file:
resp = self.client.get('/repos/restfulgit/zipball/{}/'.format(commit))
self.assert200(resp)
actual_zip_file.write(resp.data)
self.run_command_quietly(['unzip', actual_zip_filepath, '-d', actual_dir])
reference_wrapper_dir = self._only_subdirectory_in(reference_dir)
actual_wrapper_dir = self._only_subdirectory_in(actual_dir)
self.assertDirectoriesEqual(reference_wrapper_dir, actual_wrapper_dir)
def test_zipball_headers(self):
resp = self.client.get('/repos/restfulgit/zipball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f/')
self.assertIsAttachment(resp)
self.assertTrue(resp.headers.get('Content-Disposition', '').endswith('filename=restfulgit-7da1a61e2f566cf3094c2fea4b18b111d2638a8f.zip'))
self.assertEqual(resp.headers.get('Content-Type'), 'application/zip')
self.assertIn('max-age=0', resp.headers.get('Cache-Control', ''))
def test_zipball_on_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/zipball/master/')
self.assertJson404(resp)
def test_zipball_on_nonexistent_ref(self):
resp = self.client.get('/repos/restfulgit/zipball/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_tarball_contents(self):
commit = '<PASSWORD>' # 1st commit in the repo that has multiple levels of subdirectories
with self.temporary_directory(suffix='.restfulgit') as temp_dir:
actual_dir = self.make_nested_dir(temp_dir, 'actual')
reference_dir = self.make_nested_dir(temp_dir, 'reference')
self.run_command_quietly(['tar', 'xf', self.get_fixture_path('{}.tar.gz'.format(commit)), '-C', reference_dir])
with self.temporary_file(suffix='restfulgit_actual_tarball.tar.gz') as pair:
actual_tar_file, actual_tar_filepath = pair
with actual_tar_file:
resp = self.client.get('/repos/restfulgit/tarball/{}/'.format(commit))
self.assert200(resp)
actual_tar_file.write(resp.data)
self.run_command_quietly(['tar', 'xf', actual_tar_filepath, '-C', actual_dir])
reference_wrapper_dir = self._only_subdirectory_in(reference_dir)
actual_wrapper_dir = self._only_subdirectory_in(actual_dir)
self.assertDirectoriesEqual(reference_wrapper_dir, actual_wrapper_dir, including_permissions=True)
def test_tarball_headers(self):
resp = self.client.get('/repos/restfulgit/tarball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f/')
self.assertIsAttachment(resp)
self.assertTrue(resp.headers.get('Content-Disposition', '').endswith('filename=restfulgit-7da1a61e2f566cf3094c2fea4b18b111d2638a8f.tar.gz'))
self.assertIn(resp.headers.get('Content-Type'), {'application/x-gzip', 'application/x-tar'})
self.assertIn('max-age=0', resp.headers.get('Cache-Control', ''))
def test_tarball_on_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/tarball/master/')
self.assertJson404(resp)
def test_tarball_on_nonexistent_ref(self):
resp = self.client.get('/repos/restfulgit/tarball/{}/'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
class BlameTestCase(_RestfulGitTestCase): # NOTE: This API is a RestfulGit extension
def test_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/blame/master/README')
self.assertJson404(resp)
def test_nonexistent_ref(self):
resp = self.client.get('/repos/restfulgit/blame/this-branch-does-not-exist/README')
self.assertJson404(resp)
def test_nonexistent_file(self):
resp = self.client.get('/repos/restfulgit/blame/master/this-file-does-not-exist')
self.assertJson404(resp)
def test_directory_with_trailing_slash(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/')
self.assertJson400(resp)
def test_directory_without_trailing_slash(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit')
self.assertJson400(resp)
def test_first_line_out_of_bounds(self):
# relevant file is 1027 lines long
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1028')
self.assertJson400(resp)
def test_last_line_out_of_bounds(self):
# relevant file is 1027 lines long
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=1028')
self.assertJson400(resp)
def test_malformed_line_range(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=2&lastLine=1')
self.assertJson400(resp)
def test_zero_first_line(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=0')
self.assertJson400(resp)
def test_zero_last_line(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=0')
self.assertJson400(resp)
def test_non_integer_first_line(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=abc')
self.assertJson400(resp)
def test_non_integer_last_line(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=abc')
self.assertJson400(resp)
def test_basic_works(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py')
self.assert200(resp)
with io.open(self.get_fixture_path('da55cbf2f13c2ec019bf02f080bc47cc4f83318c-__init__.py-blame.json'), mode='rt', encoding='utf-8') as reference_file:
reference = load_json_file(reference_file)
self.assertEqual(reference, resp.json)
def test_first_line_only(self):
# relevant file is 1027 lines long
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1025')
self.assert200(resp)
self.assertEqual(resp.json, {
"commits": {
"090750eec2fe5f120ad1010fc2204d06fc3ca91e": {
"committer": {
"date": "2013-05-20T19:12:03Z",
"name": "<NAME>",
"email": "<EMAIL>"
},
"author": {
"date": "2013-05-20T19:12:03Z",
"name": "<NAME>",
"email": "<EMAIL>"
},
"url": "http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/",
"tree": {
"url": "http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/",
"sha": "288a19807d25403221c3f5260f4c172ec820b621"
},
"sha": "090750eec2fe5f120ad1010fc2204d06fc3ca91e",
"parents": [{
"url": "http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/",
"sha": "cff4955ef40cfce35efe282e196c840619c518f2"
}],
"message": "PEP-8 minor cleanup"
},
"ebaa594a5b689d1cb552e15753bcd109f60b0a10": {
"committer": {
"date": "2013-10-06T23:44:52Z",
"name": "<NAME>", "email": "<EMAIL>"
},
"author": {
"date": "2013-10-05T04:15:22Z",
"name": "<NAME>",
"email": "<EMAIL>"
},
"url": "http://localhost/repos/restfulgit/git/commits/ebaa594a5b689d1cb552e15753bcd109f60b0a10/",
"tree": {
"url": "http://localhost/repos/restfulgit/git/trees/16507999f5b925211a48e3c97b242577b14bfc71/",
"sha": "16507999f5b925211a48e3c97b242577b14bfc71"
},
"sha": "ebaa594a5b689d1cb552e15753bcd109f60b0a10",
"parents": [{
"url": "http://localhost/repos/restfulgit/git/commits/caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739/",
"sha": "caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739"
}],
"message": "use a blueprint to enhance embedability/reuseability/modularity; fixes #25\n\nURL converter registration inspired by http://blog.adrianschreyer.eu/post/adding-custom-url-map-converters-to-flask-blueprint-objects"
}
},
"lines": [
{
"commit": "<KEY>",
"line": "app.register_blueprint(restfulgit)",
"origPath": "gitapi.py",
"lineNum": 1025
},
{
"commit": "<KEY>",
"line": "",
"origPath": "gitapi.py",
"lineNum": 1026
},
{
"commit": "<PASSWORD>",
"line": "application = app",
"origPath": "api.py",
"lineNum": 1027
}
]
})
def test_last_line_only(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=2')
self.assert200(resp)
self.assertEqual(resp.json, {
'commits': {
'34f85950f3fcc662338593bbd43ad3bebc8cbf22': {
'author': {
'date': '2013-09-24T04:42:40Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-09-24T04:42:40Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'add PEP-263 encoding declaration',
'parents': [{
'sha': 'fadadc122ac7357816d6d57515c36ed8dddfadb5',
'url': 'http://localhost/repos/restfulgit/git/commits/fadadc122ac7357816d6d57515c36ed8dddfadb5/'
}],
'sha': '34f85950f3fcc662338593bbd43ad3bebc8cbf22',
'tree': {
'sha': '029c2787239825668f3619eb02bf5a336720f5e9',
'url': 'http://localhost/repos/restfulgit/git/trees/029c2787239825668f3619eb02bf5a336720f5e9/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/34f85950f3fcc662338593bbd43ad3bebc8cbf22/'
},
'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69': {
'author': {
'date': '2013-09-26T07:46:16Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-09-26T07:46:16Z',
'email': '<EMAIL>',
'name': '<NAME>'},
'message': 'improve config loading error reporting & squelch last W0702',
'parents': [{
'sha': '1f6787c238ef12413dca5305b8254c26c299718f',
'url': 'http://localhost/repos/restfulgit/git/commits/1f6787c238ef12413dca5305b8254c26c299718f/'
}],
'sha': 'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69',
'tree': {
'sha': '60859aa5e7ef3ba15006bd33f6ace219a3049ea5',
'url': 'http://localhost/repos/restfulgit/git/trees/60859aa5e7ef3ba15006bd33f6ace219a3049ea5/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69/'
}
},
'lines': [
{
'commit': '34f85950f3fcc662338593bbd43ad3bebc8cbf22',
'line': '# coding=utf-8',
'lineNum': 1,
'origPath': 'gitapi.py'},
{
'commit': 'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69',
'line': 'from __future__ import print_function',
'lineNum': 2,
'origPath': 'gitapi.py'
}
]
})
def test_first_line_just_within_bounds(self):
# relevant file is 1027 lines long
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1027')
self.assert200(resp)
self.assertEqual(resp.json, {
'commits': {
'090750eec2fe5f120ad1010fc2204d06fc3ca91e': {
'author': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'PEP-8 minor cleanup',
'parents': [{
'sha': 'cff4955ef40cfce35efe282e196c840619c518f2',
'url': 'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'
}],
'sha': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'tree': {
'sha': '288a19807d25403221c3f5260f4c172ec820b621',
'url': 'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'
}
},
'lines': [{
'commit': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'line': 'application = app',
'lineNum': 1027,
'origPath': 'api.py'
}]
})
def test_last_line_just_within_bounds(self):
# relevant file is 1027 lines long
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=1027&firstLine=1026')
self.assert200(resp)
self.assertEqual(resp.json, {
'commits': {
'090750eec2fe5f120ad1010fc2204d06fc3ca91e': {
'author': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'PEP-8 minor cleanup',
'parents': [{
'sha': 'cff4955ef40cfce35efe282e196c840619c518f2',
'url': 'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'
}],
'sha': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'tree': {
'sha': '288a19807d25403221c3f5260f4c172ec820b621',
'url': 'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'
},
'ebaa594a5b689d1cb552e15753bcd109f60b0a10': {
'author': {
'date': '2013-10-05T04:15:22Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-10-06T23:44:52Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'use a blueprint to enhance embedability/reuseability/modularity; fixes #25\n\nURL converter registration inspired by http://blog.adrianschreyer.eu/post/adding-custom-url-map-converters-to-flask-blueprint-objects',
'parents': [{
'sha': 'caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739',
'url': 'http://localhost/repos/restfulgit/git/commits/caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739/'
}],
'sha': 'ebaa594a5b689d1cb552e15753bcd109f60b0a10',
'tree': {
'sha': '16507999f5b925211a48e3c97b242577b14bfc71',
'url': 'http://localhost/repos/restfulgit/git/trees/16507999f5b925211a48e3c97b242577b14bfc71/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/ebaa594a5b689d1cb552e15753bcd109f60b0a10/'
}
},
'lines': [
{
'commit': 'ebaa594a5b689d1cb552e15753bcd109f60b0a10',
'line': '',
'lineNum': 1026,
'origPath': 'gitapi.py'
},
{
'commit': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'line': 'application = app',
'lineNum': 1027,
'origPath': 'api.py'
},
]
})
def test_first_and_last_line_works(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=4&lastLine=6')
self.assert200(resp)
self.assertEqual(resp.json, {
'commits': {
'13e9ff41ba4704d6ca91988f9216adeeee8c79b5': {
'author': {
'date': '2013-12-23T04:16:14Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-12-30T20:01:35Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'implement tarball & zipball downloads; fixes #62\n\nReference zipball from https://github.com/hulu/restfulgit/zipball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f\nReference tarball from https://github.com/hulu/restfulgit/tarball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f',
'parents': [{
'sha': '129458e24667a9c32db4cb1a0549e3554bff0965',
'url': 'http://localhost/repos/restfulgit/git/commits/129458e24667a9c32db4cb1a0549e3554bff0965/'
}],
'sha': '13e9ff41ba4704d6ca91988f9216adeeee8c79b5',
'tree': {
'sha': 'a611bc827047055a6b8e9cbf7ee2827767b27328',
'url': 'http://localhost/repos/restfulgit/git/trees/a611bc827047055a6b8e9cbf7ee2827767b27328/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/13e9ff41ba4704d6ca91988f9216adeeee8c79b5/'
},
'a8e4af2d7f30492bfef34ccb1c2c167df54512ba': {
'author': {
'date': '2013-12-10T03:32:32Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-12-10T03:59:40Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'use JSON error pages; fixes #39',
'parents': [{
'sha': '493431d90a21109290e4a8ab8978e523ec957531',
'url': 'http://localhost/repos/restfulgit/git/commits/493431d90a21109290e4a8ab8978e523ec957531/'
}],
'sha': 'a8e4af2d7f30492bfef34ccb1c2c167df54512ba',
'tree': {
'sha': 'b08d1b792ecba9ebb06bc8f2dad5d0877a9a42ec',
'url': 'http://localhost/repos/restfulgit/git/trees/b08d1b792ecba9ebb06bc8f2dad5d0877a9a42ec/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/a8e4af2d7f30492bfef34ccb1c2c167df54512ba/'
},
'ba3f032dbd2ead6a6610f3bf3b66f05cb628f579': {
'author': {
'date': '2013-09-12T04:26:31Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-09-12T06:16:37Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'use a custom Werkzeug converter for commit SHAs; fixes #1',
'parents': [{
'sha': '98b873f9d87b110a48628b8493de2cb0383eb391',
'url': 'http://localhost/repos/restfulgit/git/commits/98b873f9d87b110a48628b8493de2cb0383eb391/'
}],
'sha': 'ba3f032dbd2ead6a6610f3bf3b66f05cb628f579',
'tree': {
'sha': 'a6fb2a953ab675c8da0f3776faa160101ac301f9',
'url': 'http://localhost/repos/restfulgit/git/trees/a6fb2a953ab675c8da0f3776faa160101ac301f9/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/ba3f032dbd2ead6a6610f3bf3b66f05cb628f579/'
}
},
'lines': [
{
'commit': '13e9ff41ba4704d6ca91988f9216adeeee8c79b5',
'line': 'from flask import Flask, url_for, request, Response, current_app, Blueprint, safe_join, send_from_directory, make_response, send_file',
'lineNum': 4,
'origPath': 'restfulgit/__init__.py'},
{
'commit': 'a8e4af2d7f30492bfef34ccb1c2c167df54512ba',
'line': 'from werkzeug.exceptions import NotFound, BadRequest, HTTPException, default_exceptions',
'lineNum': 5,
'origPath': 'restfulgit/__init__.py'
},
{
'commit': 'ba3f032dbd2ead6a6610f3bf3b66f05cb628f579',
'line': 'from werkzeug.routing import BaseConverter',
'lineNum': 6,
'origPath': 'gitapi.py'
}
]
})
def test_single_line_works(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1027&lastLine=1027')
self.assert200(resp)
self.assertEqual(resp.json, {
'commits': {
'090750eec2fe5f120ad1010fc2204d06fc3ca91e': {
'author': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '2013-05-20T19:12:03Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'PEP-8 minor cleanup',
'parents': [{
'sha': 'cff4955ef40cfce35efe282e196c840619c518f2',
'url': 'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'
}],
'sha': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'tree': {
'sha': '288a19807d25403221c3f5260f4c172ec820b621',
'url': 'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'
},
'url': 'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'
}
},
'lines': [{
'commit': '090750eec2fe5f120ad1010fc2204d06fc3ca91e',
'line': 'application = app',
'lineNum': 1027,
'origPath': 'api.py'
}]
})
def test_oldest_with_nonexistent_ref(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?oldest={}'.format(IMPROBABLE_SHA))
self.assertJson404(resp)
def test_oldest_works(self):
resp = self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?oldest=129458e24667a9c32db4cb1a0549e3554bff0965')
self.assert200(resp)
json = resp.json
relevant_commits = {'129458e24667a9c32db4cb1a0549e3554bff0965', '13e9ff41ba4704d6ca91988f9216adeeee8c79b5'}
self.assertEqual(relevant_commits, set(json['commits'].keys()))
self.assertEqual(relevant_commits, {line['commit'] for line in json['lines']})
class RepoContentsTestCase(_RestfulGitTestCase):
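# Tests for the GitHub-style contents API at /repos/<repo>/contents/<path>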
def test_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/contents/README.md')
self.assertJson404(resp)
def test_nonexistent_ref(self):
resp = self.client.get('/repos/restfulgit/contents/README.md?ref=this-branch-does-not-exist')
self.assertJson404(resp)
def test_ref_is_optional(self):
resp = self.client.get('/repos/restfulgit/contents/README.md')
self.assert200(resp)
def test_extant_file(self):
resp = self.client.get('/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')
self.assert200(resp)
json = resp.json
content = json.pop('content')
self.assertEqual(sha512(content.encode()).hexdigest(), '1966b04df26b4b9168d9c294d12ff23794fc36ba7bd7e96997541f5f31814f0d2f640dd6f0c0fe719a74815439154890df467ec5b9c4322d785902b18917fecc')
# From https://api.github.com/repos/hulu/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments
self.assertEqual(json, {
"name": "d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff",
"path": "tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff",
"sha": "40c739b1166f47c791e87f747f0061739b49af0e",
"size": 853,
"url": "http://localhost/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/40c739b1166f47c791e87f747f0061739b49af0e/",
"type": "file",
"encoding": "utf-8",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/40c739b1166f47c791e87f747f0061739b49af0e/",
}
})
def test_nonexistent_file(self):
resp = self.client.get('/repos/restfulgit/contents/this-file-does-not-exist')
self.assertJson404(resp)
def test_extant_directory_without_trailing_slash(self):
# From https://api.github.com/repos/hulu/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments
resp = self.client.get('/repos/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')
self.assert200(resp)
self.assertEqual(resp.json, [
{
"name": "__init__.py",
"path": "restfulgit/__init__.py",
"sha": "db36c03e5649e6e6d23fd431deff3a52ec1faaba",
"size": 24099,
"url": "http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/",
"type": "file",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/",
}
},
{
"name": "__main__.py",
"path": "restfulgit/__main__.py",
"sha": "e067d7f361bd3b0f227ba1914c227ebf9539f59d",
"size": 110,
"url": "http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/",
"type": "file",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/",
}
}
])
def test_extant_directory_with_trailing_slash(self):
# From https://api.github.com/repos/hulu/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments
resp = self.client.get('/repos/restfulgit/contents/restfulgit/?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')
self.assert200(resp)
self.assertEqual(resp.json, [
{
"name": "__init__.py",
"path": "restfulgit/__init__.py",
"sha": "db36c03e5649e6e6d23fd431deff3a52ec1faaba",
"size": 24099,
"url": "http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/",
"type": "file",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/",
}
},
{
"name": "__main__.py",
"path": "restfulgit/__main__.py",
"sha": "e067d7f361bd3b0f227ba1914c227ebf9539f59d",
"size": 110,
"url": "http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/",
"type": "file",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/",
}
}
])
def test_root_directory(self):
resp = self.client.get('/repos/restfulgit/contents/?ref=initial')
self.assert200(resp)
self.assertEqual(resp.json, [{
'name': 'api.py',
'url': 'http://localhost/repos/restfulgit/contents/api.py?ref=initial',
'sha': 'ae9d90706c632c26023ce599ac96cb152673da7c',
'_links': {
'self': 'http://localhost/repos/restfulgit/contents/api.py?ref=initial',
'git': 'http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/'
},
'git_url': 'http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/',
'path': 'api.py',
'type': 'file',
'size': 5543
}])
def test_directory_with_subdirectories(self):
# From https://api.github.com/repos/hulu/restfulgit/contents/tests?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments
resp = self.client.get('/repos/restfulgit/contents/tests?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')
self.assert200(resp)
self.assertEqual(resp.json, [
{
"name": "fixtures",
"path": "tests/fixtures",
"sha": "7a62b2e0c7e25dc66d110380844c477abf13b91f",
"size": 0,
"url": "http://localhost/repos/restfulgit/contents/tests/fixtures?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/trees/7a62b2e0c7e25dc66d110380844c477abf13b91f/",
"type": "dir",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/tests/fixtures?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/trees/7a62b2e0c7e25dc66d110380844c477abf13b91f/",
}
},
{
"name": "test_restfulgit.py",
"path": "tests/test_restfulgit.py",
"sha": "3da8fd332d44b67ecd9910f5392c73cb62a76a4d",
"size": 47069,
"url": "http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git_url": "http://localhost/repos/restfulgit/git/blobs/3da8fd332d44b67ecd9910f5392c73cb62a76a4d/",
"type": "file",
"_links": {
"self": "http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f",
"git": "http://localhost/repos/restfulgit/git/blobs/3da8fd332d44b67ecd9910f5392c73cb62a76a4d/",
}
}
])
def test_nonexistent_directory(self):
resp = self.client.get('/repos/restfulgit/contents/this-directory-does-not-exist/')
self.assertJson404(resp)
def test_symlink(self):
# FIXME: implement
pass
def test_submodule(self):
# FIXME: implement
pass
class CompareTestCase(_RestfulGitTestCase):
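# Tests for the compare endpoint /repos/<repo>/compare/<left>...<right>.diff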
def test_works(self):
resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff'.format('initial', FIFTH_COMMIT))
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertBytesEqualFixture(resp.get_data(), 'initial_c04112733fe2db2cb2f179fca1a19365cf15fef5.diff')
def test_empty_diff(self):
resp = self.client.get('/repos/restfulgit/compare/initial...initial.diff')
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertEqual(resp.get_data(), b'') # From https://github.com/hulu/restfulgit/compare/initial...initial.diff
def test_nonexistent_refspec_404(self):
resp = self.client.get('/repos/restfulgit/compare/initial...this-branch-does-not-exist.diff')
self.assertJson404(resp)
def test_empty_left_refspec_rejected(self):
resp = self.client.get('/repos/restfulgit/compare/...initial.diff')
self.assertJson404(resp)
def test_right_empty_refspec_rejected(self):
resp = self.client.get('/repos/restfulgit/compare/initial....diff')
self.assertJson404(resp)
def test_branch_names_with_dots(self):
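# FIXME: implement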
pass
def test_non_integer_context_rejected(self): # NOTE: `context` is a RestfulGit extension
resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=abcdef'.format('initial', FIFTH_COMMIT))
self.assert400(resp)
def test_negative_context_rejected(self): # NOTE: `context` is a RestfulGit extension
resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=-1'.format('initial', FIFTH_COMMIT))
self.assert400(resp)
def test_context_is_honored(self): # NOTE: `context` is a RestfulGit extension
resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=1'.format('initial', FIFTH_COMMIT))
self.assert200(resp)
self.assertContentTypeIsDiff(resp)
self.assertBytesEqualFixture(resp.get_data(), 'initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff')
class ContributorsTestCase(_RestfulGitTestCase):
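# Tests for the contributors listing at /repos/<repo>/contributors/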
def test_nonexistent_repo(self):
resp = self.client.get('/repos/this-repo-does-not-exist/contributors/')
self.assert404(resp)
def test_empty_repo(self):
with self._empty_repo:
resp = self.client.get('/repos/example/contributors/')
self.assert200(resp)
self.assertEqual(resp.json, [])
def test_results_well_formed(self):
resp = self.client.get('/repos/restfulgit/contributors/')
self.assert200(resp)
contributors = resp.json
for contributor in contributors:
self.assertIsInstance(contributor, dict)
self.assertIsInstance(contributor.get('name'), str)
self.assertIsInstance(contributor.get('email'), str)
count = contributor.get('contributions')
self.assertIsInstance(count, int)
self.assertGreater(count, 0)
counts = [contributor['contributions'] for contributor in contributors]
sorted_counts = sorted(counts, reverse=True)
self.assertEqual(sorted_counts, counts)
class CommitsUniqueToBranchTestCase(_RestfulGitTestCase): # NOTE: This API is a RestfulGit extension
def test_invalid_sort_404s(self):
with self._base_repo_and_commit:
resp = self.client.get('/repos/example/branches/A/unique-commits/sorted/astrological/')
self.assertJson404(resp)
def test_first_commit(self):
with self._base_repo_and_commit:
resp = self.client.get('/repos/example/branches/A/unique-commits/sorted/topological/')
self.assert200(resp)
self.assertEqual(resp.json, {'commits': [{
'author': {
'date': '1970-01-01T00:00:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:00:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:01:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'message': 'A',
'parents': [],
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'tree': {
'sha': '617601c79811cbbae338512798318b4e5b70c9ac',
'url': 'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'
},
'url': 'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
},
'committer': {
'date': '1970-01-01T00:01:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [],
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}]})
def test_only_branch(self):
with self._base_repo_and_commit as pair:
repo, a = pair
b = self._commit(repo, "B", [a])
repo.create_branch("A", repo[b], True) # overwrite A
resp = self.client.get('/repos/example/branches/A/unique-commits/sorted/topological/')
self.assert200(resp)
self.assertEqual(resp.json, {'commits': [
{
'author': {
'date': '1970-01-01T00:00:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:00:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:01:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'A',
'parents': [],
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'tree': {
'sha': '617601c79811cbbae338512798318b4e5b70c9ac',
'url': 'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'
},
'url': 'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
},
'committer': {
'date': '1970-01-01T00:01:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [],
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
},
{
'author': {
'date': '1970-01-01T00:02:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:02:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:03:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'B',
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'tree': {
'sha': '1a321342ee655cb18be26a1a9632bb9629fb3642',
'url': 'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'
},
'url': 'http://localhost/repos/example/git/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
},
'committer': {
'date': '1970-01-01T00:03:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'url': 'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
}
]})
def test_duplicate_is_empty(self):
with self._example_repo() as commits:
repo = commits['repo']
# J branch = I branch
repo.create_branch("J", repo[commits['i']])
resp = self.client.get('/repos/example/branches/J/unique-commits/sorted/topological/')
self.assert200(resp)
self.assertEqual(resp.json, {'commits': []})
def test_inclusion_exclusion_topological_ordering(self):
# unique commits, topologically sorted: B, C, D, F, G, I
with self._example_repo() as commits:
resp = self.client.get('/repos/example/branches/I/unique-commits/sorted/topological/')
self.assert200(resp)
self.assertEqual(resp.json, {'commits': [
{
'author': {
'date': '1970-01-01T00:02:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:02:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:03:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'B',
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'tree': {
'sha': '1a321342ee655cb18be26a1a9632bb9629fb3642',
'url': 'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'
},
'url': 'http://localhost/repos/example/git/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
},
'committer': {
'date': '1970-01-01T00:03:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'url': 'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
},
{
'author': {
'date': '1970-01-01T00:04:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:04:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:05:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'C',
'parents': [
{
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'url': 'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
}
],
'sha': 'd982d1115b558a8abe447f8bf46cc3ab8761e19f',
'tree': {
'sha': 'adaf4189f869749deba4ed69005ece57a4c2f19c',
'url': 'http://localhost/repos/example/git/trees/adaf4189f869749deba4ed69005ece57a4c2f19c/'
},
'url': 'http://localhost/repos/example/git/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'
},
'committer': {
'date': '1970-01-01T00:05:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'parents': [
{
'sha': 'e11c39e288519302f75f281b8b9a5ab585f678db',
'url': 'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'
}
],
'sha': 'd982d1115b558a8abe447f8bf46cc3ab8761e19f',
'url': 'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'
},
{
'author': {
'date': '1970-01-01T00:06:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'commit': {
'author': {
'date': '1970-01-01T00:06:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'committer': {
'date': '1970-01-01T00:07:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'D',
'parents': [
{
'sha': 'd982d1115b558a8abe447f8bf46cc3ab8761e19f',
'url': 'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'
}
],
'sha': '24c57d21498c2fc3b44153713308510d86142fe6',
'tree': {
'sha': 'c1397c050b9d4341e85d04f4b311a1cf382d7961',
'url': 'http://localhost/repos/example/git/trees/c1397c050b9d4341e85d04f4b311a1cf382d7961/'
},
'url': 'http://localhost/repos/example/git/commits/24c57d21498c2fc3b44153713308510d86142fe6/'
},
'committer': {
'date': '1970-01-01T00:07:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': 'd982d1115b558a8abe447f8bf46cc3ab8761e19f',
'url': 'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'
}
],
'sha': '24c57d21498c2fc3b44153713308510d86142fe6',
'url': 'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'
},
{
'author': {
'date': '1970-01-01T00:10:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'commit': {
'author': {
'date': '1970-01-01T00:10:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'committer': {
'date': '1970-01-01T00:11:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'F',
'parents': [
{
'sha': '3423be4854bc3316c12f3a9c699e6cd2209fd8ea',
'url': 'http://localhost/repos/example/commits/3423be4854bc3316c12f3a9c699e6cd2209fd8ea/'
}
],
'sha': '1d040035bfb8936bd760ff226cb5c9f2c2b817a3',
'tree': {
'sha': 'ab220b156431b575f3cb3607644d05954d5e859a',
'url': 'http://localhost/repos/example/git/trees/ab220b156431b575f3cb3607644d05954d5e859a/'
},
'url': 'http://localhost/repos/example/git/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'
},
'committer': {
'date': '1970-01-01T00:11:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '3423be4854bc3316c12f3a9c699e6cd2209fd8ea',
'url': 'http://localhost/repos/example/commits/3423be4854bc3316c12f3a9c699e6cd2209fd8ea/'
}
],
'sha': '1d040035bfb8936bd760ff226cb5c9f2c2b817a3',
'url': 'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'
},
{
'author': {
'date': '1970-01-01T00:12:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:12:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:13:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'G',
'parents': [
{
'sha': '1d040035bfb8936bd760ff226cb5c9f2c2b817a3',
'url': 'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'
}
],
'sha': 'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d',
'tree': {
'sha': '95146dee2fed3d5783f625fe4e48202dae4606ef',
'url': 'http://localhost/repos/example/git/trees/95146dee2fed3d5783f625fe4e48202dae4606ef/'
},
'url': 'http://localhost/repos/example/git/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'
},
'committer': {
'date': '1970-01-01T00:13:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '1d040035bfb8936bd760ff226cb5c9f2c2b817a3',
'url': 'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'
}
],
'sha': 'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d',
'url': 'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'
},
{
'author': {
'date': '1970-01-01T00:16:00Z',
'email': '<EMAIL>',
'name': 'Alien Celebrity'
},
'commit': {
'author': {
'date': '1970-01-01T00:16:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:17:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'I',
'parents': [
{
'sha': '24c57d21498c2fc3b44153713308510d86142fe6',
'url': 'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'
},
{
'sha': 'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d',
'url': 'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'
}
],
'sha': '1622ddc3695d1263c629bdbb0bcdf235510ee068',
'tree': {
'sha': '0ed66f14f8548241624bcbd1d39d3d06f277a9b4',
'url': 'http://localhost/repos/example/git/trees/0ed66f14f8548241624bcbd1d39d3d06f277a9b4/'
},
'url': 'http://localhost/repos/example/git/commits/1622ddc3695d1263c629bdbb0bcdf235510ee068/'
},
'committer': {
'date': '1970-01-01T00:17:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '24c57d21498c2fc3b44153713308510d86142fe6',
'url': 'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'
},
{
'sha': 'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d',
'url': 'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'
}
],
'sha': '1622ddc3695d1263c629bdbb0bcdf235510ee068',
'url': 'http://localhost/repos/example/commits/1622ddc3695d1263c629bdbb0bcdf235510ee068/'
}
]})
def test_inclusion_exclusion_chronological_ordering(self):
with self._example_repo(b_before_e=False) as commits:
resp = self.client.get('/repos/example/branches/I/unique-commits/sorted/chronological/')
self.assert200(resp)
self.assertEqual(resp.json, {'commits': [
{
'author': {
'date': '1970-01-01T00:04:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:04:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:05:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'F',
'parents': [
{
'sha': '2d5ea1e6bf086d0ce420180f892dbf9e08d9835e',
'url': 'http://localhost/repos/example/commits/2d5ea1e6bf086d0ce420180f892dbf9e08d9835e/'
}
],
'sha': '6ccf9dc00992617fa4206ff67ffed2dcb895135c',
'tree': {
'sha': 'ab220b156431b575f3cb3607644d05954d5e859a',
'url': 'http://localhost/repos/example/git/trees/ab220b156431b575f3cb3607644d05954d5e859a/'
},
'url': 'http://localhost/repos/example/git/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'
},
'committer': {
'date': '1970-01-01T00:05:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '2d5ea1e6bf086d0ce420180f892dbf9e08d9835e',
'url': 'http://localhost/repos/example/commits/2d5ea1e6bf086d0ce420180f892dbf9e08d9835e/'
}
],
'sha': '6ccf9dc00992617fa4206ff67ffed2dcb895135c',
'url': 'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'
},
{
'author': {
'date': '1970-01-01T00:06:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:06:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:07:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'G',
'parents': [
{
'sha': '6ccf9dc00992617fa4206ff67ffed2dcb895135c',
'url': 'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'
}
],
'sha': 'aea62655228b0b0d33f6226cf038607cfc3db8bd',
'tree': {
'sha': '95146dee2fed3d5783f625fe4e48202dae4606ef',
'url': 'http://localhost/repos/example/git/trees/95146dee2fed3d5783f625fe4e48202dae4606ef/'
},
'url': 'http://localhost/repos/example/git/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'
},
'committer': {
'date': '1970-01-01T00:07:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '6ccf9dc00992617fa4206ff67ffed2dcb895135c',
'url': 'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'
}
],
'sha': 'aea62655228b0b0d33f6226cf038607cfc3db8bd',
'url': 'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'
},
{
'author': {
'date': '1970-01-01T00:08:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:08:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:09:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'B',
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': '8e994dcc81dd6d4aee6d627f946ef326363360f3',
'tree': {
'sha': '1a321342ee655cb18be26a1a9632bb9629fb3642',
'url': 'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'
},
'url': 'http://localhost/repos/example/git/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'
},
'committer': {
'date': '1970-01-01T00:09:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': 'c655dffe0fed2a78dc5f38c1bc8e5628e2605017',
'url': 'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'
}
],
'sha': '8e994dcc81dd6d4aee6d627f946ef326363360f3',
'url': 'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'
},
{
'author': {
'date': '1970-01-01T00:10:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:10:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:11:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'C',
'parents': [
{
'sha': '8e994dcc81dd6d4aee6d627f946ef326363360f3',
'url': 'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'
}
],
'sha': '729ef1b328ac57d209a608c89734043128e3be3a',
'tree': {
'sha': 'adaf4189f869749deba4ed69005ece57a4c2f19c',
'url': 'http://localhost/repos/example/git/trees/adaf4189f869749deba4ed69005ece57a4c2f19c/'
},
'url': 'http://localhost/repos/example/git/commits/729ef1b328ac57d209a608c89734043128e3be3a/'
},
'committer': {
'date': '1970-01-01T00:11:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '8e994dcc81dd6d4aee6d627f946ef326363360f3',
'url': 'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'
}
],
'sha': '729ef1b328ac57d209a608c89734043128e3be3a',
'url': 'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'
},
{
'author': {
'date': '1970-01-01T00:12:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:12:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:13:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'D',
'parents': [
{
'sha': '729ef1b328ac57d209a608c89734043128e3be3a',
'url': 'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'
}
],
'sha': '842548f32658e940fc054f8c328c7639fc5c9053',
'tree': {
'sha': 'c1397c050b9d4341e85d04f4b311a1cf382d7961',
'url': 'http://localhost/repos/example/git/trees/c1397c050b9d4341e85d04f4b311a1cf382d7961/'
},
'url': 'http://localhost/repos/example/git/commits/842548f32658e940fc054f8c328c7639fc5c9053/'
},
'committer': {
'date': '1970-01-01T00:13:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '729ef1b328ac57d209a608c89734043128e3be3a',
'url': 'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'
}
],
'sha': '842548f32658e940fc054f8c328c7639fc5c9053',
'url': 'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'
},
{
'author': {
'date': '1970-01-01T00:16:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'commit': {
'author': {
'date': '1970-01-01T00:16:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'committer': {
'date': '1970-01-01T00:17:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'message': 'I',
'parents': [
{
'sha': '842548f32658e940fc054f8c328c7639fc5c9053',
'url': 'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'
},
{
'sha': 'aea62655228b0b0d33f6226cf038607cfc3db8bd',
'url': 'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'
}
],
'sha': '02d221a9d6f0619a77cbf1be4ac8a27057c2b4da',
'tree': {
'sha': '0ed66f14f8548241624bcbd1d39d3d06f277a9b4',
'url': 'http://localhost/repos/example/git/trees/0ed66f14f8548241624bcbd1d39d3d06f277a9b4/'
},
'url': 'http://localhost/repos/example/git/commits/02d221a9d6f0619a77cbf1be4ac8a27057c2b4da/'
},
'committer': {
'date': '1970-01-01T00:17:00Z',
'email': '<EMAIL>',
'name': '<NAME>'
},
'parents': [
{
'sha': '842548f32658e940fc054f8c328c7639fc5c9053',
'url': 'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'
},
{
'sha': 'aea62655228b0b0d33f6226cf038607cfc3db8bd',
'url': 'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'
}
],
'sha': '02d221a9d6f0619a77cbf1be4ac8a27057c2b4da',
'url': 'http://localhost/repos/example/commits/02d221a9d6f0619a77cbf1be4ac8a27057c2b4da/'
}
]})
if __name__ == '__main__':
unittest.main()
|
util/util.py | sangkny/EnlightenGAN | 1,077 | 12679438 | <gh_stars>1000+
# from __future__ import print_function
import numpy as np
from PIL import Image
import inspect, re
import math
import torch
import os
import collections
from torch.optim import lr_scheduler
import torch.nn.init as init
from torch.autograd import Variable  # needed by vgg_preprocess below
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
image_numpy = np.maximum(image_numpy, 0)
image_numpy = np.minimum(image_numpy, 255)
return image_numpy.astype(imtype)
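# --- Illustrative sketch (not part of the original file): how tensor2im and
# save_image are typically combined. The tensor shape and the output path are
# made-up placeholders for the example.
def _tensor2im_demo():
    fake_output = torch.rand(1, 3, 64, 64) * 2 - 1  # pretend generator output in [-1, 1]
    image = tensor2im(fake_output)                  # HxWxC uint8 array
    save_image(image, '/tmp/tensor2im_demo.png')    # save_image is defined further below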
def atten2im(image_tensor, imtype=np.uint8):
image_tensor = image_tensor[0]
image_tensor = torch.cat((image_tensor, image_tensor, image_tensor), 0)
image_numpy = image_tensor.cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0
image_numpy = image_numpy/(image_numpy.max()/255.0)
return image_numpy.astype(imtype)
def latent2im(image_tensor, imtype=np.uint8):
# image_tensor = (image_tensor - torch.min(image_tensor))/(torch.max(image_tensor)-torch.min(image_tensor))
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0
image_numpy = np.maximum(image_numpy, 0)
image_numpy = np.minimum(image_numpy, 255)
return image_numpy.astype(imtype)
def max2im(image_1, image_2, imtype=np.uint8):
image_1 = image_1[0].cpu().float().numpy()
image_2 = image_2[0].cpu().float().numpy()
image_1 = (np.transpose(image_1, (1, 2, 0)) + 1) / 2.0 * 255.0
image_2 = (np.transpose(image_2, (1, 2, 0))) * 255.0
output = np.maximum(image_1, image_2)
output = np.maximum(output, 0)
output = np.minimum(output, 255)
return output.astype(imtype)
def variable2im(image_tensor, imtype=np.uint8):
image_numpy = image_tensor[0].data.cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print( "\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]) )
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def get_model_list(dirname, key):
if os.path.exists(dirname) is False:
return None
gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f]
if gen_models is None:
return None
gen_models.sort()
last_model_name = gen_models[-1]
return last_model_name
def load_vgg16(model_dir):
""" Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
vgg = Vgg16()
for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
dst.data[:] = src
torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
vgg = Vgg16()
vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
return vgg
def vgg_preprocess(batch):
tensortype = type(batch.data)
(r, g, b) = torch.chunk(batch, 3, dim = 1)
batch = torch.cat((b, g, r), dim = 1) # convert RGB to BGR
batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]
mean = tensortype(batch.data.size())
mean[:, 0, :, :] = 103.939
mean[:, 1, :, :] = 116.779
mean[:, 2, :, :] = 123.680
batch = batch.sub(Variable(mean)) # subtract mean
return batch
def get_scheduler(optimizer, hyperparameters, iterations=-1):
if 'lr_policy' not in hyperparameters or hyperparameters['lr_policy'] == 'constant':
scheduler = None # constant scheduler
elif hyperparameters['lr_policy'] == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=hyperparameters['step_size'],
gamma=hyperparameters['gamma'], last_epoch=iterations)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', hyperparameters['lr_policy'])
return scheduler
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
# print m.__class__.__name__
if init_type == 'gaussian':
init.normal(m.weight.data, 0.0, 0.02)
elif init_type == 'xavier':
init.xavier_normal(m.weight.data, gain=math.sqrt(2))
elif init_type == 'kaiming':
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal(m.weight.data, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, "Unsupported initialization: {}".format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant(m.bias.data, 0.0)
return init_fun |
src/app/migrations/0002_DropTheOnetimeApp.py | denkasyanov/education-backend | 151 | 12679469 | <filename>src/app/migrations/0002_DropTheOnetimeApp.py<gh_stars>100-1000
# Generated by Django 3.1.4 on 2021-01-02 18:09
from django.db import migrations
def drop_old_contenttypes(apps, schema_editor):
apps.get_model('contenttypes.ContentType').objects.filter(app_label='onetime').delete()
class Migration(migrations.Migration):
dependencies = [
('app', '0001_CourseMailLog'),
]
operations = [
migrations.RunPython(drop_old_contenttypes),
migrations.RunSQL('DROP TABLE IF EXISTS onetime_token;'),
]
|
conanfile.py | szigetics/di | 531 | 12679481 | <reponame>szigetics/di
from conans import ConanFile, CMake
class DI(ConanFile):
name = "DI"
version = "latest"
url = "https://github.com/boost-ext/di"
license = "Boost"
description = "[Boost::ext].DI - C++14 Dependency Injection Library"
settings = "os", "compiler", "arch", "build_type"
exports_sources = "include/*"
no_copy_source = True
def package(self):
self.copy("*.hpp")
|
test/python/visualization/pulse_v2/test_layouts.py | Roshan-Thomas/qiskit-terra | 1,599 | 12679507 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for core modules of pulse drawer."""
from qiskit import pulse
from qiskit.test import QiskitTestCase
from qiskit.visualization.pulse_v2 import layouts, device_info
class TestChannelArrangement(QiskitTestCase):
"""Tests for channel mapping functions."""
def setUp(self) -> None:
super().setUp()
self.channels = [
pulse.DriveChannel(0),
pulse.DriveChannel(1),
pulse.DriveChannel(2),
pulse.MeasureChannel(1),
pulse.MeasureChannel(2),
pulse.AcquireChannel(1),
pulse.AcquireChannel(2),
pulse.ControlChannel(0),
pulse.ControlChannel(2),
pulse.ControlChannel(5),
]
self.formatter = {"control.show_acquire_channel": True}
self.device = device_info.OpenPulseBackendInfo(
name="test",
dt=1,
channel_frequency_map={
pulse.DriveChannel(0): 5.0e9,
pulse.DriveChannel(1): 5.1e9,
pulse.DriveChannel(2): 5.2e9,
pulse.MeasureChannel(1): 7.0e9,
pulse.MeasureChannel(1): 7.1e9,
pulse.MeasureChannel(2): 7.2e9,
pulse.ControlChannel(0): 5.0e9,
pulse.ControlChannel(1): 5.1e9,
pulse.ControlChannel(2): 5.2e9,
pulse.ControlChannel(3): 5.3e9,
pulse.ControlChannel(4): 5.4e9,
pulse.ControlChannel(5): 5.5e9,
},
qubit_channel_map={
0: [
pulse.DriveChannel(0),
pulse.MeasureChannel(0),
pulse.AcquireChannel(0),
pulse.ControlChannel(0),
],
1: [
pulse.DriveChannel(1),
pulse.MeasureChannel(1),
pulse.AcquireChannel(1),
pulse.ControlChannel(1),
],
2: [
pulse.DriveChannel(2),
pulse.MeasureChannel(2),
pulse.AcquireChannel(2),
pulse.ControlChannel(2),
pulse.ControlChannel(3),
pulse.ControlChannel(4),
],
3: [
pulse.DriveChannel(3),
pulse.MeasureChannel(3),
pulse.AcquireChannel(3),
pulse.ControlChannel(5),
],
},
)
def test_channel_type_grouped_sort(self):
"""Test channel_type_grouped_sort."""
out_layout = layouts.channel_type_grouped_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.DriveChannel(1)],
[pulse.DriveChannel(2)],
[pulse.ControlChannel(0)],
[pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
[pulse.MeasureChannel(1)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(1)],
[pulse.AcquireChannel(2)],
]
ref_names = ["D0", "D1", "D2", "U0", "U2", "U5", "M1", "M2", "A1", "A2"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_index_sort(self):
"""Test channel_index_grouped_sort."""
out_layout = layouts.channel_index_grouped_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.ControlChannel(0)],
[pulse.DriveChannel(1)],
[pulse.MeasureChannel(1)],
[pulse.AcquireChannel(1)],
[pulse.DriveChannel(2)],
[pulse.ControlChannel(2)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["D0", "U0", "D1", "M1", "A1", "D2", "U2", "M2", "A2", "U5"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_index_sort_grouped_control(self):
"""Test channel_index_grouped_sort_u."""
out_layout = layouts.channel_index_grouped_sort_u(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.DriveChannel(1)],
[pulse.MeasureChannel(1)],
[pulse.AcquireChannel(1)],
[pulse.DriveChannel(2)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(2)],
[pulse.ControlChannel(0)],
[pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["D0", "D1", "M1", "A1", "D2", "M2", "A2", "U0", "U2", "U5"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_qubit_index_sort(self):
"""Test qubit_index_sort."""
out_layout = layouts.qubit_index_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0), pulse.ControlChannel(0)],
[pulse.DriveChannel(1), pulse.MeasureChannel(1)],
[pulse.DriveChannel(2), pulse.MeasureChannel(2), pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["Q0", "Q1", "Q2", "Q3"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
class TestHorizontalAxis(QiskitTestCase):
"""Tests for horizontal axis mapping functions."""
def test_time_map_in_ns(self):
"""Test for time_map_in_ns."""
time_window = (0, 1000)
breaks = [(100, 200)]
dt = 1e-9
haxis = layouts.time_map_in_ns(time_window=time_window, axis_breaks=breaks, dt=dt)
self.assertListEqual(list(haxis.window), [0, 900])
self.assertListEqual(list(haxis.axis_break_pos), [100])
ref_axis_map = {
0.0: "0",
180.0: "280",
360.0: "460",
540.0: "640",
720.0: "820",
900.0: "1000",
}
self.assertDictEqual(haxis.axis_map, ref_axis_map)
self.assertEqual(haxis.label, "Time (ns)")
def test_time_map_in_without_dt(self):
"""Test for time_map_in_ns when dt is not provided."""
time_window = (0, 1000)
breaks = [(100, 200)]
dt = None
haxis = layouts.time_map_in_ns(time_window=time_window, axis_breaks=breaks, dt=dt)
self.assertListEqual(list(haxis.window), [0, 900])
self.assertListEqual(list(haxis.axis_break_pos), [100])
ref_axis_map = {
0.0: "0",
180.0: "280",
360.0: "460",
540.0: "640",
720.0: "820",
900.0: "1000",
}
self.assertDictEqual(haxis.axis_map, ref_axis_map)
self.assertEqual(haxis.label, "System cycle time (dt)")
class TestFigureTitle(QiskitTestCase):
"""Tests for figure title generation."""
def setUp(self) -> None:
super().setUp()
self.device = device_info.OpenPulseBackendInfo(name="test_backend", dt=1e-9)
self.prog = pulse.Schedule(name="test_sched")
self.prog.insert(
0, pulse.Play(pulse.Constant(100, 0.1), pulse.DriveChannel(0)), inplace=True
)
def detail_title(self):
"""Test detail_title layout function."""
ref_title = "Name: test_sched, Duration: 100.0 ns, Backend: test_backend"
out = layouts.detail_title(self.prog, self.device)
self.assertEqual(out, ref_title)
def empty_title(self):
"""Test empty_title layout function."""
ref_title = ""
out = layouts.detail_title(self.prog, self.device)
self.assertEqual(out, ref_title)
|
lldb/packages/Python/lldbsuite/test/python_api/default-constructor/sb_lineentry.py | medismailben/llvm-project | 2,338 | 12679513 | """
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import lldb
def fuzz_obj(obj):
obj.GetStartAddress()
obj.GetEndAddress()
obj.GetFileSpec()
obj.GetLine()
obj.GetColumn()
obj.GetDescription(lldb.SBStream())
|
app/routers/auth/user_schema.py | cdlaimin/pity | 135 | 12679553 | from pydantic import BaseModel, validator
from app.excpetions.ParamsException import ParamsError
class UserDto(BaseModel):
name: str
password: str
username: str
email: str
@validator('name', 'password', 'username', 'email')
def field_not_empty(cls, v):
if isinstance(v, str) and len(v.strip()) == 0:
raise ParamsError("不能为空")
return v
class UserForm(BaseModel):
username: str
password: str
@validator('password', 'username')
def name_not_empty(cls, v):
if isinstance(v, str) and len(v.strip()) == 0:
raise ParamsError("不能为空")
return v
|
admin_interface/admin.py | cherijs/django-admin-interface | 1,129 | 12679575 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from admin_interface.compat import gettext_lazy as _
from admin_interface.models import Theme
from django.contrib import admin
class ThemeAdmin(admin.ModelAdmin):
list_display = ('name', 'active', )
list_editable = ('active', )
list_per_page = 100
show_full_result_count = False
fieldsets = (
(None, {
'classes': ('wide', ),
'fields': ('name', 'active', )
}),
(_('Environment'), {
'classes': ('wide', ),
'fields': (
'env_name',
'env_color',
'env_visible_in_header',
'env_visible_in_favicon',
)
}),
(_('Language chooser'), {
'classes': ('wide', ),
'fields': (
'language_chooser_active',
'language_chooser_display',
)
}),
(_('Logo'), {
'classes': ('wide', ),
'fields': (
'logo',
'logo_max_width',
'logo_max_height',
'logo_color',
'logo_visible',
)
}),
(_('Favicon'), {
'classes': ('wide', ),
'fields': ('favicon', )
}),
(_('Title'), {
'classes': ('wide', ),
'fields': (
'title',
'title_color',
'title_visible',
)
}),
(_('Header'), {
'classes': ('wide', ),
'fields': (
'css_header_background_color',
'css_header_text_color',
'css_header_link_color',
'css_header_link_hover_color',
)
}),
(_('Breadcrumbs / Module headers'), {
'classes': ('wide', ),
'fields': (
'css_module_background_color',
'css_module_background_selected_color',
'css_module_text_color',
'css_module_link_color',
'css_module_link_selected_color',
'css_module_link_hover_color',
'css_module_rounded_corners',
)
}),
(_('Generic Links'), {
'classes': ('wide', ),
'fields': (
'css_generic_link_color',
'css_generic_link_hover_color',
)
}),
(_('Save Buttons'), {
'classes': ('wide', ),
'fields': (
'css_save_button_background_color',
'css_save_button_background_hover_color',
'css_save_button_text_color',
)
}),
(_('Delete Buttons'), {
'classes': ('wide', ),
'fields': (
'css_delete_button_background_color',
'css_delete_button_background_hover_color',
'css_delete_button_text_color',
)
}),
(_('Related Modal'), {
'classes': ('wide', ),
'fields': (
'related_modal_active',
'related_modal_background_color',
'related_modal_background_opacity',
'related_modal_rounded_corners',
'related_modal_close_button_visible',
)
}),
(_('Form Controls'), {
'classes': ('wide', ),
'fields': (
'form_submit_sticky',
'form_pagination_sticky',
)
}),
(_('List Filter'), {
'classes': ('wide', ),
'fields': (
'list_filter_dropdown',
'list_filter_sticky',
)
}),
(_('Recent Actions'), {
'classes': ('wide', ),
'fields': ('recent_actions_visible', )
}),
)
save_on_top = True
admin.site.register(Theme, ThemeAdmin)
|
lldb/test/API/commands/target/stop-hooks/TestStopHookScripted.py | mkinsner/llvm | 2,338 | 12679627 | <filename>lldb/test/API/commands/target/stop-hooks/TestStopHookScripted.py
"""
Test stop hook functionality
"""
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class TestStopHooks(TestBase):
mydir = TestBase.compute_mydir(__file__)
    # If your test case doesn't stress debug info, then
# set this to true. That way it won't be run once for
# each debug info format.
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
TestBase.setUp(self)
self.build()
self.main_source_file = lldb.SBFileSpec("main.c")
full_path = os.path.join(self.getSourceDir(), "main.c")
self.main_start_line = line_number(full_path, "main()")
def test_bad_handler(self):
"""Test that we give a good error message when the handler is bad"""
self.script_setup()
result = lldb.SBCommandReturnObject()
# First try the wrong number of args handler:
command = "target stop-hook add -P stop_hook.bad_handle_stop"
self.interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "Set the target stop hook")
self.assertIn("Wrong number of args", result.GetError(), "Got the wrong number of args error")
# Next the no handler at all handler:
command = "target stop-hook add -P stop_hook.no_handle_stop"
self.interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "Set the target stop hook")
self.assertIn('Class "stop_hook.no_handle_stop" is missing the required handle_stop callback', result.GetError(), "Got the right error")
def test_stop_hooks_scripted(self):
"""Test that a scripted stop hook works with no specifiers"""
self.stop_hooks_scripted(5)
def test_stop_hooks_scripted_right_func(self):
"""Test that a scripted stop hook fires when there is a function match"""
self.stop_hooks_scripted(5, "-n step_out_of_me")
def test_stop_hooks_scripted_wrong_func(self):
"""Test that a scripted stop hook doesn't fire when the function does not match"""
self.stop_hooks_scripted(0, "-n main")
def test_stop_hooks_scripted_right_lines(self):
"""Test that a scripted stop hook fires when there is a function match"""
self.stop_hooks_scripted(5, "-f main.c -l 1 -e %d"%(self.main_start_line))
def test_stop_hooks_scripted_wrong_lines(self):
"""Test that a scripted stop hook doesn't fire when the function does not match"""
self.stop_hooks_scripted(0, "-f main.c -l %d -e 100"%(self.main_start_line))
def test_stop_hooks_scripted_auto_continue(self):
"""Test that the --auto-continue flag works"""
self.do_test_auto_continue(False)
def test_stop_hooks_scripted_return_false(self):
"""Test that the returning False from a stop hook works"""
self.do_test_auto_continue(True)
def do_test_auto_continue(self, return_true):
"""Test that auto-continue works."""
# We set auto-continue to 1 but the stop hook only applies to step_out_of_me,
# so we should end up stopped in main, having run the expression only once.
self.script_setup()
result = lldb.SBCommandReturnObject()
if return_true:
command = "target stop-hook add -P stop_hook.stop_handler -k increment -v 5 -k return_false -v 1 -n step_out_of_me"
else:
command = "target stop-hook add -G 1 -P stop_hook.stop_handler -k increment -v 5 -n step_out_of_me"
self.interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded, "Set the target stop hook")
# First run to main. If we go straight to the first stop hook hit,
# run_to_source_breakpoint will fail because we aren't at original breakpoint
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
"Stop here first", self.main_source_file)
# Now set the breakpoint on step_out_of_me, and make sure we run the
# expression, then continue back to main.
bkpt = target.BreakpointCreateBySourceRegex("Set a breakpoint here and step out", self.main_source_file)
self.assertNotEqual(bkpt.GetNumLocations(), 0, "Got breakpoints in step_out_of_me")
process.Continue()
var = target.FindFirstGlobalVariable("g_var")
self.assertTrue(var.IsValid())
self.assertEqual(var.GetValueAsUnsigned(), 6, "Updated g_var")
func_name = process.GetSelectedThread().frames[0].GetFunctionName()
self.assertEqual("main", func_name, "Didn't stop at the expected function.")
def script_setup(self):
self.interp = self.dbg.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()
# Bring in our script file:
script_name = os.path.join(self.getSourceDir(), "stop_hook.py")
command = "command script import " + script_name
self.interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded(), "com scr imp failed: %s"%(result.GetError()))
# set a breakpoint at the end of main to catch our auto-continue tests.
# Do it in the dummy target so it will get copied to our target even when
# we don't have a chance to stop.
dummy_target = self.dbg.GetDummyTarget()
dummy_target.BreakpointCreateBySourceRegex("return result", self.main_source_file)
def stop_hooks_scripted(self, g_var_value, specifier = None):
self.script_setup()
result = lldb.SBCommandReturnObject()
command = "target stop-hook add -P stop_hook.stop_handler -k increment -v 5 "
if specifier:
command += specifier
self.interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded, "Set the target stop hook")
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
"Set a breakpoint here", self.main_source_file)
# At this point we've hit our stop hook so we should have run our expression,
# which increments g_var by the amount specified by the increment key's value.
while process.GetState() == lldb.eStateRunning:
continue
var = target.FindFirstGlobalVariable("g_var")
self.assertTrue(var.IsValid())
self.assertEqual(var.GetValueAsUnsigned(), g_var_value, "Updated g_var")
|
tests/messages/test_encode.py | fooker/mido | 658 | 12679643 | from mido.messages.specs import SPEC_BY_STATUS
from mido.messages.encode import encode_message
from mido.messages.decode import decode_message
def test_encode_decode_all():
"""Encode and then decode all messages on all channels.
Each data byte is different so that the test will fail if the
bytes are swapped during encoding or decoding.
"""
data_bytes = [1, 2, 3]
for status_byte, spec in SPEC_BY_STATUS.items():
if status_byte == 0xf0:
msg_bytes = [0xf0] + data_bytes + [0xf7]
else:
msg_bytes = [status_byte] + data_bytes[:spec['length'] - 1]
assert encode_message(decode_message(msg_bytes)) == msg_bytes
|
seldom/db_operation/sqlite_db.py | chelizhi2020/seldom | 374 | 12679651 | <reponame>chelizhi2020/seldom
import sqlite3
from seldom.db_operation.base_db import SQLBase
class SQLiteDB(SQLBase):
def __init__(self, db_path):
"""
Connect to the sqlite database
"""
self.connection = sqlite3.connect(db_path)
self.cursor = self.connection.cursor()
def close(self):
"""
Close the database connection
"""
self.connection.close()
def execute_sql(self, sql):
"""
Execute SQL
"""
self.cursor.execute(sql)
self.connection.commit()
def insert_data(self, table, data):
"""
insert sql statement
"""
for key in data:
data[key] = "'" + str(data[key]) + "'"
key = ','.join(data.keys())
value = ','.join(data.values())
sql = """INSERT INTO {t} ({k}) VALUES ({v})""".format(t=table, k=key, v=value)
self.execute_sql(sql)
def query_sql(self, sql):
"""
Query SQL
return: query data
"""
data_list = []
rows = self.cursor.execute(sql)
for row in rows:
data_list.append(row)
return data_list
def select_data(self, table, where=None):
"""
select sql statement
"""
sql = """select * from {} """.format(table)
if where is not None:
sql += 'where {};'.format(self.dict_to_str_and(where))
return self.query_sql(sql)
def update_data(self, table, data, where):
"""
update sql statement
"""
sql = """update {} set """.format(table)
sql += self.dict_to_str(data)
if where:
sql += ' where {};'.format(self.dict_to_str_and(where))
self.execute_sql(sql)
def delete_data(self, table, where=None):
"""
delete table data
"""
sql = """delete from {}""".format(table)
if where is not None:
sql += ' where {};'.format(self.dict_to_str_and(where))
self.execute_sql(sql)
def init_table(self, table_data):
"""
init table data
"""
for table, data_list in table_data.items():
self.delete_data(table)
for data in data_list:
self.insert_data(table, data)
self.close()
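# --- Illustrative usage sketch (not part of the original module). The table
# and column names and the in-memory database path are made up for the example;
# only methods whose SQL is visible in this file are exercised.
if __name__ == '__main__':
    db = SQLiteDB(":memory:")
    db.execute_sql("CREATE TABLE user (id INTEGER PRIMARY KEY, name TEXT)")
    db.insert_data("user", {"id": 1, "name": "tom"})
    print(db.query_sql("SELECT * FROM user"))  # roughly [(1, 'tom')]
    db.close()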
|
test/unit/model/test_views.py | rhpvorderman/galaxy | 1,085 | 12679718 | <filename>test/unit/model/test_views.py
import pytest
from sqlalchemy import (
Column,
Integer,
MetaData,
Table,
)
from sqlalchemy.sql import (
column,
text,
)
from galaxy.model.database_utils import (
create_database,
sqlalchemy_engine,
)
from galaxy.model.view.utils import (
CreateView,
View,
)
from .common import (
drop_database,
replace_database_in_url,
skip_if_not_mysql_uri,
skip_if_not_postgres_uri,
)
@pytest.fixture
def view():
# A View class we would add to galaxy.model.view
class TestView(View):
name = 'testview'
__view__ = text('SELECT id, foo FROM testfoo').columns(
column('id', Integer),
column('foo', Integer)
)
pkeys = {'id'}
View._make_table(name, __view__, pkeys)
return TestView
@skip_if_not_postgres_uri
def test_postgres_create_view(database_name, postgres_url, view):
metadata = MetaData()
make_table(metadata) # table from which the view will select
url = replace_database_in_url(postgres_url, database_name)
query = f"SELECT 1 FROM information_schema.views WHERE table_name = '{view.name}'"
create_database(postgres_url, database_name)
run_view_test(url, metadata, view, query)
drop_database(postgres_url, database_name)
def test_sqlite_create_view(sqlite_memory_url, view):
metadata = MetaData()
make_table(metadata) # table from which the view will select
url = sqlite_memory_url
query = f"SELECT 1 FROM sqlite_master WHERE type='view' AND name='{view.name}'"
run_view_test(url, metadata, view, query)
@skip_if_not_mysql_uri
def test_mysql_create_view(database_name, mysql_url, view):
metadata = MetaData()
make_table(metadata) # table from which the view will select
url = replace_database_in_url(mysql_url, database_name)
query = f"SELECT 1 FROM information_schema.views WHERE table_name = '{view.name}'"
create_database(mysql_url, database_name)
run_view_test(url, metadata, view, query)
drop_database(mysql_url, database_name)
def make_table(metadata):
users = Table('testfoo', metadata,
Column('id', Integer, primary_key=True),
Column('foo', Integer),
Column('bar', Integer)
)
return users
def run_view_test(url, metadata, view, query):
with sqlalchemy_engine(url) as engine:
with engine.connect() as conn:
metadata.create_all(conn) # create table in database
conn.execute(CreateView(view.name, view.__view__)) # create view in database
result = conn.execute(query).fetchall()
assert len(result) == 1 # assert that view exists in database
|
tests/test_stream_xep_0085.py | E-Tahta/sleekxmpp | 499 | 12679755 | import time
import unittest
from sleekxmpp.test import SleekTest
class TestStreamChatStates(SleekTest):
def tearDown(self):
self.stream_close()
def testChatStates(self):
self.stream_start(mode='client', plugins=['xep_0030', 'xep_0085'])
results = []
def handle_state(msg):
results.append(msg['chat_state'])
self.xmpp.add_event_handler('chatstate_active', handle_state)
self.xmpp.add_event_handler('chatstate_inactive', handle_state)
self.xmpp.add_event_handler('chatstate_paused', handle_state)
self.xmpp.add_event_handler('chatstate_gone', handle_state)
self.xmpp.add_event_handler('chatstate_composing', handle_state)
self.recv("""
<message>
<active xmlns="http://jabber.org/protocol/chatstates" />
</message>
""")
self.recv("""
<message>
<inactive xmlns="http://jabber.org/protocol/chatstates" />
</message>
""")
self.recv("""
<message>
<paused xmlns="http://jabber.org/protocol/chatstates" />
</message>
""")
self.recv("""
<message>
<composing xmlns="http://jabber.org/protocol/chatstates" />
</message>
""")
self.recv("""
<message>
<gone xmlns="http://jabber.org/protocol/chatstates" />
</message>
""")
# Give event queue time to process
time.sleep(0.3)
expected = ['active', 'inactive', 'paused', 'composing', 'gone']
self.failUnless(results == expected,
"Chat state event not handled: %s" % results)
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamChatStates)
|
plugins/tests/myparser_test.py | otherbeast/hackers-tool-kit | 393 | 12679765 | <gh_stars>100-1000
#
# Unit tests for myparser.py
#
import myparser
import unittest
class TestMyParser(unittest.TestCase):
def test_emails(self):
word = 'domain.com'
results = '@domain.com***a@domain***banotherdomain.com***<EMAIL>***<EMAIL>***'
p = myparser.parser(results, word)
emails = sorted(p.emails())
self.assertEquals(emails, ['<EMAIL>', '<EMAIL>'])
if __name__ == '__main__':
unittest.main()
|
test/labeling/test_utils.py | melonwater211/snorkel | 2,906 | 12679798 | <reponame>melonwater211/snorkel
import unittest
import numpy as np
import pandas as pd
from snorkel.labeling import filter_unlabeled_dataframe
class TestAnalysis(unittest.TestCase):
def test_filter_unlabeled_dataframe(self) -> None:
X = pd.DataFrame(dict(A=["x", "y", "z"], B=[1, 2, 3]))
y = np.array(
[[0.25, 0.25, 0.25, 0.25], [1.0, 0.0, 0.0, 0.0], [0.2, 0.3, 0.5, 0.0]]
)
L = np.array([[0, 1, -1], [-1, -1, -1], [1, 1, 0]])
X_filtered, y_filtered = filter_unlabeled_dataframe(X, y, L)
np.array_equal(X_filtered.values, np.array([["x", 1], ["z", 3]]))
np.testing.assert_array_almost_equal(
y_filtered, np.array([[0.25, 0.25, 0.25, 0.25], [0.2, 0.3, 0.5, 0.0]])
)
|
compiler_gym/util/truncate.py | thecoblack/CompilerGym | 562 | 12679803 | <filename>compiler_gym/util/truncate.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Iterable
def truncate(
string: str,
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a string using ellipsis.
For multi-line inputs, each line is truncated independently.
For example:
>>> truncate("abcdefghijklmnop\n1234", max_line_len=10)
"abcdefg...\n1234"
:param string: The string to truncate.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
    :param tail: If True, keep the last ``max_lines`` lines instead of the first.
    :return: A (possibly truncated) string.
"""
return truncate_lines(
str(string).split("\n"),
max_line_len=max_line_len,
max_lines=max_lines,
tail=tail,
)
def truncate_lines(
lines: Iterable[str],
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a sequence of lines, one string per line, using ellipsis.
Each line is truncated independently and combined into a single multi-line
string.
For example:
>>> truncate_lines(["abcdefghijklmnop", "1234"], max_line_len=10)
"abcdefg...\n1234"
    :param lines: The lines to truncate, one string per line.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
    :param tail: If True, keep the last ``max_lines`` lines instead of the first.
    :return: A (possibly truncated) string.
"""
if max_line_len <= 3:
raise ValueError("Lines must be greater than 3 characeters long.")
def _truncate_line(line: str):
if len(line) > max_line_len:
return f"{line[:max_line_len-3]}..."
return line
def _consume(iterable, n):
"""Consume fist or last `n` elements from iterable."""
if tail:
yield from deque(iterable, n)
else:
for _ in range(n):
try:
yield next(iterable)
except StopIteration:
return
lines = iter(lines)
truncated_lines = [_truncate_line(str(ln)) for ln in _consume(lines, max_lines)]
# Truncate the final line if required.
try:
next(lines)
truncated_lines[-1] = _truncate_line(f"{truncated_lines[-1]}...")
except StopIteration:
pass
return "\n".join(truncated_lines)
|
pyechonest/catalog.py | FosterFromGloster/PeskyDuplicates | 351 | 12679809 | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by <NAME> on 2010-08-25.
The Catalog module loosely covers http://developer.echonest.com/docs/v4/catalog.html
Refer to the official api documentation if you are unsure about something.
"""
try:
import json
except ImportError:
import simplejson as json
import datetime
import warnings
import util
from proxies import CatalogProxy, ResultList
import artist, song
# deal with datetime in json
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.datetime) else None
def create_catalog_by_name(name, T="general"):
"""
    Creates a catalog object with a given name. Does not check to see if the catalog already exists.
"""
result = util.callm("catalog/create", {}, POST=True,
data={"name":name, "type":T})
result = result['response']
return Catalog(result['id'], **dict( (k,result[k]) for k in ('name', 'type')))
class Catalog(CatalogProxy):
"""
A Catalog object
Attributes:
id (str): Catalog ID
name (str): Catalog Name
read (list): A list of catalog items (objects if they are resolved, else dictionaries)
feed (list): A list of dictionaries for news, blogs, reviews, audio, video for a catalog's artists
Create an catalog object like so:
>>> c = catalog.Catalog('CAGPXKK12BB06F9DE9') # get existing catalog
>>> c = catalog.Catalog('test_song_catalog', 'song') # get existing or create new catalog
"""
def __init__(self, id, type=None, **kwargs):
"""
Create a catalog object (get a catalog by ID or get or create one given by name and type)
Args:
id (str): A catalog id or name
Kwargs:
type (str): 'song' or 'artist', specifying the catalog type
Returns:
A catalog object
Example:
>>> c = catalog.Catalog('my_songs', type='song')
>>> c.id
u'CAVKUPC12BCA792120'
>>> c.name
u'my_songs'
>>>
"""
super(Catalog, self).__init__(id, type, **kwargs)
def __repr__(self):
return "<%s - %s>" % (self._object_type.encode('utf-8'), self.name.encode('utf-8'))
def __str__(self):
return self.name.encode('utf-8')
def update(self, items):
"""
Update a catalog object
Args:
items (list): A list of dicts describing update data and action codes (see api docs)
Kwargs:
Returns:
A ticket id
Example:
>>> c = catalog.Catalog('my_songs', type='song')
>>> items
[{'action': 'update',
'item': {'artist_name': 'dAn ThE aUtOmAtOr',
'disc_number': 1,
'genre': 'Instrumental',
'item_id': '38937DDF04BC7FC4',
'play_count': 5,
'release': 'Bombay the Hard Way: Guns, Cars & Sitars',
'song_name': 'Inspector Jay From Dehli',
'track_number': 9,
'url': 'file://localhost/Users/tylerw/Music/iTunes/iTunes%20Media/Music/Dan%20the%20Automator/Bombay%20the%20Hard%20Way_%20Guns,%20Cars%20&%20Sitars/09%20Inspector%20Jay%20From%20Dehli.m4a'}}]
>>> ticket = c.update(items)
>>> ticket
u'7dcad583f2a38e6689d48a792b2e4c96'
>>> c.status(ticket)
{u'ticket_status': u'complete', u'update_info': []}
>>>
"""
post_data = {}
items_json = json.dumps(items, default=dthandler)
post_data['data'] = items_json
response = self.post_attribute("update", data=post_data)
return response['ticket']
def status(self, ticket):
"""
Check the status of a catalog update
Args:
ticket (str): A string representing a ticket ID
Kwargs:
Returns:
A dictionary representing ticket status
Example:
>>> ticket
u'7dcad583f2a38e6689d48a792b2e4c96'
>>> c.status(ticket)
{u'ticket_status': u'complete', u'update_info': []}
>>>
"""
return self.get_attribute_simple("status", ticket=ticket)
def get_profile(self):
"""
Check the status of a catalog update
Args:
Kwargs:
Returns:
A dictionary representing ticket status
Example:
>>> c
<catalog - test_song_catalog>
>>> c.profile()
{u'id': u'CAGPXKK12BB06F9DE9',
u'name': u'test_song_catalog',
u'pending_tickets': [],
u'resolved': 2,
u'total': 4,
u'type': u'song'}
>>>
"""
result = self.get_attribute("profile")
return result['catalog']
profile = property(get_profile)
    def read_items(self, buckets=None, results=15, start=0, item_ids=None):
"""
Returns data from the catalog; also expanded for the requested buckets.
This method is provided for backwards-compatibility
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of objects in the catalog; list contains additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[<song - Harmonice Mundi II>]
>>>
"""
warnings.warn("catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.")
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute("read", results=results, start=start, **kwargs)
rval = ResultList([])
if item_ids:
            rval.start = 0
            rval.total = len(response['catalog']['items'])
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
for item in response['catalog']['items']:
new_item = None
# song items
if 'song_id' in item:
item['id'] = item.pop('song_id')
item['title'] = item.pop('song_name')
request = item['request']
new_item = song.Song(**util.fix(item))
new_item.request = request
# artist item
elif 'artist_id' in item:
item['id'] = item.pop('artist_id')
item['name'] = item.pop('artist_name')
request = item['request']
new_item = artist.Artist(**util.fix(item))
new_item.request = request
# unresolved item
else:
new_item = item
rval.append(new_item)
return rval
read = property(read_items)
    def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
"""
Returns data from the catalog; also expanded for the requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of dicts representing objects in the catalog; list has additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[
{
"artist_id": "AR78KRI1187B98E6F2",
"artist_name": "Art of Noise",
"date_added": "2012-04-02T16:50:02",
"foreign_id": "CAHLYLR13674D1CF83:song:1000",
"request": {
"artist_name": "The Art Of Noise",
"item_id": "1000",
"song_name": "Love"
},
"song_id": "SOSBCTO1311AFE7AE0",
"song_name": "Love"
}
]
"""
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute("read", results=results, start=start, **kwargs)
rval = ResultList(response['catalog']['items'])
if item_ids:
            rval.start = 0
            rval.total = len(response['catalog']['items'])
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
return rval
item_dicts = property(get_item_dicts)
def get_feed(self, buckets=None, since=None, results=15, start=0):
"""
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'<NAME> \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'<NAME>'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>>
"""
kwargs = {}
kwargs['bucket'] = buckets or []
if since:
kwargs['since']=since
response = self.get_attribute("feed", results=results, start=start, **kwargs)
rval = ResultList(response['feed'])
return rval
feed = property(get_feed)
def delete(self):
"""
Deletes the entire catalog
Args:
Kwargs:
Returns:
The deleted catalog's id.
Example:
>>> c
<catalog - test_song_catalog>
>>> c.delete()
{u'id': u'CAXGUPY12BB087A21D'}
>>>
"""
return self.post_attribute("delete")
def play(self, items, plays=None):
return self.get_attribute("play", item=items, plays=plays)
def skip(self, items, skips=None):
return self.get_attribute("skip", item=items, skips=skips)
def keyvalues(self):
return self.get_attribute("keyvalues")['keyvalues']
def favorite(self, items, favorite=None):
if favorite != None:
favorite = str(favorite).lower()
return self.get_attribute("favorite", item=items, favorite=favorite)
def ban(self, items, ban=None):
if ban != None:
ban = str(ban).lower()
return self.get_attribute("ban", item=items, ban=ban)
def rate(self, items, rating=None):
return self.get_attribute("rate", item=items, rating=rating)
def get_catalog_by_name(name):
"""
Grabs a catalog by name, if its there on the api key.
Otherwise, an error is thrown (mirroring the API)
"""
kwargs = {
'name' : name,
}
result = util.callm("%s/%s" % ('catalog', 'profile'), kwargs)
return Catalog(**util.fix(result['response']['catalog']))
def list_catalogs(results=30, start=0):
"""
Returns list of all catalogs created on this API key
Args:
Kwargs:
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of catalog objects
Example:
>>> catalog.list_catalogs()
[<catalog - test_artist_catalog>, <catalog - test_song_catalog>, <catalog - my_songs>]
>>>
"""
result = util.callm("%s/%s" % ('catalog', 'list'), {'results': results, 'start': start})
cats = [Catalog(**util.fix(d)) for d in result['response']['catalogs']]
start = result['response']['start']
total = result['response']['total']
return ResultList(cats, start, total)
|
proxy/core/ssh/tunnel.py | sakurai-youhei/proxy.py | 1,891 | 12679819 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import Optional, Tuple, Callable
import paramiko
class Tunnel:
"""Establishes a tunnel between local (machine where Tunnel is running) and remote host.
Once a tunnel has been established, remote host can route HTTP(s) traffic to
localhost over tunnel.
"""
def __init__(
self,
ssh_username: str,
remote_addr: Tuple[str, int],
private_pem_key: str,
remote_proxy_port: int,
conn_handler: Callable[[paramiko.channel.Channel], None]) -> None:
self.remote_addr = remote_addr
self.ssh_username = ssh_username
self.private_pem_key = private_pem_key
self.remote_proxy_port = remote_proxy_port
self.conn_handler = conn_handler
def run(self) -> None:
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
ssh.connect(
hostname=self.remote_addr[0],
port=self.remote_addr[1],
username=self.ssh_username,
key_filename=self.private_pem_key
)
print('SSH connection established...')
transport: Optional[paramiko.transport.Transport] = ssh.get_transport(
)
assert transport is not None
transport.request_port_forward('', self.remote_proxy_port)
print('Tunnel port forward setup successful...')
while True:
conn: Optional[paramiko.channel.Channel] = transport.accept(
timeout=1)
                e = transport.get_exception()
                if e:
                    raise e
                if conn is None:
                    # accept() returns None when the 1-second timeout elapses.
                    continue
self.conn_handler(conn)
except KeyboardInterrupt:
pass
finally:
ssh.close()
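# --- Illustrative usage sketch (not part of the original module): wiring a
# Tunnel up with a trivial echo handler. The host, user name, key path and
# port are placeholders invented for the example.
def _example_tunnel() -> None:
    def echo_handler(conn: paramiko.channel.Channel) -> None:
        data = conn.recv(1024)  # read whatever the remote side forwarded
        conn.send(data)         # echo it back
        conn.close()
    Tunnel(
        ssh_username='ubuntu',
        remote_addr=('example.com', 22),
        private_pem_key='/path/to/key.pem',
        remote_proxy_port=8899,
        conn_handler=echo_handler,
    ).run()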
|
sources/scripts/terraform_client.py | andrew-glenn/terraform-aws-control_tower_account_factory | 219 | 12679823 | <filename>sources/scripts/terraform_client.py
#!/usr/bin/python
# Copyright Amazon.com, Inc. or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import os
import time
import requests
TERRAFORM_API_ENDPOINT = ""
LOCAL_CONFIGURATION_PATH = ""
TERRAFORM_VERSION = ""
def init(api_endpoint, tf_version, config_path):
global TERRAFORM_API_ENDPOINT
global TERRAFORM_VERSION
global LOCAL_CONFIGURATION_PATH
TERRAFORM_API_ENDPOINT = api_endpoint
TERRAFORM_VERSION = tf_version
LOCAL_CONFIGURATION_PATH = config_path
def check_workspace_exists(organization_name, workspace_name, api_token):
endpoint = "{}/organizations/{}/workspaces/{}".format(
TERRAFORM_API_ENDPOINT, organization_name, workspace_name
)
headers = __build_standard_headers(api_token)
tf_dist = os.environ.get("TF_DISTRIBUTION")
response = requests.get(endpoint, headers=headers, verify=tf_dist != "tfe")
data = response.json()
if "data" in data.keys():
if "id" in data["data"].keys():
return data["data"]["id"]
return None
def create_workspace(organization_name, workspace_name, api_token):
workspace_id = check_workspace_exists(organization_name, workspace_name, api_token)
if workspace_id:
return workspace_id
else:
endpoint = "{}/organizations/{}/workspaces".format(
TERRAFORM_API_ENDPOINT, organization_name
)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"attributes": {
"name": workspace_name,
"terraform-version": TERRAFORM_VERSION,
"auto-apply": True,
},
"type": "workspaces",
}
}
response = __post(endpoint, headers, payload)
return response["data"]["id"]
def create_configuration_version(workspace_id, api_token):
endpoint = "{}/workspaces/{}/configuration-versions".format(
TERRAFORM_API_ENDPOINT, workspace_id
)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"type": "configuration-versions",
"attributes": {"auto-queue-runs": False},
}
}
response = __post(endpoint, headers, payload)
cv_id = response["data"]["id"]
upload_url = response["data"]["attributes"]["upload-url"]
return cv_id, upload_url
def upload_configuration_content(data, upload_url):
headers = {"Content-Type": "application/octet-stream", "Accept": "application/json"}
tf_dist = os.environ.get("TF_DISTRIBUTION")
requests.put(upload_url, data=data, headers=headers, verify=tf_dist != "tfe")
def set_environment_variable(
key, value, description, workspace_id, sensitive, category, api_token
):
endpoint = "{}/workspaces/{}/vars".format(TERRAFORM_API_ENDPOINT, workspace_id)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"attributes": {
"key": key,
"value": value,
"description": description,
"category": category,
"sensitive": sensitive,
},
"type": "vars",
}
}
__post(endpoint, headers, payload)
def get_workspace_vars(workspace_id, api_token):
endpoint = "{}/workspaces/{}/vars".format(TERRAFORM_API_ENDPOINT, workspace_id)
headers = __build_standard_headers(api_token)
response = __get(endpoint, headers)
return response["data"]
def update_environment_variable(
var_id, key, value, description, workspace_id, sensitive, category, api_token
):
endpoint = "{}/workspaces/{}/vars/{}".format(
TERRAFORM_API_ENDPOINT, workspace_id, var_id
)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"attributes": {
"key": key,
"value": value,
"description": description,
"category": category,
"sensitive": sensitive,
},
"type": "vars",
}
}
__patch(endpoint, headers, payload)
def create_run(workspace_id, cv_id, api_token):
endpoint = "{}/runs".format(TERRAFORM_API_ENDPOINT)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"attributes": {"is-destroy": False, "message": "Run created by AFT"},
"type": "runs",
"relationships": {
"workspace": {"data": {"type": "workspaces", "id": workspace_id}},
"configuration-version": {
"data": {"type": "configuration-versions", "id": cv_id}
},
},
}
}
response = __post(endpoint, headers, payload)
return response["data"]["id"]
def create_destroy_run(workspace_id, api_token):
endpoint = "{}/runs".format(TERRAFORM_API_ENDPOINT)
headers = __build_standard_headers(api_token)
payload = {
"data": {
"attributes": {"is-destroy": True, "message": "Destroy run created by AFT"},
"type": "runs",
"relationships": {
"workspace": {"data": {"type": "workspaces", "id": workspace_id}}
},
}
}
response = __post(endpoint, headers, payload)
return response["data"]["id"]
def delete_workspace(workspace_id, api_token):
endpoint = "{}/workspaces/{}".format(TERRAFORM_API_ENDPOINT, workspace_id)
headers = __build_standard_headers(api_token)
response = __delete(endpoint, headers)
if response is not None:
errors = response["errors"]
if len(errors) == 0:
print("Successfully deleted workspace {}".format(workspace_id))
else:
print("Error occured deleting workspace {}".format(workspace_id))
print(str(errors))
else:
print("Successfully deleted workspace {}".format(workspace_id))
def wait_to_stabilize(entity_type, entity_id, target_states, api_token):
while True:
status = get_action_status(entity_type, entity_id, api_token)
if status in target_states:
break
print("{} not yet ready. In status {}".format(entity_type, status))
time.sleep(10)
return status
def get_action_status(object_type, object_id, api_token):
endpoint = "{}/{}/{}".format(TERRAFORM_API_ENDPOINT, object_type, object_id)
print(endpoint)
headers = __build_standard_headers(api_token)
response = __get(endpoint, headers)
return response["data"]["attributes"]["status"]
def __build_standard_headers(api_token):
return {
"Authorization": "Bearer {}".format(api_token),
"Content-type": "application/vnd.api+json",
}
def __post(endpoint, headers, payload):
tf_dist = os.environ.get("TF_DISTRIBUTION")
response = requests.post(
endpoint, headers=headers, json=payload, verify=tf_dist != "tfe"
)
__handle_errors(response)
return response.json()
def __patch(endpoint, headers, payload):
tf_dist = os.environ.get("TF_DISTRIBUTION")
response = requests.patch(
endpoint, headers=headers, json=payload, verify=tf_dist != "tfe"
)
__handle_errors(response)
return response.json()
def __get(endpoint, headers):
tf_dist = os.environ.get("TF_DISTRIBUTION")
response = requests.get(endpoint, headers=headers, verify=tf_dist != "tfe")
__handle_errors(response)
return response.json()
def __delete(endpoint, headers):
tf_dist = os.environ.get("TF_DISTRIBUTION")
response = requests.delete(endpoint, headers=headers, verify=tf_dist != "tfe")
# __handle_errors(response)
return response.json()
def __handle_errors(response):
if response is None or response.json() is None or "errors" not in response.json():
return
errors = response.json()["errors"]
print("Handling errors: {}".format(errors))
if len(errors) == 0:
print("Empty set of errors returned by client; raising internal failure")
raise ClientError(status="500", message="Internal failure")
elif len(errors) == 1:
error = errors[0]
raise ClientError(status=error["status"], message=error["title"])
else:
print(
"More than one error returned by client; raising internal failure and placing all errors in the message"
)
raise ClientError(status="500", message=str(errors))
class ClientError(Exception):
def __init__(self, status, message):
self.status = status
super().__init__(message)
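# Illustrative usage sketch (not part of the original module). The organization,
# workspace name, tarball path, and terminal run states below are hypothetical
# placeholders; the sequence simply chains the helpers defined above.
#
# workspace_id = create_workspace("my-org", "aft-example-ws", api_token)
# cv_id, upload_url = create_configuration_version(workspace_id, api_token)
# with open("config.tar.gz", "rb") as f:
#     upload_configuration_content(f.read(), upload_url)
# run_id = create_run(workspace_id, cv_id, api_token)
# wait_to_stabilize("runs", run_id, ["applied", "errored", "discarded"], api_token)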
|
spyglass/convert_report_to_junit.py | davideschiavone/pulpissimo | 228 | 12679836 | <reponame>davideschiavone/pulpissimo
#!/usr/bin/env python3.6
import argparse
from itertools import groupby, zip_longest
from textwrap import wrap
from xml.dom import minidom
import re
import sys
from xml.dom.minidom import Document
parser = argparse.ArgumentParser(description="Converts Spyglass lint reports of the lint/lint_rtl goal in 'moresimple' format to Junit XML reports. The resulting XML is printed to STDOUT. Pipe it into a file if necessary.")
parser.add_argument("src_report", type=argparse.FileType('r'), help="Spyglass 'moresimple' report location.")
parser.add_argument("--error-level", type=str, choices=['warning', 'error', 'fatal'], default='warning')
parser.add_argument("--fail-on-error", help= "Exit with return code 1 if there are lint errors. Use this for CI jobs to report failing.", action="store_true")
args = parser.parse_args()
report_file = args.src_report
regex = re.compile(r"#+ (?:[\w\-]* -> \w*=)?(?P<rule_type>[\w\-/ ]+)#+ *\n\+* *\n(?P<header>.*)\n=+\n(?P<messages>(?:(?!\+).*\n)*)")
header_regex = re.compile(r"(?P<id>ID *)(?P<rule>Rule *)(?P<alias>Alias *)(?P<severity>Severity *)(?P<file>File *)(?P<line>Line *)(?P<wt>Wt *)(?P<message>Message *)")
FIELDS = ["id", "rule", "alias", "severity", "file", "line", "wt", "message"]
error_levels = {
'fatal' : ['Fatal'],
'error' : ['Fatal', 'Error', 'SynthesisError'],
'warning': ['Fatal', 'Error', 'SynthesisError', 'Warning', 'SynthesisWarning']
}
try:
messages = []
for match in re.finditer(regex, report_file.read()):
# Create a new testsuite for each group of lint messages
rule_type = match.group("rule_type")
header = match.group("header")
header_match = re.match(header_regex, header)
if not header_match:
parser.error("Error while parsing report file")
# Parse fields of individual messages
for line in match.group("messages").splitlines():
message = {"rule_type": rule_type}
for field_name in FIELDS:
start_pos = header_match.start(field_name)
end_pos = header_match.end(field_name)
# If the end position equals the end position of the whole
# header line, consume the field until the end of the message
# line
if end_pos == header_match.end():
end_pos = len(line)
field_value = line[start_pos:end_pos]
message[field_name] = field_value.strip()
if message['severity'] in error_levels[args.error_level]:
messages.append(message)
doc = Document()
nr_of_failures = len([message for message in messages if message['severity'] in error_levels[args.error_level]])
testsuite = doc.createElement("testsuite")
testsuite.setAttribute("name", "lint_rtl")
testsuite.setAttribute("failures", str(nr_of_failures))
testsuite.setAttribute("errors", str(nr_of_failures))
# Group messages by rule_type
for rule_type, messages_by_ruletype in groupby(messages, lambda msg: msg['rule_type']):
for rule, messages_by_rule in groupby(messages_by_ruletype, lambda msg: msg['alias'] if msg['alias'] else msg['rule']):
testcase = doc.createElement("testcase")
testcase.setAttribute("name", rule)
testcase.setAttribute("classname", rule_type)
failure = doc.createElement("failure")
# Generate an ASCII table from the collected messages
header = "{: <10s} {: <17s} {: <37s}".format("Severity", "File", "Message").rstrip()
body = []
for msg in messages_by_rule:
severity = msg['severity']
for severity, file, message in zip_longest(wrap(msg['severity'], 10), wrap(msg['file']+", line "+msg['line'], 17), wrap(msg['message'], 37), fillvalue=""):
body.append("{: <10s} {: <17s} {: <37s}".format(severity, file, message).rstrip())
body.append("")
content_text ="\n".join([header, "-"*70, *body])
content = doc.createTextNode(content_text)
failure.appendChild(content)
testcase.appendChild(failure)
testsuite.appendChild(testcase)
doc.appendChild(testsuite)
print(doc.toprettyxml())
sys.exit(1 if nr_of_failures and args.fail_on_error else 0)
except Exception as e:
# parser.error("Failed to parse the report. Error: {}".format(e))
raise e
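# Example invocation (hypothetical paths), for illustration only:
#   ./convert_report_to_junit.py --error-level error --fail-on-error \
#       spyglass_reports/moresimple.rpt > lint_rtl_junit.xml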
|
venv/lib/python3.8/site-packages/statsmodels/tsa/tests/results/results_arima.py | johncollinsai/post-high-frequency-data | 6,931 | 12679856 | <reponame>johncollinsai/post-high-frequency-data<gh_stars>1000+
import os
import numpy as np
from numpy import genfromtxt
cur_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(cur_dir, "results_arima_forecasts.csv")
with open(path, "rb") as fd:
forecast_results = genfromtxt(fd, names=True, delimiter=",", dtype=float)
# NOTE:
# stata gives no indication of no convergence for 112 CSS but gives a
# different answer than x12arima, gretl simply fails to converge
# redid stata with starting parameters from x12arima
# it looks like stata uses a different formula for the CSS likelihood
# they appear to be using a larger sample than R, gretl, or us.
# CSS results are therefore taken from R and gretl
class ARIMA111(object):
def __init__(self, method="mle"):
self.k_ar = 1
self.k_diff = 1
self.k_ma = 1
if method == "mle":
# from stata
from .arima111_results import results
# unpack stata results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
# stata bse are OPG
# self.bse = np.diag(self.cov_params) ** .5
# from gretl
self.arroots = [1.0640 + 0j]
self.maroots = [1.2971 + 0j]
self.hqic = 496.8653
self.aic_gretl = 491.5112
self.bic_gretl = 504.7442
self.tvalues = [4.280, 20.57, -8.590]
self.pvalues = [1.87e-5, 5.53e-94, 8.73e-18]
self.cov_params = [[0.0423583, -0.00167449, 0.00262911],
[-0.00167449, 0.00208858, -0.0035068],
[0.00262911, -0.0035068, 0.00805622]]
self.bse = np.sqrt(np.diag(self.cov_params))
# these bse are approx [.205811, .0457010, .0897565]
# from stata
# forecast = genfromtxt(open(cur_dir+"/arima111_forecasts.csv"),
# delimiter=",", skip_header=1, usecols=[1,2,3,4,5])
# self.forecast = forecast[203:,1]
# self.fcerr = forecast[203:,2]
# self.fc_conf_int = forecast[203:,3:]
# from gretl
self.forecast = forecast_results['fc111c'][-25:]
self.forecasterr = forecast_results['fc111cse'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn']
self.forecasterr_dyn = forecast_results['fc111cdynse']
else:
# coefs, bse, tvalues, and pvalues taken from R because gretl
# uses mean not constant
self.bse = [0.21583833, 0.03844939, 0.08566390]
self.params = [1.0087257, 0.9455393, -0.8021834]
self.sigma2 = 0.6355913
self.tvalues = [4.673524, 24.591788, -9.364311]
self.pvalues = [5.464467e-06, 0, 0]
self.cov_params = np.array([
[0.046586183, 0.002331183, -0.004647432],
[0.002331183, 0.001478356, -0.002726201],
[-0.004647432, -0.002726201, 0.007338304]])
# from gretl
self.llf = -239.6601
self.aic = 487.3202
self.bic = 500.5334
self.hqic = 492.6669
self.arroots = [1.0578 + 0j]
self.maroots = [1.2473 + 0j]
# cov_params = np.array([[0.00369569, -0.00271777, 0.00269806],
# [0, 0.00209573, -0.00224559],
# [0, 0, 0.00342769]])
# self.cov_params = cov_params + cov_params.T - \
# np.diag(np.diag(cov_params))
# self.bse = np.sqrt(np.diag(self.cov_params))
self.resid = [-0.015830, -0.236884, -0.093946, -0.281152,
-0.089983, -0.226336, -0.351666, -0.198703,
-0.258418, -0.259026, -0.149513, -0.325703,
-0.165703, -0.279229, -0.295711, -0.120018,
-0.289870, -0.154243, -0.348403, -0.273902,
-0.240894, -0.182791, -0.252930, -0.152441,
-0.296412, -0.128941, 0.024068, -0.243972,
-0.011436, -0.392437, -0.217022, -0.118190,
-0.133489, -0.045755, -0.169953, 0.025010,
-0.107754, -0.119661, 0.070794, -0.065586,
-0.080390, 0.007741, -0.016138, -0.235283,
-0.121907, -0.125546, -0.428463, -0.087713,
-0.298131, -0.277757, -0.261422, -0.248326,
-0.137826, -0.043771, 0.437100, -0.150051,
0.751890, 0.424180, 0.450514, 0.277089,
0.732583, 0.225086, -0.403648, -0.040509,
-0.132975, -0.112572, -0.696214, 0.003079,
-0.003491, -0.108758, 0.401383, -0.162302,
-0.141547, 0.175094, 0.245346, 0.607134, 0.519045,
0.248419, 0.920521, 1.097613, 0.755983, 1.271156,
1.216969, -0.121014, 0.340712, 0.732750, 0.068915,
0.603912, 0.060157, -0.803110, -1.044392, 1.040311,
-0.984497, -1.611668, -0.258198, -0.112970,
-0.091071, 0.226487, 0.097475, -0.311423, -0.061105,
-0.449488, 0.317277, -0.329734, -0.181248, 0.443263,
-2.223262, 0.096836, -0.033782, 0.456032, 0.476052,
0.197564, 0.263362, 0.021578, 0.216803, 0.284249,
0.343786, 0.196981, 0.773819, 0.169070, -0.343097,
0.918962, 0.096363, 0.298610, 1.571685, -0.236620,
-1.073822, -0.194208, -0.250742, -0.101530,
-0.076437, -0.056319, 0.059811, -0.041620,
-0.128404, -0.403446, 0.059654, -0.347208,
-0.095257, 0.217668, -0.015057, 0.087431, 0.275062,
-0.263580, -0.122746, 0.195629, 0.367272,
-0.184188, 0.146368, 0.127777, -0.587128,
-0.498538, 0.172490, -0.456741, -0.694000,
0.199392, -0.140634, -0.029636, 0.364818,
-0.097080, 0.510745, 0.230842, 0.595504, 0.709721,
0.012218, 0.520223, -0.445174, -0.168341,
-0.935465, -0.894203, 0.733417, -0.279707,
0.258861, 0.417969, -0.443542, -0.477955, 0.288992,
0.442126, 0.075826, 0.665759, 0.571509, -0.204055,
0.835901, -0.375693, 3.292828, -1.469299,
-0.122206, 0.617909, -2.250468, 0.570871, 1.166013,
0.079873, 0.463372, 1.981434, -0.142869, 3.023376,
-3.713161, -6.120150, -0.007487, 1.267027, 1.176930]
self.linear = [
29.3658, 29.6069, 29.6339, 29.8312, 29.8400,
30.0663, 30.1617, 30.1187, 30.2384, 30.2990,
30.3595, 30.5457, 30.5457, 30.7192, 30.7757,
30.8100, 31.0399, 31.0942, 31.2984, 31.2939,
31.3609, 31.4628, 31.6329, 31.7324, 31.9464,
32.0089, 32.2559, 32.6940, 32.8614, 33.2924,
33.3170, 33.5182, 33.8335, 34.1458, 34.5700,
34.8750, 35.4078, 35.8197, 36.2292, 36.8656,
37.3804, 37.8923, 38.5161, 39.1353, 39.5219,
40.0255, 40.5285, 40.6877, 41.1981, 41.4778,
41.7614, 42.0483, 42.3378, 42.7438, 43.2629,
44.3501, 44.8481, 46.3758, 47.6495, 49.0229,
50.2674, 52.0749, 53.4036, 54.0405, 55.0330,
55.9126, 56.7962, 56.9969, 57.9035, 58.8088,
59.5986, 60.9623, 61.7415, 62.5249, 63.6547,
64.8929, 66.5810, 68.2516, 69.6795, 71.9024,
74.4440, 76.7288, 79.6830, 82.7210, 84.3593,
86.4672, 89.0311, 90.8961, 93.3398, 95.2031,
96.0444, 96.4597, 99.0845, 99.5117, 99.0582,
99.9130, 100.8911, 101.8735, 103.2025, 104.4114,
105.1611, 106.1495, 106.6827, 108.0297, 108.6812,
109.4567, 110.9233, 109.4032, 110.2338, 110.9440,
112.2239, 113.6024, 114.7366, 115.9784, 116.9832,
118.2158, 119.5562, 121.0030, 122.3262, 124.3309,
125.7431, 126.5810, 128.8036, 130.2014, 131.8283,
134.9366, 136.1738, 136.3942, 137.4507, 138.4015,
139.4764, 140.5563, 141.6402, 142.8416, 143.9284,
144.9034, 145.5403, 146.6472, 147.2953, 148.1823,
149.4151, 150.4126, 151.5249, 152.8636, 153.6227,
154.5044, 155.7327, 157.1842, 158.0536, 159.2722,
160.4871, 160.8985, 161.3275, 162.4567, 162.8940,
163.0006, 164.0406, 164.7296, 165.5352, 166.7971,
167.5893, 169.0692, 170.3045, 171.9903, 173.8878,
175.0798, 176.8452, 177.5683, 178.5355, 178.5942,
178.5666, 180.2797, 180.9411, 182.1820, 183.6435,
184.1780, 184.6110, 185.8579, 187.3242, 188.4342,
190.2285, 192.0041, 192.9641, 195.0757, 195.9072,
200.8693, 200.8222, 202.0821, 204.1505, 203.0031,
204.7540, 207.2581, 208.6696, 210.5136, 214.1399,
215.5866, 220.6022, 218.2942, 212.6785, 213.2020,
215.2081]
# forecasting is not any different for css
# except you lose the first p+1 observations for in-sample
# these results are from x-12 arima
self.forecast = forecast_results['fc111c_css'][-25:]
self.forecasterr = forecast_results['fc111cse_css'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn_css']
self.forecasterr_dyn = forecast_results['fc111cdynse_css']
class ARIMA211(object):
def __init__(self, method="mle"):
if method == 'mle':
# from stata
from .arima111_results import results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
self.k_diff = 1
# stata bse are OPG
# self.bse = np.diag(self.cov_params) ** .5
# from gretl
self.arroots = [1.027 + 0j, 5.7255 + 0j]
self.maroots = [1.1442+0j]
self.hqic = 496.5314
self.aic_gretl = 489.8388
self.bic_gretl = 506.3801
self.tvalues = [3.468, 11.14, -1.941, 12.55]
self.pvalues = [.0005, 8.14e-29, .0522, 3.91e-36]
cov_params = np.array([
[0.0616906, -0.00250187, 0.0010129, 0.00260485],
[0, 0.0105302, -0.00867819, -0.00525614],
[0, 0, 0.00759185, 0.00361962],
[0, 0, 0, 0.00484898]])
self.cov_params = (
cov_params + cov_params.T - np.diag(np.diag(cov_params)))
self.bse = np.sqrt(np.diag(self.cov_params))
# these bse are approx [0.248376, 0.102617, 0.0871312, 0.0696346]
self.forecast = forecast_results['fc211c'][-25:]
self.forecasterr = forecast_results['fc211cse'][-25:]
self.forecast_dyn = forecast_results['fc211cdyn'][-25:]
self.forecasterr_dyn = forecast_results['fc211cdynse'][-25:]
else:
from .arima211_css_results import results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
self.k_diff = 1
# from gretl
self.arroots = [1.0229 + 0j, 4.4501 + 0j]
self.maroots = [1.0604 + 0j]
self.hqic = 489.3225
self.aic_gretl = 482.6486
self.bic_gretl = 499.1402
self.tvalues = [.7206, 22.54, -19.04]
self.pvalues = [.4712, 1.52e-112, 2.19e-10, 8.00e-81]
cov_params = np.array([
[8.20496e-04, -0.0011992, 4.57078e-04, 0.00109907],
[0, 0.00284432, -0.0016752, -0.00220223],
[0, 0, 0.00119783, 0.00108868],
[0, 0, 0, 0.00245324]])
self.cov_params = (
cov_params + cov_params.T - np.diag(np.diag(cov_params)))
self.bse = np.sqrt(np.diag(self.cov_params))
# forecasting is not any different for css
# except you lose the first p+1 observations for in-sample
self.forecast = forecast_results['fc111c_css'][-25:]
self.forecasterr = forecast_results['fc111cse_css'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn_css']
self.forecasterr_dyn = forecast_results['fc111cdynse_css']
class ARIMA112(object):
def __init__(self, method="mle"):
self.df_model = 3
self.k = 5
self.k_ar = 1
self.k_ma = 2
self.k_exog = 1
self.k_diff = 1
if method == "mle":
from .arima112_results import results
# from gretl
self.arroots = [1.0324 + 0j]
self.maroots = [1.1447 + 0j, -4.8613+0j]
self.hqic = 495.5852
self.aic_gretl = 488.8925
self.bic_gretl = 505.4338
self.tvalues = [3.454, 31.10, -7.994, -2.127]
self.pvalues = [0.0006, 2.1e-212, 1.31e-15, .0334]
cov_params = np.array([
[0.0620096, -0.00172172, 0.00181301, 0.00103271],
[0, 9.69682e-04, -9.70767e-04, -8.99814e-04],
[0, 0, 0.00698068, -0.00443871],
[0, 0, 0, 0.00713662]])
self.cov_params = (
cov_params + cov_params.T - np.diag(np.diag(cov_params)))
self.bse = np.sqrt(np.diag(self.cov_params))
# from gretl
self.forecast = forecast_results['fc112c'][-25:]
self.forecasterr = forecast_results['fc112cse'][-25:]
self.forecast_dyn = forecast_results['fc112cdyn']
self.forecasterr_dyn = forecast_results['fc112cdynse']
# unpack stata results
self.__dict__ = results
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
# stata bse are OPG
# self.bse = np.diag(self.cov_params) ** .5
else:
# NOTE: this looks like a "hard" problem
# unable to replicate stata's results even with their starting
# values
# unable to replicate x12 results in stata using their starting
# values. x-12 has better likelihood and we can replicate so
# use their results
# taken from R using X12-arima values as init params
self.bse = [0.07727588, 0.09356658, 0.10503567, 0.07727970]
self.params = [0.9053219, -0.692412, 1.0736728, 0.1720008]
self.sigma2 = 0.6820727
self.tvalues = [11.715452, -7.400215, 10.221983, 2.225692]
self.pvalues = [0, 3.791634e-12, 0, 2.716275e-02]
self.cov_params = np.array([
[0.0059715623, 0.001327824, -0.001592129, -0.0008061933],
[0.0013278238, 0.008754705, -0.008024634, -0.0045933413],
[-0.0015921293, -0.008024634, 0.011032492, 0.0072509641],
[-0.0008061933, -0.004593341, 0.007250964, 0.0059721516]])
# from x12arima via gretl
# gretl did not converge for this model...
self.llf = -246.7534
self.nobs = 202
# self.params = [.905322, -.692425, 1.07366, 0.172024]
# self.sigma2 = 0.682072819129
# self.bse = [0.0756430, 0.118440, 0.140691, 0.105266]
self.resid = [
-1.214477, -0.069772, -1.064510, -0.249555,
-0.874206, -0.322177, -1.003579, -0.310040, -0.890506,
-0.421211, -0.715219, -0.564119, -0.636560, -0.580912,
-0.717440, -0.424277, -0.747835, -0.424739, -0.805958,
-0.516877, -0.690127, -0.473072, -0.694766, -0.435627,
-0.736474, -0.388060, -0.429596, -0.557224, -0.342308,
-0.741842, -0.442199, -0.491319, -0.420884, -0.388057,
-0.466176, -0.257193, -0.429646, -0.349683, -0.205870,
-0.335547, -0.290300, -0.216572, -0.234272, -0.427951,
-0.255446, -0.338097, -0.579033, -0.213860, -0.556756,
-0.389907, -0.510060, -0.409759, -0.396778, -0.258727,
0.160063, -0.467109, 0.688004, -0.021120, 0.503044,
0.031500, 0.878365, -0.003548, -0.079327, 0.038289,
0.032773, -0.050780, -0.560124, 0.185655, -0.111981,
-0.020714, 0.363254, -0.218484, -0.006161, 0.165950,
0.252365, 0.599220, 0.488921, 0.347677, 1.079814,
1.102745, 0.959907, 1.570836, 1.454934, 0.343521,
1.125826, 1.154059, 0.666141, 1.269685, 0.551831,
-0.027476, -0.305192, 1.715665, -0.990662, -0.548239,
-0.011636, 0.197796, -0.050128, 0.480031, 0.061198,
-0.049562, 0.064436, -0.300420, 0.494730, -0.411527,
0.109242, 0.375255, -2.184482, 0.717733, -0.673064,
0.751681, -0.092543, 0.438016, -0.024881, 0.250085,
0.096010, 0.452618, 0.265491, 0.374299, 0.820424,
0.238176, -0.059646, 1.214061, 0.028679, 0.797567,
1.614444, -0.094717, -0.408067, 0.299198, -0.021561,
0.231915, 0.084190, 0.199192, 0.201132, 0.148509,
0.035431, -0.203352, 0.264744, -0.319785, 0.150305,
0.184628, 0.074637, 0.148340, 0.357372, -0.241250,
0.119294, 0.204413, 0.458730, -0.190477, 0.416587,
0.084216, -0.363361, -0.310339, 0.309728, -0.549677,
-0.449092, 0.183025, -0.259015, -0.000883, 0.267255,
-0.188068, 0.577697, 0.049310, 0.746401, 0.565829,
0.178270, 0.709983, -0.348012, 0.273262, -0.873288,
-0.403100, 0.720072, -0.428076, 0.488246, 0.248152,
-0.313214, -0.323137, 0.414843, 0.308909, 0.134180,
0.732275, 0.535639, -0.056128, 1.128355, -0.449151,
3.879123, -2.303860, 1.712549, -0.074407, -1.162052,
0.848316, 1.262031, 0.009320, 1.017563, 1.978597,
-0.001637, 3.782223, -4.119563, -3.666488, 0.345244,
0.869998, 0.635321]
self.linear = [
30.5645, 29.4398, 30.6045, 29.7996, 30.6242,
30.1622, 30.8136, 30.2300, 30.8705, 30.4612, 30.9252,
30.7841, 31.0166, 31.0209, 31.1974, 31.1143, 31.4978,
31.3647, 31.7560, 31.5369, 31.8101, 31.7531, 32.0748,
32.0156, 32.3865, 32.2681, 32.7096, 33.0072, 33.1923,
33.6418, 33.5422, 33.8913, 34.1209, 34.4881, 34.8662,
35.1572, 35.7296, 36.0497, 36.5059, 37.1355, 37.5903,
38.1166, 38.7343, 39.3280, 39.6554, 40.2381, 40.6790,
40.8139, 41.4568, 41.5899, 42.0101, 42.2098, 42.5968,
42.9587, 43.5399, 44.6671, 44.9120, 46.8211, 47.5970,
49.2685, 50.1216, 52.3035, 53.0793, 53.9617, 54.8672,
55.8508, 56.6601, 56.8143, 58.0120, 58.7207, 59.6367,
61.0185, 61.6062, 62.5340, 63.6476, 64.9008, 66.6111,
68.1523, 69.5202, 71.8973, 74.2401, 76.4292, 79.4451,
82.2565, 83.5742, 86.0459, 88.4339, 90.2303, 92.8482,
94.4275, 95.3052, 95.7843, 99.0907, 98.4482, 98.8116,
99.6022, 100.8501, 101.6200, 103.2388, 104.1496,
105.0356, 106.0004, 106.5053, 108.1115, 108.3908,
109.5247, 110.8845, 108.7823, 110.8731, 110.6483,
112.7925, 113.3620, 115.0249, 115.7499, 117.1040,
118.0474, 119.6345, 120.8257, 122.2796, 124.2618,
125.4596, 126.2859, 128.8713, 129.7024, 131.7856,
134.7947, 135.5081, 135.9008, 137.2216, 138.0681,
139.3158, 140.3008, 141.4989, 142.6515, 143.7646,
144.7034, 145.3353, 146.6198, 147.0497, 148.2154,
149.3254, 150.3517, 151.4426, 152.8413, 153.3807,
154.4956, 155.6413, 157.1905, 157.7834, 159.3158,
160.2634, 160.7103, 161.1903, 162.5497, 162.6491,
163.0170, 164.1590, 164.7009, 165.6327, 166.8881,
167.5223, 169.2507, 170.1536, 172.1342, 173.7217,
174.8900, 176.7480, 177.1267, 178.4733, 178.1031,
178.5799, 180.4281, 180.7118, 182.3518, 183.5132,
184.0231, 184.4852, 185.9911, 187.2658, 188.3677,
190.2644, 191.8561, 192.6716, 195.1492, 195.3209,
201.7039, 198.9875, 202.7744, 203.0621, 202.7257,
204.6580, 207.3287, 208.1154, 210.5164, 213.9986,
214.8278, 221.0086, 215.8405, 212.3258, 213.5990,
215.7497]
self.yr = []
self.arroots = [-1.4442 + 0j]
self.maroots = [-1.1394 + 0j, -5.1019+0j]
self.hqic = 510.1902
self.aic = 503.5069
self.bic = 520.0234
# TODO: Document source for these non-used results
# (and why they are not used)
# self.tvalues = [11.97, -5.846, 7.631, 1.634]
# self.pvalues = [5.21e-33, 5.03e-9, 2.32e-14, .1022]
# cov_params = np.array([
# [0.0620096, -0.00172172, 0.00181301, 0.00103271],
# [0, 9.69682e-04, -9.70767e-04, -8.99814e-04],
# [0, 0, 0.00698068, -0.00443871],
# [0, 0, 0, 0.00713662]])
# self.cov_params = cov_params + cov_params.T - \
# np.diag(np.diag(cov_params))
# self.bse = np.sqrt(np.diag(self.cov_params))
self.forecast = forecast_results['fc112c_css'][-25:]
self.forecasterr = forecast_results['fc112cse_css'][-25:]
self.forecast_dyn = forecast_results['fc112cdyn_css']
self.forecasterr_dyn = forecast_results['fc112cdynse_css']
|
cupy_alias/manipulation/basic.py | fixstars/clpy | 142 | 12679858 | from clpy.manipulation.basic import * # NOQA
|
tools/wptserve/wptserve/logger.py | meyerweb/wpt | 14,668 | 12679865 | import logging
def get_logger():
# Use the root logger
return logging.getLogger()
|
src/genie/libs/parser/iosxe/show_config.py | nujo/genieparser | 204 | 12679878 | ''' show_config.py
IOSXE parsers for the following show command
* show configuration lock
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Optional, \
Any
# ==================================================
# Parser for 'show configuration lock'
# ==================================================
class ShowConfigurationLockSchema(MetaParser):
"""
Schema for show configuration lock
"""
schema = {
Optional('config_session_lock'): {
Optional('owner_pid'): {
Any(): {
'tty_number': int,
'tty_username': str,
'user_debug_info': str,
'lock_active_time_in_sec': int,
}
}
},
Optional('parser_configure_lock'): {
Optional('owner_pid'): {
Any(): {
Optional('user'): str,
Optional('tty'): int,
Optional('type'): str,
Optional('state'): str,
Optional('class'): str,
Optional('count'): int,
Optional('pending_requests'): int,
Optional('user_debug_info'): str,
Optional('session_idle_state'): str,
Optional('num_of_exec_cmds_executed'): int,
Optional('num_of_exec_cmds_blocked'): int,
Optional('config_wait_for_show_completion'): str,
Optional('remote_ip_address'): str,
Optional('lock_active_time_in_sec'): int,
Optional('lock_expiration_timer_in_sec'): int,
}
}
}
}
class ShowConfigurationLock(ShowConfigurationLockSchema):
""" Parser for show configuration lock"""
cli_command = 'show configuration lock'
def cli(self,output=None):
if output is None:
# execute command to get output
out = self.device.execute(self.cli_command)
else:
out = output
# initial variables
ret_dict = {}
parser_lock_found = False
# Owner PID : -1
# Owner PID : 543
# Owner PID :10
p1 = re.compile(r'^\s*Owner +PID +: *(?P<owner_pid>(\-)?\d+)$')
# TTY number : 2
p2 = re.compile(r'^\s*TTY +number +: +(?P<tty_number>\d+)$')
# TTY username : Test1
p3 = re.compile(r'^\s*TTY +username +: +(?P<tty_username>\S+)$')
# User debug info : CLI Session Lock
p4 = re.compile(r'^\s*User +debug +info +: '\
'+(?P<user_debug_info>(\w+ *)+)$')
# Lock Active time (in Sec) : 63
p5 = re.compile(r'^\s*Lock +(a|A)ctive +time +\(in +Sec\) +: '\
'+(?P<lock_active_time_in_sec>\d+)$')
# Parser Configure Lock
p6 = re.compile(r'^\s*Parser +Configure +Lock$')
# User : User1
# User:User1
p7 = re.compile(r'^\s*User *: *(?P<user>\S+)$')
# TTY : 3
# TTY:3
p8 = re.compile(r'^\s*TTY *: *(?P<tty>(\-)?\d+)$')
# Type : EXCLUSIVE
# Type:EXCLUSIVE
p9 = re.compile(r'^\s*Type *: *(?P<type>[\w\W]+)$')
# State : LOCKED
# State:LOCKED
p10 = re.compile(r'^\s*State *: *(?P<state>\S+)$')
# Class : Exposed
# Class:Exposed
p11 = re.compile(r'^\s*Class *: *(?P<class_name>\S+)$')
# Count : 0
# Count:0
p12 = re.compile(r'^\s*Count *: *(?P<count>\d+)$')
# Pending Requests : 0
# Pending Requests:0
p13 = re.compile(r'^\s*Pending +Requests *: '\
'*(?P<pending_requests>\d+)$')
# User debug info : 0
# User debug info:0
p14 = re.compile(r'^\s*User +debug +info *: '\
'*(?P<user_debug_info>[\w\W]+)$')
# Session idle state : TRUE
p15 = re.compile(r'^Session +idle +state *: *(?P<session_idle_state>[\w]+)$')
# No of exec cmds getting executed : 0
p16 = re.compile(r'^No +of +exec +cmds +getting +executed *: *(?P<num_of_exec_cmds_executed>\d+)$')
# No of exec cmds blocked : 0
p17 = re.compile(r'^No +of +exec +cmds +blocked *: *(?P<num_of_exec_cmds_blocked>\d+)$')
# Config wait for show completion : FALSE
p18 = re.compile(r'^Config +wait +for +show +completion *: *(?P<config_wait_for_show_completion>[\w]+)$')
# Remote ip address : Unknown
p19 = re.compile(r'^Remote +ip +address *: *(?P<remote_ip_address>[\w]+)$')
# Lock Expiration timer (in Sec) : 593
p20 = re.compile(r'^Lock +Expiration +timer +\(in +Sec\) *: *(?P<lock_expiration_timer_in_sec>[\w]+)$')
for line in out.splitlines():
line = line.strip()
if not parser_lock_found:
# Owner PID : 513
m = p1.match(line)
if m:
group = m.groupdict()
config_session_lock = ret_dict.\
setdefault('config_session_lock',{}).\
setdefault('owner_pid',{}).\
setdefault(int(group['owner_pid']),{})
continue
# TTY number : 2
m = p2.match(line)
if m:
group = m.groupdict()
config_session_lock.update({'tty_number' :
int(group['tty_number'])})
continue
# TTY username : Test1
m = p3.match(line)
if m:
group = m.groupdict()
config_session_lock.update({'tty_username' :
group['tty_username']})
continue
# User debug info : CLI Session Lock
m = p4.match(line)
if m:
group = m.groupdict()
config_session_lock.update({'user_debug_info' :
group['user_debug_info']})
continue
# Lock Active time (in Sec) : 63
m = p5.match(line)
if m:
group = m.groupdict()
config_session_lock.update({'lock_active_time_in_sec' :
int(group['lock_active_time_in_sec'])})
continue
# Parser Configure Lock
m = p6.match(line)
if m:
parser_lock_found = True
continue
else:
# Owner PID : 10
m = p1.match(line)
if m:
group = m.groupdict()
parser_configure_lock = ret_dict.\
setdefault('parser_configure_lock',{}).\
setdefault('owner_pid',{}).setdefault( \
int(group['owner_pid']),{})
continue
# User : User1
m = p7.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'user' : group['user']})
continue
# TTY : 3
m = p8.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'tty' : int(group['tty'])})
continue
# Type : Exclusive
m = p9.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'type' : group['type']})
continue
# State : Locked
m = p10.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'state' : group['state']})
continue
# Class : Exposed
m = p11.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'class' : \
group['class_name']})
continue
# Count : 0
m = p12.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'count' :
int(group['count'])})
continue
# Pending Requests : 0
m = p13.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'pending_requests' :
int(group['pending_requests'])})
continue
# User debug info : 0
m = p14.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'user_debug_info' :
group['user_debug_info']})
continue
# Session idle state : TRUE
m = p15.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'session_idle_state' :
group['session_idle_state']})
continue
# No of exec cmds getting executed : 0
m = p16.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'num_of_exec_cmds_executed' :
int(group['num_of_exec_cmds_executed'])})
continue
# No of exec cmds blocked : 0
m = p17.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'num_of_exec_cmds_blocked' :
int(group['num_of_exec_cmds_blocked'])})
continue
# Config wait for show completion : FALSE
m = p18.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'config_wait_for_show_completion' :
group['config_wait_for_show_completion']})
continue
# Remote ip address : Unknown
m = p19.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'remote_ip_address' :
group['remote_ip_address']})
continue
# Lock Expiration timer (in Sec) : 593
m = p20.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'lock_expiration_timer_in_sec' :
int(group['lock_expiration_timer_in_sec'])})
continue
# Lock Active time (in Sec) : 63
m = p5.match(line)
if m:
group = m.groupdict()
parser_configure_lock.update({'lock_active_time_in_sec' :
int(group['lock_active_time_in_sec'])})
continue
return ret_dict
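# Illustrative usage sketch (not part of the original parser). The device handle
# and captured output below are hypothetical; passing an explicit `output` string
# exercises the regex logic without connecting to a device.
#
# parser = ShowConfigurationLock(device=device)
# parsed = parser.parse(output=captured_show_configuration_lock_output)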
|
DQM/L1TMonitor/python/L1TBMTFAlgoSelector_cfi.py | ckamtsikis/cmssw | 852 | 12679890 | import FWCore.ParameterSet.Config as cms
l1tBmtfAlgoSelector = cms.EDProducer(
'L1TBMTFAlgoSelector',
# verbose = cms.untracked.bool(False),
bmtfKalman = cms.InputTag("simKBmtfDigis:BMTF"),
bmtfLegacy = cms.InputTag("simBmtfDigis:BMTF"),
feds = cms.InputTag("rawDataCollector")
)
|
examples/vae/utils/custom_mlp.py | nipunbatra/pyro | 4,959 | 12679905 | <filename>examples/vae/utils/custom_mlp.py
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from inspect import isclass
import torch
import torch.nn as nn
from pyro.distributions.util import broadcast_shape
class Exp(nn.Module):
"""
a custom module for exponentiation of tensors
"""
def __init__(self):
super().__init__()
def forward(self, val):
return torch.exp(val)
class ConcatModule(nn.Module):
"""
a custom module for concatenation of tensors
"""
def __init__(self, allow_broadcast=False):
self.allow_broadcast = allow_broadcast
super().__init__()
def forward(self, *input_args):
# we have a single object
if len(input_args) == 1:
# regardless of type,
# we don't care about single objects
# we just index into the object
input_args = input_args[0]
# don't concat things that are just single objects
if torch.is_tensor(input_args):
return input_args
else:
if self.allow_broadcast:
shape = broadcast_shape(*[s.shape[:-1] for s in input_args]) + (-1,)
input_args = [s.expand(shape) for s in input_args]
return torch.cat(input_args, dim=-1)
class ListOutModule(nn.ModuleList):
"""
a custom module for outputting a list of tensors from a list of nn modules
"""
def __init__(self, modules):
super().__init__(modules)
def forward(self, *args, **kwargs):
# loop over modules in self, apply same args
return [mm.forward(*args, **kwargs) for mm in self]
def call_nn_op(op):
"""
a helper function that adds appropriate parameters when calling
an nn module representing an operation like Softmax
:param op: the nn.Module operation to instantiate
:return: instantiation of the op module with appropriate parameters
"""
if op in [nn.Softmax, nn.LogSoftmax]:
return op(dim=1)
else:
return op()
class MLP(nn.Module):
def __init__(
self,
mlp_sizes,
activation=nn.ReLU,
output_activation=None,
post_layer_fct=lambda layer_ix, total_layers, layer: None,
post_act_fct=lambda layer_ix, total_layers, layer: None,
allow_broadcast=False,
use_cuda=False,
):
# init the module object
super().__init__()
assert len(mlp_sizes) >= 2, "Must have input and output layer sizes defined"
# get our inputs, outputs, and hidden
input_size, hidden_sizes, output_size = (
mlp_sizes[0],
mlp_sizes[1:-1],
mlp_sizes[-1],
)
# assume int or list
assert isinstance(
input_size, (int, list, tuple)
), "input_size must be int, list, tuple"
# everything in MLP will be concatted if it's multiple arguments
last_layer_size = input_size if type(input_size) == int else sum(input_size)
# everything sent in will be concatted together by default
all_modules = [ConcatModule(allow_broadcast)]
# loop over hidden layer sizes
for layer_ix, layer_size in enumerate(hidden_sizes):
assert type(layer_size) == int, "Hidden layer sizes must be ints"
# get our nn layer module (in this case nn.Linear by default)
cur_linear_layer = nn.Linear(last_layer_size, layer_size)
# for numerical stability -- initialize the layer properly
cur_linear_layer.weight.data.normal_(0, 0.001)
cur_linear_layer.bias.data.normal_(0, 0.001)
# use GPUs to share data during training (if available)
if use_cuda:
cur_linear_layer = nn.DataParallel(cur_linear_layer)
# add our linear layer
all_modules.append(cur_linear_layer)
# handle post_linear
post_linear = post_layer_fct(
layer_ix + 1, len(hidden_sizes), all_modules[-1]
)
# if we send something back, add it to sequential
# here we could return a batch norm for example
if post_linear is not None:
all_modules.append(post_linear)
# handle activation (assumed no params -- deal with that later)
all_modules.append(activation())
# now handle after activation
post_activation = post_act_fct(
layer_ix + 1, len(hidden_sizes), all_modules[-1]
)
# handle post_activation if not null
# could add batch norm for example
if post_activation is not None:
all_modules.append(post_activation)
# save the layer size we just created
last_layer_size = layer_size
# now we have all of our hidden layers
# we handle outputs
assert isinstance(
output_size, (int, list, tuple)
), "output_size must be int, list, tuple"
if type(output_size) == int:
all_modules.append(nn.Linear(last_layer_size, output_size))
if output_activation is not None:
all_modules.append(
call_nn_op(output_activation)
if isclass(output_activation)
else output_activation
)
else:
# we're going to have a bunch of separate layers we can spit out (a tuple of outputs)
out_layers = []
# multiple outputs? handle separately
for out_ix, out_size in enumerate(output_size):
# for a single output object, we create a linear layer and some weights
split_layer = []
# we have an activation function
split_layer.append(nn.Linear(last_layer_size, out_size))
# then we get our output activation (either we repeat all or we index into a same sized array)
act_out_fct = (
output_activation
if not isinstance(output_activation, (list, tuple))
else output_activation[out_ix]
)
if act_out_fct:
# we check if it's a class. if so, instantiate the object
# otherwise, use the object directly (e.g. pre-instantiated)
split_layer.append(
call_nn_op(act_out_fct) if isclass(act_out_fct) else act_out_fct
)
# our outputs is just a sequential of the two
out_layers.append(nn.Sequential(*split_layer))
all_modules.append(ListOutModule(out_layers))
# now we have all of our modules, we're ready to build our sequential!
# process mlps in order, pretty standard here
self.sequential_mlp = nn.Sequential(*all_modules)
# pass through our sequential for the output!
def forward(self, *args, **kwargs):
return self.sequential_mlp.forward(*args, **kwargs)
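# Illustrative usage sketch (not part of the original module); the sizes below are
# arbitrary. A single hidden layer feeds two output heads: class probabilities
# (Softmax) and an unconstrained scalar (no output activation).
#
# example_mlp = MLP(
#     [784, 256, [10, 1]],
#     activation=nn.ReLU,
#     output_activation=[nn.Softmax, None],
# )
# probs, scalar = example_mlp(torch.randn(32, 784))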
|
src/compas_rhino/conversions/_surfaces.py | funkchaser/compas | 235 | 12679938 | <reponame>funkchaser/compas
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import Point
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from ._primitives import point_to_rhino
from ._primitives import point_to_compas
def surface_to_compas_data(surface):
"""Convert a Rhino surface to a COMPAS surface.
Parameters
----------
surface: :class:`Rhino.Geometry.Surface`
Returns
-------
:obj:`dict`
"""
surface = surface.ToNurbsSurface()
points = []
weights = []
for j in range(surface.Points.VCount):
_points = []
_weights = []
for i in range(surface.Points.UCount):
point = surface.Points.GetPoint(i, j)
weight = surface.Points.GetWeight(i, j)
_points.append(point_to_compas(point))
_weights.append(weight)
points.append(_points)
weights.append(_weights)
u_knots = []
u_mults = []
for index in range(surface.KnotsU.Count):
u_knots.append(surface.KnotsU.Item[index])
u_mults.append(surface.KnotsU.KnotMultiplicity(index))
v_knots = []
v_mults = []
for index in range(surface.KnotsV.Count):
v_knots.append(surface.KnotsV.Item[index])
v_mults.append(surface.KnotsV.KnotMultiplicity(index))
u_degree = surface.OrderU - 1
v_degree = surface.OrderV - 1
is_u_periodic = False
is_v_periodic = False
return {
'points': [[point.data for point in row] for row in points],
'weights': weights,
'u_knots': u_knots,
'v_knots': v_knots,
'u_mults': u_mults,
'v_mults': v_mults,
'u_degree': u_degree,
'v_degree': v_degree,
'is_u_periodic': is_u_periodic,
'is_v_periodic': is_v_periodic
}
def data_to_rhino_surface(data):
"""Convert a COMPAS surface to a Rhino surface.
Parameters
----------
data: :obj:`dict`
Returns
-------
:class:`Rhino.Geometry.NurbsSurface`
"""
points = [[Point.from_data(point) for point in row] for row in data['points']]
nu = len(points[0])
nv = len(points)
nurbs = RhinoNurbsSurface.Create(3,
False,
data['u_degree'] + 1,
data['v_degree'] + 1,
nu,
nv)
for i in range(nu):
for j in range(nv):
nurbs.Points.SetPoint(i, j, point_to_rhino(points[j][i]))
nurbs.Points.SetWeight(i, j, data['weights'][j][i])
u_knotvector = []
for knot, mult in zip(data['u_knots'], data['u_mults']):
for i in range(mult):
u_knotvector.append(knot)
for index, knot in enumerate(u_knotvector):
nurbs.KnotsU.Item[index] = knot
v_knotvector = []
for knot, mult in zip(data['v_knots'], data['v_mults']):
for i in range(mult):
v_knotvector.append(knot)
for index, knot in enumerate(v_knotvector):
nurbs.KnotsV.Item[index] = knot
return nurbs
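# Illustrative round trip (not part of the original module); assumes a Rhino
# environment with an existing `rhino_surface` geometry object in hand.
#
# data = surface_to_compas_data(rhino_surface)
# rebuilt = data_to_rhino_surface(data)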
|
nuplan/planning/script/builders/training_callback_builder.py | motional/nuplan-devkit | 128 | 12679954 | import logging
from typing import List
import pytorch_lightning as pl
from hydra.utils import instantiate
from omegaconf import DictConfig
from nuplan.planning.script.builders.utils.utils_type import validate_type
logger = logging.getLogger(__name__)
def build_callbacks(cfg: DictConfig) -> List[pl.Callback]:
"""
Build callbacks based on config.
:param cfg: Dict config.
:return List of callbacks.
"""
logger.info('Building callbacks...')
instantiated_callbacks = []
for callback_type in cfg.values():
callback: pl.Callback = instantiate(callback_type)
validate_type(callback, pl.Callback)
instantiated_callbacks.append(callback)
logger.info('Building callbacks...DONE!')
return instantiated_callbacks
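# Illustrative sketch (not part of the original module): a hypothetical callbacks
# config that build_callbacks could instantiate. ModelCheckpoint is a standard
# pytorch_lightning callback; the key name and save_top_k value are arbitrary.
#
# example_cfg = DictConfig({
#     "model_checkpoint": {
#         "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
#         "save_top_k": 1,
#     }
# })
# callbacks = build_callbacks(example_cfg)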
|
train.py | DalasNoin/ACER | 235 | 12679988 | <filename>train.py
# -*- coding: utf-8 -*-
import math
import random
import gym
import torch
from torch import nn
from torch.nn import functional as F
from memory import EpisodicReplayMemory
from model import ActorCritic
from utils import state_to_tensor
# Knuth's algorithm for generating Poisson samples
def _poisson(lmbd):
L, k, p = math.exp(-lmbd), 0, 1
while p > L:
k += 1
p *= random.uniform(0, 1)
return max(k - 1, 0)
# Transfers gradients from thread-specific model to shared model
def _transfer_grads_to_shared_model(model, shared_model):
for param, shared_param in zip(model.parameters(), shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
# Adjusts learning rate
def _adjust_learning_rate(optimiser, lr):
for param_group in optimiser.param_groups:
param_group['lr'] = lr
# Updates networks
def _update_networks(args, T, model, shared_model, shared_average_model, loss, optimiser):
# Zero shared and local grads
optimiser.zero_grad()
"""
Calculate gradients for gradient descent on loss functions
Note that math comments follow the paper, which is formulated for gradient ascent
"""
loss.backward()
# Gradient L2 normalisation
nn.utils.clip_grad_norm_(model.parameters(), args.max_gradient_norm)
# Transfer gradients to shared model and update
_transfer_grads_to_shared_model(model, shared_model)
optimiser.step()
if args.lr_decay:
# Linearly decay learning rate
_adjust_learning_rate(optimiser, max(args.lr * (args.T_max - T.value()) / args.T_max, 1e-32))
# Update shared_average_model
for shared_param, shared_average_param in zip(shared_model.parameters(), shared_average_model.parameters()):
shared_average_param = args.trust_region_decay * shared_average_param + (1 - args.trust_region_decay) * shared_param
# Computes an "efficient trust region" loss (policy head only) based on an existing loss and two distributions
def _trust_region_loss(model, distribution, ref_distribution, loss, threshold, g, k):
kl = - (ref_distribution * (distribution.log()-ref_distribution.log())).sum(1).mean(0)
# Compute dot products of gradients
k_dot_g = (k*g).sum(1).mean(0)
k_dot_k = (k**2).sum(1).mean(0)
# Compute trust region update
if k_dot_k.item() > 0:
trust_factor = ((k_dot_g - threshold) / k_dot_k).clamp(min=0).detach()
else:
trust_factor = torch.zeros(1)
# z* = g - max(0, (k^T∙g - δ) / ||k||^2_2)∙k
trust_loss = loss + trust_factor*kl
return trust_loss
# Trains model
def _train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs, actions, rewards, Qret, average_policies, old_policies=None):
off_policy = old_policies is not None
action_size = policies[0].size(1)
policy_loss, value_loss = 0, 0
# Calculate n-step returns in forward view, stepping backwards from the last state
t = len(rewards)
for i in reversed(range(t)):
# Importance sampling weights ρ ← π(∙|s_i) / µ(∙|s_i); 1 for on-policy
if off_policy:
rho = policies[i].detach() / old_policies[i]
else:
rho = torch.ones(1, action_size)
# Qret ← r_i + γQret
Qret = rewards[i] + args.discount * Qret
# Advantage A ← Qret - V(s_i; θ)
A = Qret - Vs[i]
# Log policy log(π(a_i|s_i; θ))
log_prob = policies[i].gather(1, actions[i]).log()
# g ← min(c, ρ_a_i)∙∇θ∙log(π(a_i|s_i; θ))∙A
single_step_policy_loss = -(rho.gather(1, actions[i]).clamp(max=args.trace_max) * log_prob * A.detach()).mean(0) # Average over batch
# Off-policy bias correction
if off_policy:
# g ← g + Σ_a [1 - c/ρ_a]_+∙π(a|s_i; θ)∙∇θ∙log(π(a|s_i; θ))∙(Q(s_i, a; θ) - V(s_i; θ)
bias_weight = (1 - args.trace_max / rho).clamp(min=0) * policies[i]
single_step_policy_loss -= (bias_weight * policies[i].log() * (Qs[i].detach() - Vs[i].expand_as(Qs[i]).detach())).sum(1).mean(0)
if args.trust_region:
# KL divergence k ← ∇θ0∙DKL[π(∙|s_i; θ_a) || π(∙|s_i; θ)]
k = -average_policies[i].gather(1, actions[i]) / (policies[i].gather(1, actions[i]) + 1e-10)
if off_policy:
g = (rho.gather(1, actions[i]).clamp(max=args.trace_max) * A / (policies[i] + 1e-10).gather(1, actions[i]) \
+ (bias_weight * (Qs[i] - Vs[i].expand_as(Qs[i]))/(policies[i] + 1e-10)).sum(1)).detach()
else:
g = (rho.gather(1, actions[i]).clamp(max=args.trace_max) * A / (policies[i] + 1e-10).gather(1, actions[i])).detach()
# Policy update dθ ← dθ + ∂θ/∂θ∙z*
policy_loss += _trust_region_loss(model, policies[i].gather(1, actions[i]) + 1e-10, average_policies[i].gather(1, actions[i]) + 1e-10, single_step_policy_loss, args.trust_region_threshold, g, k)
else:
# Policy update dθ ← dθ + ∂θ/∂θ∙g
policy_loss += single_step_policy_loss
# Entropy regularisation dθ ← dθ + β∙∇θH(π(s_i; θ))
policy_loss -= args.entropy_weight * -(policies[i].log() * policies[i]).sum(1).mean(0) # Sum over probabilities, average over batch
# Value update dθ ← dθ - ∇θ∙1/2∙(Qret - Q(s_i, a_i; θ))^2
Q = Qs[i].gather(1, actions[i])
value_loss += ((Qret - Q) ** 2 / 2).mean(0) # Least squares loss
# Truncated importance weight ρ¯_a_i = min(1, ρ_a_i)
truncated_rho = rho.gather(1, actions[i]).clamp(max=1)
# Qret ← ρ¯_a_i∙(Qret - Q(s_i, a_i; θ)) + V(s_i; θ)
Qret = truncated_rho * (Qret - Q.detach()) + Vs[i].detach()
# Update networks
_update_networks(args, T, model, shared_model, shared_average_model, policy_loss + value_loss, optimiser)
# Acts and trains model
def train(rank, args, T, shared_model, shared_average_model, optimiser):
torch.manual_seed(args.seed + rank)
env = gym.make(args.env)
env.seed(args.seed + rank)
model = ActorCritic(env.observation_space, env.action_space, args.hidden_size)
model.train()
if not args.on_policy:
# Normalise memory capacity by number of training processes
memory = EpisodicReplayMemory(args.memory_capacity // args.num_processes, args.max_episode_length)
t = 1 # Thread step counter
done = True # Start new episode
while T.value() <= args.T_max:
# On-policy episode loop
while True:
# Sync with shared model at least every t_max steps
model.load_state_dict(shared_model.state_dict())
# Get starting timestep
t_start = t
# Reset or pass on hidden state
if done:
hx, avg_hx = torch.zeros(1, args.hidden_size), torch.zeros(1, args.hidden_size)
cx, avg_cx = torch.zeros(1, args.hidden_size), torch.zeros(1, args.hidden_size)
# Reset environment and done flag
state = state_to_tensor(env.reset())
done, episode_length = False, 0
else:
# Perform truncated backpropagation-through-time (allows freeing buffers after backwards call)
hx = hx.detach()
cx = cx.detach()
# Lists of outputs for training
policies, Qs, Vs, actions, rewards, average_policies = [], [], [], [], [], []
while not done and t - t_start < args.t_max:
# Calculate policy and values
policy, Q, V, (hx, cx) = model(state, (hx, cx))
average_policy, _, _, (avg_hx, avg_cx) = shared_average_model(state, (avg_hx, avg_cx))
# Sample action
action = torch.multinomial(policy, 1)[0, 0]
# Step
next_state, reward, done, _ = env.step(action.item())
next_state = state_to_tensor(next_state)
reward = args.reward_clip and min(max(reward, -1), 1) or reward # Optionally clamp rewards
done = done or episode_length >= args.max_episode_length # Stop episodes at a max length
episode_length += 1 # Increase episode counter
if not args.on_policy:
# Save (beginning part of) transition for offline training
memory.append(state, action, reward, policy.detach()) # Save just tensors
# Save outputs for online training
[arr.append(el) for arr, el in zip((policies, Qs, Vs, actions, rewards, average_policies),
(policy, Q, V, torch.LongTensor([[action]]), torch.Tensor([[reward]]), average_policy))]
# Increment counters
t += 1
T.increment()
# Update state
state = next_state
# Break graph for last values calculated (used for targets, not directly as model outputs)
if done:
# Qret = 0 for terminal s
Qret = torch.zeros(1, 1)
if not args.on_policy:
# Save terminal state for offline training
memory.append(state, None, None, None)
else:
# Qret = V(s_i; θ) for non-terminal s
_, _, Qret, _ = model(state, (hx, cx))
Qret = Qret.detach()
# Train the network on-policy
_train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs, actions, rewards, Qret, average_policies)
# Finish on-policy episode
if done:
break
# Train the network off-policy when enough experience has been collected
if not args.on_policy and len(memory) >= args.replay_start:
# Sample a number of off-policy episodes based on the replay ratio
for _ in range(_poisson(args.replay_ratio)):
# Act and train off-policy for a batch of (truncated) episode
trajectories = memory.sample_batch(args.batch_size, maxlen=args.t_max)
# Reset hidden state
hx, avg_hx = torch.zeros(args.batch_size, args.hidden_size), torch.zeros(args.batch_size, args.hidden_size)
cx, avg_cx = torch.zeros(args.batch_size, args.hidden_size), torch.zeros(args.batch_size, args.hidden_size)
# Lists of outputs for training
policies, Qs, Vs, actions, rewards, old_policies, average_policies = [], [], [], [], [], [], []
# Loop over trajectories (bar last timestep)
for i in range(len(trajectories) - 1):
# Unpack first half of transition
state = torch.cat(tuple(trajectory.state for trajectory in trajectories[i]), 0)
action = torch.LongTensor([trajectory.action for trajectory in trajectories[i]]).unsqueeze(1)
reward = torch.Tensor([trajectory.reward for trajectory in trajectories[i]]).unsqueeze(1)
old_policy = torch.cat(tuple(trajectory.policy for trajectory in trajectories[i]), 0)
# Calculate policy and values
policy, Q, V, (hx, cx) = model(state, (hx, cx))
average_policy, _, _, (avg_hx, avg_cx) = shared_average_model(state, (avg_hx, avg_cx))
# Save outputs for offline training
[arr.append(el) for arr, el in zip((policies, Qs, Vs, actions, rewards, average_policies, old_policies),
(policy, Q, V, action, reward, average_policy, old_policy))]
# Unpack second half of transition
next_state = torch.cat(tuple(trajectory.state for trajectory in trajectories[i + 1]), 0)
done = torch.Tensor([trajectory.action is None for trajectory in trajectories[i + 1]]).unsqueeze(1)
# Do forward pass for all transitions
_, _, Qret, _ = model(next_state, (hx, cx))
# Qret = 0 for terminal s, V(s_i; θ) otherwise
Qret = ((1 - done) * Qret).detach()
# Train the network off-policy
_train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs,
actions, rewards, Qret, average_policies, old_policies=old_policies)
done = True
env.close()
|
opta/commands/init_templates/variables/azure_location.py | riddopic/opta | 595 | 12679993 | from opta.commands.init_templates.helpers import dictionary_deep_set
from opta.commands.init_templates.template import TemplateVariable
LOCATIONS = [
"australiaeast",
"brazilsouth",
"canadacentral",
"centralus",
"eastus",
"eastus2",
"francecentral",
"germanywestcentral",
"japaneast",
"southafricanorth",
"southcentralus",
"southeastasia",
"uksouth",
"westeurope",
"westus2",
"westus3",
]
def validate(location_name: str) -> bool:
return location_name in LOCATIONS
def apply(d: dict, v: str) -> dict:
set_path = dictionary_deep_set(["providers", "azurerm", "location"])
set_path(d, v)
return d
indented_locations = [f"\t{location}" for location in LOCATIONS]
location_string = "\n".join(indented_locations)
azureLocationVariable = TemplateVariable(
prompt="Azure location",
applier=apply,
validator=validate,
error_message=f"Must be one of\n{location_string}",
default_value="centralus",
)
|
examples/pageWindow.py | tgolsson/appJar | 666 | 12679995 | <gh_stars>100-1000
import sys
sys.path.append("../")
from appJar import gui
lid = 0
def add(btn):
global lid
app.openPage("Main Title", app.getSpinBox("spin"))
app.addLabel(str(lid), str(lid))
lid +=1
app.stopPage()
app=gui()
app.setBg("DarkKhaki")
app.setGeometry(280,400)
app.startPagedWindow("Main Title")
app.startPage()
app.addLabel("l13", "Label 1")
app.addSpinBoxRange("spin", 1, 5)
app.addButton("addLabel", add)
app.stopPage()
app.startPage()
app.stopPage()
app.startPage()
app.addLabel("l3", "Label 3")
app.stopPage()
app.startPage()
app.addLabel("l4", "Label 4")
app.stopPage()
app.stopPagedWindow()
app.go()
|
pynq/lib/video/hierarchies.py | michalkouril/PYNQ | 1,537 | 12680000 | # Copyright (c) 2018, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "<EMAIL>"
import contextlib
from pynq import DefaultHierarchy
from .pipeline import ColorConverter, PixelPacker
from .frontend import VideoInFrontend, VideoOutFrontend
from .dma import AxiVDMA
from .common import *
class VideoIn(DefaultHierarchy):
"""Wrapper for the input video pipeline.
This wrapper assumes the following pipeline structure and naming
color_convert_in -> pixel_pack -> axi_vdma
with vtc_in and axi_gpio_hdmiiin helper IP
Attributes
----------
frontend : pynq.lib.video.HDMIInFrontend
The HDMI frontend for signal detection
color_convert : pynq.lib.video.ColorConverter
The input color format converter
pixel_pack : pynq.lib.video.PixelPacker
Converts the input pixel size to that required by the VDMA
"""
@staticmethod
def checkhierarchy(description):
if 'frontend' in description['hierarchies']:
frontend_dict = description['hierarchies']['frontend']
elif 'frontend' in description['ip']:
frontend_dict = description['ip']['frontend']
else:
return False
return (
'pixel_pack' in description['ip'] and
'color_convert' in description['ip'] and
description['ip']['pixel_pack']['driver'] == PixelPacker and
description['ip']['color_convert']['driver'] == ColorConverter and
issubclass(frontend_dict['driver'], VideoInFrontend))
def __init__(self, description, vdma=None):
"""Initialise the drivers for the pipeline
Parameters
----------
description : dict
Description of the hierarchy containing all of the video blocks
"""
super().__init__(description)
ip_dict = self.description
self._vdma = vdma
self._color = self.color_convert
self._pixel = self.pixel_pack
self._hdmi = self.frontend
def configure(self, pixelformat=PIXEL_BGR):
"""Configure the pipeline to use the specified pixel format.
If the pipeline is running it is stopped prior to the configuration
being changed
Parameters
----------
pixelformat : PixelFormat
The pixel format to configure the pipeline for
"""
if self._vdma.readchannel.running:
self._vdma.readchannel.stop()
self._color.colorspace = pixelformat.in_color
self._pixel.bits_per_pixel = pixelformat.bits_per_pixel
self._hdmi.start()
input_mode = self._hdmi.mode
self._vdma.readchannel.mode = VideoMode(input_mode.width,
input_mode.height,
pixelformat.bits_per_pixel,
input_mode.fps)
return self._closecontextmanager()
def start(self):
"""Start the pipeline
"""
self._vdma.readchannel.start()
return self._stopcontextmanager()
def stop(self):
"""Stop the pipeline
"""
self._vdma.readchannel.stop()
@contextlib.contextmanager
def _stopcontextmanager(self):
"""Context Manager to stop the VDMA at the end of the block
"""
yield
self.stop()
@contextlib.contextmanager
def _closecontextmanager(self):
"""Context Manager to close the HDMI port at the end of the block
"""
yield
self.close()
def close(self):
"""Uninitialise the drivers, stopping the pipeline beforehand
"""
self.stop()
self._hdmi.stop()
@property
def colorspace(self):
"""The colorspace of the pipeline, can be changed without stopping
the pipeline
"""
return self._color.colorspace
@colorspace.setter
def colorspace(self, new_colorspace):
self._color.colorspace = new_colorspace
@property
def mode(self):
"""Video mode of the input
"""
return self._vdma.readchannel.mode
@property
def cacheable_frames(self):
"""Whether frames should be cacheable or non-cacheable
Only valid if a VDMA has been specified
"""
if self._vdma:
return self._vdma.readchannel.cacheable_frames
else:
raise RuntimeError("No VDMA specified")
@cacheable_frames.setter
def cacheable_frames(self, value):
if self._vdma:
self._vdma.readchannel.cacheable_frames = value
else:
raise RuntimeError("No VDMA specified")
def readframe(self):
"""Read a video frame
See AxiVDMA.S2MMChannel.readframe for details
"""
return self._vdma.readchannel.readframe()
async def readframe_async(self):
"""Read a video frame
See AxiVDMA.S2MMChannel.readframe for details
"""
return await self._vdma.readchannel.readframe_async()
def tie(self, output):
"""Mirror the video input on to an output channel
Parameters
----------
output : HDMIOut
The output to mirror on to
"""
self._vdma.readchannel.tie(output._vdma.writechannel)
class VideoOut(DefaultHierarchy):
"""Wrapper for the output video pipeline.
This wrapper assumes the following pipeline structure and naming
axi_vdma -> pixel_unpack -> color_convert -> frontend
with vtc_out and axi_dynclk helper IP
Attributes
----------
frontend : pynq.lib.video.HDMIOutFrontend
The HDMI frontend for mode setting
color_convert : pynq.lib.video.ColorConverter
The output color format converter
pixel_unpack : pynq.lib.video.PixelPacker
Converts the input pixel size to 24 bits-per-pixel
"""
@staticmethod
def checkhierarchy(description):
if 'frontend' in description['hierarchies']:
frontend_dict = description['hierarchies']['frontend']
elif 'frontend' in description['ip']:
frontend_dict = description['ip']['frontend']
else:
return False
return (
'pixel_unpack' in description['ip'] and
'color_convert' in description['ip'] and
description['ip']['pixel_unpack']['driver'] == PixelPacker and
description['ip']['color_convert']['driver'] == ColorConverter and
issubclass(frontend_dict['driver'], VideoOutFrontend))
def __init__(self, description, vdma=None):
"""Initialise the drivers for the pipeline
Parameters
----------
        description : dict
            Dictionary describing the hierarchy that contains all of the
            video blocks
        vdma : pynq.lib.video.AxiVDMA, optional
            VDMA used to transfer the frames; if None it must be assigned
            to ``_vdma`` later (as HDMIWrapper does)
"""
super().__init__(description)
self._vdma = vdma
self._color = self.color_convert
self._pixel = self.pixel_unpack
self._hdmi = self.frontend
def configure(self, mode, pixelformat=None):
"""Configure the pipeline to use the specified pixel format and size.
If the pipeline is running it is stopped prior to the configuration
being changed
Parameters
----------
mode : VideoMode
The video mode to output
pixelformat : PixelFormat
The pixel format to configure the pipeline for
"""
if self._vdma.writechannel.running:
self._vdma.writechannel.stop()
if pixelformat is None:
if mode.bits_per_pixel == 8:
pixelformat = PIXEL_GRAY
elif mode.bits_per_pixel == 24:
pixelformat = PIXEL_BGR
elif mode.bits_per_pixel == 32:
pixelformat = PIXEL_RGBA
else:
                raise ValueError(
                    "No default pixel format for {} bpp".format(
                        mode.bits_per_pixel))
if pixelformat.bits_per_pixel != mode.bits_per_pixel:
raise ValueError(
"Video mode and pixel format have different sized pixels")
self._color.colorspace = pixelformat.out_color
self._pixel.bits_per_pixel = pixelformat.bits_per_pixel
self._hdmi.mode = mode
self._vdma.writechannel.mode = mode
self._hdmi.start()
return self._closecontextmanager()
def start(self):
"""Start the pipeline
"""
self._vdma.writechannel.start()
return self._stopcontextmanager()
def stop(self):
"""Stop the pipeline
"""
self._vdma.writechannel.stop()
def close(self):
"""Close the pipeline an unintialise the drivers
"""
self.stop()
self._hdmi.stop()
@contextlib.contextmanager
def _stopcontextmanager(self):
"""Context Manager to stop the VDMA at the end of the block
"""
yield
self.stop()
@contextlib.contextmanager
def _closecontextmanager(self):
"""Context Manager to close the HDMI port at the end of the block
"""
yield
self.close()
@property
def colorspace(self):
"""Set the colorspace for the pipeline - can be done without
stopping the pipeline
"""
return self._color.colorspace
@colorspace.setter
def colorspace(self, new_colorspace):
self._color.colorspace = new_colorspace
@property
def mode(self):
"""The currently configured video mode
"""
return self._vdma.writechannel.mode
@property
def cacheable_frames(self):
"""Whether frames should be cacheable or non-cacheable
Only valid if a VDMA has been specified
"""
if self._vdma:
return self._vdma.writechannel.cacheable_frames
else:
raise RuntimeError("No VDMA specified")
@cacheable_frames.setter
def cacheable_frames(self, value):
if self._vdma:
self._vdma.writechannel.cacheable_frames = value
else:
raise RuntimeError("No VDMA specified")
def newframe(self):
"""Return an unintialised video frame of the correct type for the
pipeline
"""
return self._vdma.writechannel.newframe()
def writeframe(self, frame):
"""Write the frame to the video output
See AxiVDMA.MM2SChannel.writeframe for more details
"""
self._vdma.writechannel.writeframe(frame)
async def writeframe_async(self, frame):
"""Write the frame to the video output
See AxiVDMA.MM2SChannel.writeframe for more details
"""
await self._vdma.writechannel.writeframe_async(frame)
class HDMIWrapper(DefaultHierarchy):
"""Hierarchy driver for the entire video subsystem.
Exposes the input, output and video DMA as attributes. For most
use cases the wrappers for the input and output pipelines are
sufficient and the VDMA will not need to be used directly.
Attributes
----------
hdmi_in : pynq.lib.video.HDMIIn
The HDMI input pipeline
hdmi_out : pynq.lib.video.HDMIOut
The HDMI output pipeline
axi_vdma : pynq.lib.video.AxiVDMA
The video DMA.
"""
@staticmethod
def checkhierarchy(description):
in_pipeline = None
out_pipeline = None
dma = None
for hier, details in description['hierarchies'].items():
if details['driver'] == VideoIn:
in_pipeline = hier
elif details['driver'] == VideoOut:
out_pipeline = hier
for ip, details in description['ip'].items():
if details['driver'] == AxiVDMA:
dma = ip
return (in_pipeline is not None and
out_pipeline is not None and
dma is not None)
def __init__(self, description):
super().__init__(description)
in_pipeline = None
out_pipeline = None
dma = None
for hier, details in description['hierarchies'].items():
if details['driver'] == VideoIn:
in_pipeline = hier
elif details['driver'] == VideoOut:
out_pipeline = hier
for ip, details in description['ip'].items():
if details['driver'] == AxiVDMA:
dma = ip
getattr(self, in_pipeline)._vdma = getattr(self, dma)
getattr(self, out_pipeline)._vdma = getattr(self, dma)
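# Illustrative usage sketch (editor's addition, not part of the driver code
# above). It assumes a board running an overlay named 'base.bit' that exposes
# a 'video' hierarchy with 'hdmi_in'/'hdmi_out' sub-hierarchies; those names
# are assumptions for illustration only. The methods exercised (configure,
# start, readframe, writeframe, tie, close) are the ones defined in this file.
if __name__ == '__main__':
    from pynq import Overlay
    overlay = Overlay('base.bit')          # assumed overlay name
    hdmi_in = overlay.video.hdmi_in        # VideoIn wrapper
    hdmi_out = overlay.video.hdmi_out      # VideoOut wrapper
    hdmi_in.configure(PIXEL_BGR)           # choose the input pixel format
    hdmi_out.configure(hdmi_in.mode)       # match the detected input mode
    hdmi_in.start()
    hdmi_out.start()
    frame = hdmi_in.readframe()            # grab one frame from the input
    hdmi_out.writeframe(frame)             # and push it to the output
    hdmi_in.tie(hdmi_out)                  # or mirror input to output directly
    hdmi_out.close()
    hdmi_in.close()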
|
tests/extmod/uctypes_byteat.py | learnforpractice/micropython-cpp | 198 | 12680030 | try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
data = bytearray(b'01234567')
print(uctypes.bytes_at(uctypes.addressof(data), 4))
print(uctypes.bytearray_at(uctypes.addressof(data), 4))
|
pylayers/antprop/diffRT.py | usmanwardag/pylayers | 143 | 12680042 | """
.. currentmodule:: pylayers.antprop.diffRT
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
#!/usr/bin/python
# -*- coding: latin1 -*-
import numpy as np
import scipy.special as sps
import matplotlib.pyplot as plt
import pdb
def diff(fGHz,phi0,phi,si,sd,N,mat0,matN,beta=np.pi/2,mode='tab',debug=False):
""" Luebbers Diffration coefficient
for Ray tracing
Parameters
----------
Nf : number of frequencies
Nr : number of rays
fGHz : np.array (Nf)
phi0 : np.array (Nr)
phi : np.array (Nr)
si : np.array (Nr)
sd : np.array (Nr)
    N : np.array (Nb)
        wedge parameter
mat0 : Mat
matN : Mat
beta : np.array (Nb)
skew incidence angle (rad)
    mode : str ('tab' | 'exact')
        if 'tab': the Fresnel function is interpolated
        (increases speed)
        if 'exact': the Fresnel function is computed for each value
        (increases accuracy)
        (see FreF)
Returns
-------
Ds : numpy array
Diffraction soft
Dh : numpy array
Diffraction hard
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> from pylayers.antprop.slab import *
>>> Nf=3
>>> Nr=10
>>> Nb=5
>>> fGHz = np.linspace(0,10,Nf)
>>> N = np.linspace(1,10,Nb)#320/180.
>>> phi0 = np.linspace(0.01,2*np.pi-0.01,Nr)#40*np.pi/180.
>>> phi = np.linspace(0.01,2*np.pi-0.01,Nr)
>>> dm = MatDB()
>>> mat0 = dm['METAL']
>>> matN = dm['METAL']
>>> si = 10000.*np.ones(Nr)
>>> sd = 1.*np.ones(Nr)
>>> plt.ion()
>>> Ds,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)
"""
if not isinstance(fGHz,np.ndarray):
fGHz = np.array([fGHz])
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(phi,np.ndarray):
phi = np.array([phi])
if not isinstance(si,np.ndarray):
si = np.array([si])
if not isinstance(sd,np.ndarray):
sd = np.array([sd])
if not isinstance(N,np.ndarray):
N = np.array([N])
if not isinstance(beta,np.ndarray):
beta = np.array([beta])
fGHz = fGHz[:,None]
phi0 = phi0[None,:]
phi = phi[None,:]
si = si[None,:]
sd = sd[None,:]
N = N[None,:]
beta = beta[None,:]
L = si*sd/(si+sd)
k = 2*np.pi*fGHz/0.3
#--------------------------------------------------
# R on faces 'o' and 'n'
#--------------------------------------------------
tho = np.empty((fGHz.shape[0],phi.shape[1]))
thn = np.empty((fGHz.shape[0],phi.shape[1]))
# PHI0 = phi0 * np.ones(phi.shape)
# PHI = np.ones(phi0.shape)*phi
# BN = np.ones(phi0.shape)*N
c1 = phi>phi0
c2 = ~c1
tho[:,c1[0,:]] = phi0[:,c1[0,:]]
thn[:,c1[0,:]] = N[:,c1[0,:]]*np.pi-phi[:,c1[0,:]]
tho[:,c2[0,:]] = phi[:,c2[0,:]]
thn[:,c2[0,:]] = N[:,c2[0,:]]*np.pi-phi0[:,c2[0,:]]
er0 = np.real(mat0['epr'])
err0 = np.imag(mat0['epr'])
ur0 = np.real(mat0['mur'])
urr0 = np.imag(mat0['mur'])
sigma0 = mat0['sigma']
deltah0 = mat0['roughness']
erN = np.real(matN['epr'])
errN = np.imag(matN['epr'])
urN = np.real(mat0['mur'])
urrN = np.imag(mat0['mur'])
sigmaN = matN['sigma']
deltahN = matN['roughness']
Rsofto,Rhardo = R(tho,k,er0,err0,sigma0,ur0,urr0,deltah0)
Rsoftn,Rhardn = R(thn,k,erN,errN,sigmaN,urN,urrN,deltahN)
#--------------------------------------------------
    # grazing angles Go and Gn
#--------------------------------------------------
Gsofto,Gsoftn = G(N,phi0,Rsofto,Rsoftn)
Ghardo,Ghardn = G(N,phi0,Rhardo,Rhardn)
#--------------------------------------------------
    # compute the 4 terms of the diffraction coefficient
#--------------------------------------------------
#by construction
#0 < KLA < 2*k*L
klamax = 2*np.max(k)*np.max(L)
if mode == 'tab':
#xF0 = np.logspace(-6,-2,1000)
#xF1 = np.logspace(-2,np.log10(klamax),1000)
#xF = np.hstack((xF0,xF1))
#pdb.set_trace()
# xF = np.logspace(-6,np.log10(klamax),1000)
xF = np.linspace(-8,np.log10(klamax),2000)
pxF = 10**xF
F = FreF(pxF)[0]
else :
xF = []
F=[]
sign = 1.0
D1 = Dfunc(sign,k,N,phi-phi0,si,sd,xF,F,beta)
sign = -1.0
D2 = Dfunc(sign,k,N,phi-phi0,si,sd,xF,F,beta)
sign = +1.0
D3 = Dfunc(sign,k,N,phi+phi0,si,sd,xF,F,beta)
sign = -1.0
D4 = Dfunc(sign,k,N,phi+phi0,si,sd,xF,F,beta)
#--------------------------------------
#n>=1 : exterior wedge
#--------------------------------------
Dsoft =np.empty(np.shape(D1),dtype=complex)
Dhard =np.empty(np.shape(D1),dtype=complex)
#c1 = BN>=1.0
Dsoft = D1+D2+Rsoftn*D3+Rsofto*D4
Dhard = D1+D2+Rhardn*D3+Rhardo*D4
# Dsoft = D2-D4
# Dhard = D2+D4
#Dsoft = D1+D2-D3-D4
#Dhard = D1+D2+D3+D4
# Dsoft = Gsoftn*(D1+Rsoftn*D3)+Gsofto*(D2+Rsofto*D4)
# Dhard = Ghardn*(D1+Rhardn*D3)+Ghardo*(D2+Rhardo*D4)
# c1 = abs(Gsoftn+1.0) < 1e-6
# c2 = abs(Gsofto+1.0) < 1e-6
# c3 = abs(Ghardn+1.0) < 1e-6
# c4 = abs(Ghardo+1.0) < 1e-6
#
# Dsoft[c1]= 0.5*(D1[c1]+D3[c1])+Gsofto[c1]*(D2[c1]+Rsofto[c1]*D4[c1])
# Dsoft[c2]= Gsoftn[c2]*(D1[c2]+Rsoftn[c2]*D3[c2])+0.5*(D2[c2]+D4[c2])
# Dhard[c3]= 0.5*(D1[c3]+D3[c3])+Ghardo[c3]*(D2[c3]+Rhardo[c3]*D4[c3])
# Dhard[c4]= Ghardn[c4]*(D1[c4]+Rhardn[c4]*D3[c4])+0.5*(D2[c4]+D4[c4])
#--------------------------------------
    # handling of the cases where Go (or Gn) = -1
#--------------------------------------
# if (abs(Gsoftn+1.0) < 1e-6):
# DTsoft = 0.5*(D1+D3)+Gsofto*(D2+Rsofto*D4)
#
# if (abs(Gsofto+1.0)<1e-6):
# DTsoft = Gsoftn*(D1+Rsoftn*D3)+0.5*(D2+D4)
#
# if (abs(Ghardn+1.0) < 1.0e-6):
# DThard = 0.5*(D1+D3)+Ghardo*(D2+Rhardo*D4)
#
# if (abs(Ghardo+1.0)<1e-6):
# DThard = Ghardn*(D1+Rhardn*D3)+0.5*(D2+D4)
#
##--------------------------------------
    ## case where n<1 : interior wedge
##--------------------------------------
# else:
#
# thoz = N*np.pi-tho
# thnz = N*np.pi-thn
#
#
# [Rsoftnz,Rhardnz] = R(thnz,k,ero,erro,condo,uro,deltaho)
# [Rsoftoz,Rhardoz] = R(thoz,k,ern,errn,condn,urn,deltahn)
#
# DTsoft = Rsoftoz*Rsoftnz*D1+Rsoftn*D3+(Rsofto*Rsoftn*D2+Rsofto*D4)
#
# DThard = Rhardoz*Rhardnz*D1+Rhardn*D3+(Rhardo*Rhardn*D2+Rhardo*D4)
if np.isnan(Dsoft).any():
u = np.isnan(Dsoft)
pdb.set_trace()
if np.isnan(Dhard).any():
v = np.where(Dhard==np.nan)
pdb.set_trace()
if debug:
return Dsoft,Dhard,D1,D2,D3,D4
else :
return Dsoft,Dhard#,D1,D2,D3,D4
def G(N,phi0,Ro,Rn):
""" grazing angle correction
Parameters
----------
N : wedge parameter
phi0 : incidence angle (rad)
Ro : R coefficient on face o
Rn : R coefficient on face n
    Luebbers 89 "A heuristic UTD slope diffraction coefficient for
    rough lossy wedges"
"""
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(N,np.ndarray):
N = np.array([N])
PHI0 = phi0 * np.ones(Ro.shape)
BN = N * np.ones(Ro.shape)
# face o
Go = np.ones(np.shape(Ro),dtype='complex')
c1 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)>1.0e-6)
c2 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)<1.0e-6)
c3 = abs(PHI0-BN*np.pi) < 1.0e-6
Go[c1] = 1.0/(1.0+Ro[c1])
Go[c2] = -1.
Go[c3] = 0.5
# face n
Gn = np.ones(np.shape(Rn),dtype='complex')
c1 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)>1.0e-6)
c2 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)<1.0e-6)
c3 = abs(PHI0) < 1.0e-6
Gn[c1] = 1.0/(1.0+Rn[c1])
Gn[c2] = -1.
Gn[c3] = 0.5
return Go,Gn
def Dfunc(sign,k,N,dphi,si,sd,xF=[],F=[],beta=np.pi/2):
"""
Parameters
----------
sign : int
+1 | -1
k : wave number
N : wedge parameter
dphi : phi-phi0 or phi+phi0
si : distance source-D
sd : distance D-observation
beta : skew incidence angle
xF : array
support of Fresnel function.
F : array
Values of Fresnel function in regard of support
if F =[], fresnel function is computed
otherwise the passed interpolation F is used.
Reference
---------
    [1] Kouyoumjian, Pathak, "A uniform geometrical theory of diffraction
    for an edge in a perfectly conducting surface", Proc. IEEE,
    vol. 62, no. 11, Nov. 1974
    Notes
    -----
    [1] eq 25 :
                  exp(-j*pi/4)                 1
        Di =  -------------------  *  ----------------------  * F(kLa)
               2*n*sqrt(2*pi*k)        tan(dphi/n)*sin(beta)
    """
cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))
rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)
nn = np.zeros(np.shape(rnn))
nn[rnn>0.5] = 1
nn[rnn>1.5] = 2
nn[rnn<-0.5] = -1
nn[rnn<-1.5] = -2
# KLA ref[1] eq 27
L = ((si*sd)*np.sin(beta)**2)/(1.*(si+sd))
AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )
A = 2*AC**2
KLA = k * L * A
epsi = AC*2.0
angle = (np.pi+sign*dphi)/(2.0*N)
tan = np.tan(angle)
Di = np.empty(KLA.shape)
if len(F) == 0:
Fkla,ys,yL = FreF(KLA)
else :
#pxF = 10**xF
#uF = (np.abs(KLA[:,:]-pxF[:,None,None])).argmin(axis=0)
val = np.maximum(np.log10(np.abs(KLA))-xF[0,None,None],0)
uF2 = (len(F)-1)*(val)/(xF[-1,None,None]-xF[0,None,None])
uF2_int = np.floor(uF2).astype('int')
Fkla = F[uF2_int]
#if np.max(Fkla) > 1:
# Warning('diffRT : Fkla tab probably wrong')
# 4.56 Mac Namara
try:
Di = -cste*Fkla/tan
except:
print('tan=0 : It can happen')
pdb.set_trace()
c5 = np.where(np.abs(tan)<1e-9)
BL = np.ones(Di.shape)*L
Di[:,c5] = 0.5*np.sqrt(BL[c5])
# if np.isinf(Di).any():
# pdb.set_trace()
return(Di)
def FresnelI(x) :
""" calculates Fresnel integral
Parameters
----------
x : array
real argument
"""
v = np.empty(x.shape,dtype=complex)
y = np.abs(x)
z = .25*y
u1 = np.where(z>1)
u2 = np.where(z<=1)
y1 = y[u1]
y2 = y[u2]
d1 = np.cos(y1)
d2 = np.cos(y2)
e1 = np.sin(y1)
e2 = np.sin(y2)
z1 = z[u1]
z2 = z[u2]
c1 = np.sqrt(z1)
c2 = np.sqrt(z2)
# ----------------------------------------
# x>4, z>1
# ----------------------------------------
v1 = 0.5 - 0.5*1j
c1 = (1.0)/c1
z1 = c1*c1
a1=((((((((((
.23393900e-3*z1 -.12179300e-2)*z1 +.21029670e-2)*z1
+.2464200e-3)*z1 -.67488730e-2)*z1 +.11948809e-1)*z1
-.9497136e-2)*z1 +.68989200e-3)*z1 +.57709560e-2)*z1
+.3936000e-5)*z1 -.24933975e-1)*z1*c1
b1=(((((((((((
.838386000e-3*z1 -.55985150e-2)*z1 +.16497308e-1)*z1
-.27928955e-1)*z1 +.29064067e-1)*z1 -.17122914e-1)*z1
+.19032180e-2)*z1 +.48514660e-2)*z1 +.23006000e-4)*z1
-.93513410e-2)*z1 +.23000000e-7)*z1 +.19947114000)*c1
# ----------------------------------------
# x<4, z<1
# ----------------------------------------
a2=(((((((((((
0.34404779e-1 *z2 - 0.15023096)*z2 - 0.25639041e-1)*z2
+0.850663781 )*z2 - 0.75752419e-1 )*z2 - 0.305048566e1)*z2
-0.16898657e-1 )*z2 + 0.6920691902e1)*z2 - 0.576361e-3 )*z2
-0.6808568854e1)*z2 - 0.1702e-5)*z2 + 0.159576914e1)*c2
b2=(((((((((((
.19547031e-1 *z2 -.216195929e0 )*z2 +.702222016e0)*z2
-.4033492760e0)*z2 -.1363729124e1)*z2 -.138341947e0)*z2
+.5075161298e1)*z2 -.952089500e-2)*z2 -.778002040e1)*z2
-.928100000e-4)*z2 +.4255387524e1)*z2 -.33000000e-7)*c2
w1 = a1*d1+b1*e1+ 1j*(b1*d1-a1*e1) + v1
w2 = a2*d2+b2*e2+ 1j*(b2*d2-a2*e2)
v[u1] = w1
v[u2] = w2
y = v*(np.sqrt(np.pi/2.0))
return y
def FreF(x) :
""" F function from Pathack
Parameters
----------
x : array
real argument
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
    >>> x = np.logspace(-4,2,400)
    >>> F = FreF(x)[0]
    >>> plt.semilogx(x,np.abs(F))
>>> plt.grid()
"""
ejp4 = np.exp(1j*np.pi/4)
emjp4 = np.exp(-1j*np.pi/4)
y = np.empty(x.shape,dtype=complex)
u1 = np.where(x>10)[0]
u2 = np.where(x<=10)[0]
xu1 = x[u1]
xu2 = x[u2]
x2 = xu1*xu1
x3 = x2*xu1
x4 = x3*xu1
w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)
cst = (1.0 - 1j )*0.5*np.sqrt(np.pi/2)
carx = abs(xu2)
racx = np.sqrt(carx)
modx = np.mod(xu2,2*np.pi)
expjx = np.exp(1j*modx)
fr = FresnelI(carx)
into = cst - fr
w2 = 2.0*racx*1j*expjx*into
y[u1] = w1
y[u2] = w2
# [1] eq 30
ys = (np.sqrt(np.pi*x)-2*x*ejp4-(2/3.)*x**2*emjp4)*np.exp(1j*(np.pi/4+x))
yl = 1-0.75/(x*x)+4.6875/(x*x*x*x) + 1j*( 0.5/x -1.875/(x*x*x))
return y,ys,yl
def FreF2(x):
""" F function using numpy fresnel function
Parameters
----------
Not working for large argument
"""
y = np.empty(x.shape,dtype=complex)
u1 = np.where(x>5)[0]
u2 = np.where(x<=5)[0]
xu1 = x[u1]
xu2 = x[u2]
x2 = xu1*xu1
x3 = x2*xu1
x4 = x3*xu1
w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)
cst = np.sqrt(np.pi/2.)
sF,cF = sps.fresnel(np.sqrt(xu2/cst))
Fc = (0.5-cF)*cst
Fs = (0.5-sF)*cst
modx = np.mod(xu2,2*np.pi)
expjx = np.exp(1j*modx)
w2 = 2*1j*np.sqrt(xu2)*expjx*(Fc-1j*Fs)
y[u1] = w1
y[u2] = w2
return(y)
def R(th,k,er,err,sigma,ur,urr,deltah):
""" R coeff
Parameters
----------
    th : np.array
        incidence angle (axis 0)
    k : np.array
        wave number (axis 1)
er : real part of permittivity
err : imaginary part of permittivity
sigma : conductivity
ur : real part of permeability
urr : imaginary part of permeability
deltah : height standard deviation
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> th = np.linspace(0,np.pi/2,180)[None,:]
>>> fGHz = 0.3
>>> lamda = 0.3/fGHz
    >>> k = np.array([2*np.pi/lamda])[:,None]
>>> Rs,Rh = R(th,k,9,0,0.01,1,0,0)
"""
cel = 299792458
#--------------------------------------------
    # dielectric surfaces case (otherwise er=-1)
#--------------------------------------------
if (er >= 0.0 ):
if ( (( ur-1.0)<1e-16) & ((er-1.0)<1e-16) ):
Rs = np.zeros(len(th),dtype=complex)
Rh = np.zeros(len(th),dtype=complex)
u1 = np.where(th >= 1.5*np.pi)
u2 = np.where(th >= np.pi )
u3 = np.where(th >= 0.5*np.pi)
th[u1] = 2.0*np.pi - th[u1]
th[u2] = th[u2] - np.pi
th[u3] = np.pi - th[u3]
#if (th >= 1.5*np.pi ):
# th = 2.0*np.pi - th
#elif (th >= np.pi ):
# th = th - np.pi
#elif (th >= 0.5*np.pi):
# th = np.pi - th
uo = 4.0*np.pi*1e-7
eo = 1.0/(uo*cel*cel)
pulse = k*cel
permi = (er-1j*err)-(1j*sigma)/(pulse*eo)
perme = ur - 1j*urr
yy = (permi/perme)
st = np.sin(th)
ct = np.cos(th)
bb = np.sqrt(yy-ct**2)
Rs = (st - bb) / (st + bb )
Rh = (yy*st-bb)/(yy*st+bb)
    else: # metallic case
Rs = -np.ones(th.shape,dtype=complex)
Rh = np.ones(th.shape,dtype=complex)
roughness = 1.0
Rs = Rs* roughness
Rh = Rh* roughness
return Rs,Rh
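# Illustrative usage sketch (editor's addition). It reproduces the metallic
# wedge setup from the diff() docstring above: MatDB and its 'METAL' entry
# come from pylayers.antprop.slab, everything else is defined in this module.
# The array sizes, distances and wedge parameter below are arbitrary choices
# for illustration only.
if __name__ == '__main__':
    from pylayers.antprop.slab import MatDB
    Nr = 180                                     # number of rays
    fGHz = np.array([2.4, 5.8])                  # frequencies (GHz)
    N = 1.5*np.ones(Nr)                          # wedge parameter, one per ray
    phi0 = 40*np.pi/180.*np.ones(Nr)             # incidence angle (rad)
    phi = np.linspace(0.01, 1.5*np.pi-0.01, Nr)  # observation angles (rad)
    si = 10.*np.ones(Nr)                         # source -> edge distance (m)
    sd = 1.*np.ones(Nr)                          # edge -> observer distance (m)
    dm = MatDB()
    mat0 = dm['METAL']
    matN = dm['METAL']
    Ds, Dh = diff(fGHz, phi0, phi, si, sd, N, mat0, matN)
    plt.semilogy(phi*180/np.pi, np.abs(Ds[0, :]), label='soft')
    plt.semilogy(phi*180/np.pi, np.abs(Dh[0, :]), label='hard')
    plt.xlabel('observation angle (degrees)')
    plt.legend()
    plt.show()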
|
ndscheduler/corescheduler/datastore/base_test.py | JonathanCalderon/ndscheduler | 1,038 | 12680068 | """Unit tests for DatastoreBase."""
import datetime
import unittest
from apscheduler.schedulers.blocking import BlockingScheduler
from ndscheduler.corescheduler import constants
from ndscheduler.corescheduler.datastore.providers.sqlite import DatastoreSqlite
class DatastoreBaseTest(unittest.TestCase):
def setUp(self):
fake_scheduler = BlockingScheduler()
self.store = DatastoreSqlite.get_instance()
self.store.start(fake_scheduler, None)
def test_add_execution_get_execution(self):
eid = '12345'
job_id = '321'
self.store.add_execution(eid, job_id, state=constants.EXECUTION_STATUS_SCHEDULED)
execution = self.store.get_execution(eid)
self.assertEqual(execution['execution_id'], eid)
def test_update_execution_get_execution(self):
eid = '12346'
job_id = '321'
self.store.add_execution(eid, job_id, state=constants.EXECUTION_STATUS_SCHEDULED)
self.store.update_execution(eid, state=constants.EXECUTION_STATUS_RUNNING)
execution = self.store.get_execution(eid)
self.assertEqual(execution['execution_id'], eid)
self.assertEqual(execution['state'],
constants.EXECUTION_STATUS_DICT[constants.EXECUTION_STATUS_RUNNING])
def test_get_executions_by_time_interval(self):
now = datetime.datetime.now()
start_time = (now + datetime.timedelta(minutes=20)).isoformat()
end_time = (now + datetime.timedelta(minutes=100)).isoformat()
self.store.add_execution('12', '34', state=constants.EXECUTION_STATUS_SCHEDULED,
scheduled_time=now + datetime.timedelta(minutes=5))
self.store.add_execution('13', '34', state=constants.EXECUTION_STATUS_SCHEDULED,
scheduled_time=now + datetime.timedelta(minutes=50))
self.store.add_execution('14', '34', state=constants.EXECUTION_STATUS_SCHEDULED,
scheduled_time=now + datetime.timedelta(minutes=70))
self.store.add_execution('15', '34', state=constants.EXECUTION_STATUS_SCHEDULED,
scheduled_time=now + datetime.timedelta(minutes=120))
executions = self.store.get_executions(start_time, end_time)
self.assertEqual(len(executions['executions']), 2)
def test_add_audit_log_get_audit_logs(self):
job_id = '234'
job_name = 'asdfs'
event = constants.AUDIT_LOG_ADDED
user = 'aa'
description = 'hihi'
self.store.add_audit_log(job_id, job_name, event, user=user, description=description)
now = datetime.datetime.utcnow()
five_min_ago = now - datetime.timedelta(minutes=5)
logs = self.store.get_audit_logs(five_min_ago.isoformat(), now.isoformat())
self.assertEqual(len(logs['logs']), 1)
|
adb/systrace/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py | mohanedmoh/TBS | 2,151 | 12680092 | <reponame>mohanedmoh/TBS
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import logging
import platform
import sys
import unittest
from py_trace_event import trace_time
class TimerTest(unittest.TestCase):
# Helper methods.
@contextlib.contextmanager
def ReplacePlatformProcessorCall(self, f):
try:
old_proc = platform.processor
platform.processor = f
yield
finally:
platform.processor = old_proc
@contextlib.contextmanager
def ReplaceQPCCheck(self, f):
try:
old_qpc = trace_time.IsQPCUsable
trace_time.IsQPCUsable = f
yield
finally:
trace_time.IsQPCUsable = old_qpc
# Platform detection tests.
def testInitializeNowFunction_platformNotSupported(self):
with self.assertRaises(RuntimeError):
trace_time.InitializeNowFunction('invalid_platform')
def testInitializeNowFunction_windows(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertTrue(trace_time.GetClock() == trace_time._WIN_HIRES
or trace_time.GetClock() == trace_time._WIN_LORES)
def testInitializeNowFunction_linux(self):
if not sys.platform.startswith(trace_time._PLATFORMS['linux']):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertEqual(trace_time.GetClock(), trace_time._LINUX_CLOCK)
def testInitializeNowFunction_mac(self):
if not sys.platform.startswith(trace_time._PLATFORMS['mac']):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertEqual(trace_time.GetClock(), trace_time._MAC_CLOCK)
# Windows Tests
def testIsQPCUsable_buggyAthlonProcReturnsFalse(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
def BuggyAthlonProc():
return 'AMD64 Family 15 Model 23 Stepping 6, AuthenticAMD'
with self.ReplacePlatformProcessorCall(BuggyAthlonProc):
self.assertFalse(trace_time.IsQPCUsable())
def testIsQPCUsable_returnsTrueOnWindows(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
def Proc():
return 'Intel64 Family 15 Model 23 Stepping 6, GenuineIntel'
with self.ReplacePlatformProcessorCall(Proc):
self.assertTrue(trace_time.IsQPCUsable())
def testGetWinNowFunction_QPC(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
# Test requires QPC to be available on platform.
if not trace_time.IsQPCUsable():
return True
self.assertGreater(trace_time.monotonic(), 0)
# Works even if QPC would work.
def testGetWinNowFunction_GetTickCount(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
with self.ReplaceQPCCheck(lambda: False):
self.assertGreater(trace_time.monotonic(), 0)
# Linux tests.
def testGetClockGetTimeClockNumber_linux(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('linux'), 1)
def testGetClockGetTimeClockNumber_freebsd(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('freebsd'), 4)
def testGetClockGetTimeClockNumber_bsd(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('bsd'), 3)
def testGetClockGetTimeClockNumber_sunos(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('sunos5'), 4)
# Smoke Test.
def testMonotonic(self):
time_one = trace_time.Now()
for _ in xrange(1000):
time_two = trace_time.Now()
self.assertLessEqual(time_one, time_two)
time_one = time_two
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
tests/test_labeler.py | timgates42/dedupe | 2,190 | 12680103 | import dedupe
import unittest
import random
import pytest
SAMPLE = [({"name": "Bob", "age": "50"}, {"name": "Charlie", "age": "75"}),
({"name": "Meredith", "age": "40"}, {"name": "Sue", "age": "10"}),
({"name": "Willy", "age": "35"}, {"name": "William", "age": "35"}),
({"name": "Jimmy", "age": "20"}, {"name": "Jimbo", "age": "21"})]
class ActiveLearningTest(unittest.TestCase):
def setUp(self):
self.data_model = dedupe.datamodel.DataModel([{'field': 'name',
'type': 'String'},
{'field': 'age',
'type': 'String'}])
def test_AL(self):
random.seed(1111111111110)
original_N = len(SAMPLE)
active_learner = dedupe.labeler.RLRLearner(self.data_model)
active_learner.candidates = SAMPLE
assert len(active_learner) == original_N
pair = active_learner.pop()
print(pair)
assert pair == ({"name": "Willy", "age": "35"},
{"name": "William", "age": "35"})
assert len(active_learner) == original_N - 1
pair = active_learner.pop()
print(pair)
assert pair == ({"name": "Jimmy", "age": "20"},
{"name": "Jimbo", "age": "21"})
assert len(active_learner) == original_N - 2
pair = active_learner.pop()
assert pair == ({"name": "Meredith", "age": "40"},
{"name": "Sue", "age": "10"})
assert len(active_learner) == original_N - 3
active_learner.pop()
with pytest.raises(IndexError):
active_learner.pop()
if __name__ == "__main__":
unittest.main()
|
carla_twist_to_control/src/carla_twist_to_control/carla_twist_to_control.py | SebastianHuch/ros-bridge | 314 | 12680118 | #!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
receive geometry_msgs::Twist and publish carla_msgs::CarlaEgoVehicleControl
use max wheel steer angle
"""
import sys
import ros_compatibility as roscomp
from ros_compatibility.exceptions import ROSException
from ros_compatibility.node import CompatibleNode
from ros_compatibility.qos import QoSProfile, DurabilityPolicy
from carla_msgs.msg import CarlaEgoVehicleControl, CarlaEgoVehicleInfo # pylint: disable=import-error
from geometry_msgs.msg import Twist # pylint: disable=import-error
class TwistToVehicleControl(CompatibleNode): # pylint: disable=too-few-public-methods
"""
    receive geometry_msgs::Twist and publish carla_msgs::CarlaEgoVehicleControl
use max wheel steer angle
"""
MAX_LON_ACCELERATION = 10
def __init__(self):
"""
Constructor
"""
super(TwistToVehicleControl, self).__init__("twist_to_control")
self.role_name = self.get_param("role_name", "ego_vehicle")
self.max_steering_angle = None
self.new_subscription(
CarlaEgoVehicleInfo,
"/carla/{}/vehicle_info".format(self.role_name),
self.update_vehicle_info,
qos_profile=QoSProfile(depth=1, durability=DurabilityPolicy.TRANSIENT_LOCAL))
self.new_subscription(
Twist,
"/carla/{}/twist".format(self.role_name),
self.twist_received,
qos_profile=10)
self.pub = self.new_publisher(
CarlaEgoVehicleControl,
"/carla/{}/vehicle_control_cmd".format(self.role_name),
qos_profile=10)
def update_vehicle_info(self, vehicle_info):
"""
callback to receive ego-vehicle info
"""
if not vehicle_info.wheels: # pylint: disable=no-member
self.logerr("Cannot determine max steering angle: Vehicle has no wheels.")
sys.exit(1)
self.max_steering_angle = vehicle_info.wheels[0].max_steer_angle # pylint: disable=no-member
if not self.max_steering_angle:
self.logerr("Cannot determine max steering angle: Value is %s",
self.max_steering_angle)
sys.exit(1)
self.loginfo("Vehicle info received. Max steering angle={}".format(self.max_steering_angle))
def twist_received(self, twist):
"""
receive twist and convert to carla vehicle control
"""
if self.max_steering_angle is None:
self.logwarn("Did not yet receive vehicle info.")
return
control = CarlaEgoVehicleControl()
if twist == Twist():
# stop
control.throttle = 0.
control.brake = 1.
control.steer = 0.
else:
if twist.linear.x > 0:
control.throttle = min(TwistToVehicleControl.MAX_LON_ACCELERATION,
twist.linear.x) / TwistToVehicleControl.MAX_LON_ACCELERATION
else:
control.reverse = True
control.throttle = max(-TwistToVehicleControl.MAX_LON_ACCELERATION,
twist.linear.x) / -TwistToVehicleControl.MAX_LON_ACCELERATION
if twist.angular.z > 0:
control.steer = -min(self.max_steering_angle, twist.angular.z) / \
self.max_steering_angle
else:
control.steer = -max(-self.max_steering_angle, twist.angular.z) / \
self.max_steering_angle
try:
self.pub.publish(control)
except ROSException as e:
if roscomp.ok():
self.logwarn("Error while publishing control: {}".format(e))
def main(args=None):
"""
main function
:return:
"""
roscomp.init("twist_to_control", args)
twist_to_vehicle_control = None
try:
twist_to_vehicle_control = TwistToVehicleControl()
twist_to_vehicle_control.spin()
except KeyboardInterrupt:
pass
finally:
if twist_to_vehicle_control is not None:
twist_to_vehicle_control.loginfo("Done, deleting twist to control")
del twist_to_vehicle_control
roscomp.shutdown()
if __name__ == "__main__":
main()
|
mindinsight/datavisual/data_transform/data_manager.py | mindspore-ai/mindinsight | 216 | 12680119 | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Management of all events data.
This module gives access to all loaders.
It can read events data through the DataLoader.
This module also acts as a thread pool manager.
"""
import abc
import datetime
import threading
import time
import os
from typing import Iterable, Optional
from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher
from mindinsight.conf import settings
from mindinsight.datavisual.common import exceptions
from mindinsight.datavisual.common.enums import CacheStatus
from mindinsight.datavisual.common.log import logger
from mindinsight.datavisual.common.enums import DataManagerStatus
from mindinsight.datavisual.common.enums import PluginNameEnum
from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
from mindinsight.datavisual.data_transform.loader_generators.loader_generator import MAX_DATA_LOADER_SIZE
from mindinsight.datavisual.data_transform.loader_generators.data_loader_generator import DataLoaderGenerator
from mindinsight.utils.computing_resource_mgr import ComputingResourceManager
from mindinsight.utils.exceptions import MindInsightException
from mindinsight.utils.exceptions import ParamValueError
from mindinsight.utils.exceptions import UnknownError
from mindinsight.datavisual.utils.tools import exception_wrapper
class _BasicTrainJob:
"""
Basic info about train job.
Args:
abs_summary_base_dir (str): The canonical path of summary base directory. It should be the return value of
realpath().
entry (dict): The summary dir entry listed by SummaryWatcher.
"""
def __init__(self, abs_summary_base_dir, entry):
self._abs_summary_base_dir = abs_summary_base_dir
self._entry = entry
@property
def abs_summary_dir(self):
"""Get summary directory path."""
return os.path.realpath(os.path.join(self._abs_summary_base_dir, self._entry['relative_path']))
@property
def summary_base_dir(self):
"""Get summary base directory path."""
return self._abs_summary_base_dir
@property
def train_id(self):
"""Get train id."""
return self._entry['relative_path']
@property
def profiler_dir(self):
"""Get profiler directory path."""
if self._entry['profiler'] is not None:
return self._entry['profiler']['directory']
return None
@property
def create_time(self):
"""Get create time."""
return self._entry['create_time']
@property
def update_time(self):
"""Get update time."""
return self._entry['update_time']
@property
def profiler_type(self):
"""Get profiler type"""
if self._entry['profiler'] is not None:
return self._entry['profiler']['profiler_type']
return ''
@property
def summary_files(self):
"""Get the summary files count in the summary dir."""
return self._entry['summary_files']
@property
def graph_files(self):
"""Get the graph pb files count in the summary dir."""
return self._entry['graph_files']
@property
def lineage_files(self):
"""Get the lineage files count in the summary dir."""
return self._entry['lineage_files']
@property
def dump_dir(self):
"""Get the dump file path in the summary dir."""
return self._entry.get('dump_dir', None)
class CachedTrainJob:
"""
Cache item for BriefCacheManager.
DetailCacheManager will also wrap it's return value with this class.
Args:
basic_info (_BasicTrainJob): Basic info about the train job.
"""
def __init__(self, basic_info: _BasicTrainJob):
self._basic_info = basic_info
self._last_access_time = datetime.datetime.utcnow()
# Other cached content is stored here.
self._content = {}
self._cache_status = CacheStatus.NOT_IN_CACHE
self._key_locks = {}
@property
def cache_status(self):
"""Get cache status."""
return self._cache_status
@cache_status.setter
def cache_status(self, value):
"""Set cache status."""
self._cache_status = value
def update_access_time(self):
"""Update last access time of this cache item."""
self._last_access_time = datetime.datetime.utcnow()
@property
def last_access_time(self):
"""Get last access time for purposes such as LRU."""
return self._last_access_time
@property
def abs_summary_dir(self):
"""Get summary directory path."""
return self._basic_info.abs_summary_dir
@property
def summary_base_dir(self):
"""Get summary base directory path."""
return self._basic_info.summary_base_dir
def set(self, key, value):
"""Set value to cache."""
self._content[key] = value
def delete(self, key, raise_exception=True):
"""Delete key in cache."""
try:
self._content.pop(key)
except KeyError:
if raise_exception:
raise ParamValueError("Delete failed. Invalid cache key({}).".format(key))
def get(self, key, raise_exception=True):
"""
Get value from cache.
Args:
key (str): Key of content.
raise_exception (bool): If the key does not exist and
raise_exception is True, it will raise an Exception.
Returns:
            Union[Object, None], the value if the key exists in the content;
                None if the key is missing and raise_exception is False.
Raises:
ParamValueError, if the key does not exist and raise_exception is True.
"""
try:
return self._content[key]
except KeyError:
if raise_exception:
raise ParamValueError("Invalid cache key({}).".format(key))
return None
@property
def basic_info(self):
"""Get basic train job info."""
return self._basic_info
@basic_info.setter
def basic_info(self, value):
"""Set basic train job info."""
self._basic_info = value
def lock_key(self, key):
"""Threading lock with given key."""
return self._key_locks.setdefault(key, threading.Lock())
@property
def train_id(self):
"""Get train id."""
return self._basic_info.train_id
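# Editor's sketch: the intended usage of CachedTrainJob.lock_key() when
# several threads may fill the same cache entry. The cache key and the
# compute() callable are assumptions for illustration only.
def _example_fill_once(cached_job, key, compute):
    """Compute and cache a value for `key` exactly once under concurrency."""
    with cached_job.lock_key(key):
        value = cached_job.get(key, raise_exception=False)
        if value is None:
            value = compute(cached_job.abs_summary_dir)
            cached_job.set(key, value)
    return value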
class TrainJob:
"""
Train job object.
You must not create TrainJob objects manually. You should always get TrainJob objects from DataManager.
Args:
brief_train_job (CachedTrainJob): Brief info about train job.
detail_train_job (Optional[CachedTrainJob]): Detailed info about train job. Default: None.
"""
def __init__(self,
brief_train_job: CachedTrainJob,
detail_train_job: Optional[CachedTrainJob] = None):
self._brief = brief_train_job
self._detail = detail_train_job
if self._detail is None:
self._cache_status = CacheStatus.NOT_IN_CACHE
else:
self._cache_status = self._detail.cache_status
def has_detail(self):
"""Whether this train job has detailed info in cache."""
return bool(self._detail is not None)
def get_detail(self, key):
"""
Get detail content.
Args:
key (Any): Cache key.
Returns:
Any, cache content.
Raises:
TrainJobDetailNotInCacheError: when this train job has no detail cache.
"""
if not self.has_detail():
raise exceptions.TrainJobDetailNotInCacheError()
return self._detail.get(key)
def get_brief(self, key):
"""
Get brief content.
Args:
key (Any): Cache key.
Returns:
Any, cache content.
"""
return self._brief.get(key)
def get_basic_info(self):
"""
Get basic info.
Returns:
basic_info (_BasicTrainJob): Basic info about the train job.
"""
return self._brief.basic_info
@property
def cache_status(self):
"""Get cache status."""
return self._cache_status
@cache_status.setter
def cache_status(self, cache_status):
"""Set cache status."""
self._cache_status = cache_status
class BaseCacheItemUpdater(abc.ABC):
"""Abstract base class for other modules to update cache content."""
def update_item(self, cache_item: CachedTrainJob):
"""
Update cache item in place.
Args:
cache_item (CachedTrainJob): The cache item to be processed.
"""
raise NotImplementedError()
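# Editor's sketch: a minimal BaseCacheItemUpdater subclass, assuming only the
# interface defined above (update_item receives a CachedTrainJob and mutates
# it in place via set()). The cache key 'last_refreshed' is illustrative; a
# real updater would be registered through register_cache_item_updater().
class _ExampleAccessStampUpdater(BaseCacheItemUpdater):
    """Stamp each cache item with the time it was last refreshed."""
    def update_item(self, cache_item: CachedTrainJob):
        cache_item.set('last_refreshed', datetime.datetime.utcnow())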
class _BaseCacheManager:
"""Base class for cache manager."""
def __init__(self, summary_base_dir):
self._summary_base_dir = summary_base_dir
# Use dict to remove duplicate updaters.
self._updaters = {}
# key is train_id
self._lock = threading.Lock()
self._cache_items = {}
def size(self):
"""Gets used cache slots."""
return len(self._cache_items)
def register_cache_item_updater(self, updater: BaseCacheItemUpdater):
"""Register cache item updater."""
self._updaters[updater.__class__.__qualname__] = updater
def get_train_jobs(self):
"""Get cached train jobs."""
copied_train_jobs = dict(self._cache_items)
return copied_train_jobs
def get_train_job(self, train_id):
"""Get cached train job."""
try:
return self._cache_items[train_id]
except KeyError:
raise TrainJobNotExistError(train_id)
def cache_train_job(self, train_id) -> bool:
"""
Cache given train job and update train job's last access time.
This method should return true if reload actions should be taken to cache the train job.
Args:
train_id (str): Train Id.
"""
raise NotImplementedError()
def delete_train_job(self, train_id):
"""Delete train job from cache."""
if train_id in self._cache_items:
del self._cache_items[train_id]
def has_content(self):
"""Whether this cache manager has train jobs."""
return bool(self._cache_items)
def update_cache(self, executor):
"""
Update cache according to given train jobs on disk.
Different cache manager should implement different cache update policies in this method.
Args:
executor (Executor): The Executor instance.
"""
raise NotImplementedError()
class _BriefCacheManager(_BaseCacheManager):
"""A cache manager that holds all disk train jobs on disk."""
def __init__(self, summary_base_dir):
super(_BriefCacheManager, self).__init__(summary_base_dir)
self._summary_watcher = SummaryWatcher()
def cache_train_job(self, train_id):
"""
Cache given train job.
All disk train jobs are cached on every reload, so this method always return false.
Args:
train_id (str): Train Id.
"""
if train_id in self._cache_items:
self._cache_items[train_id].update_access_time()
return False
def update_cache(self, executor):
"""Update cache."""
logger.info('Start to update BriefCacheManager.')
summaries_info = self._summary_watcher.list_summary_directories(self._summary_base_dir)
basic_train_jobs = []
for info in summaries_info:
basic_train_jobs.append(_BasicTrainJob(
abs_summary_base_dir=self._summary_base_dir,
entry=info
))
with self._lock:
new_cache_items = self._merge_with_disk(basic_train_jobs)
self._cache_items = new_cache_items
for updater in self._updaters.values():
for cache_item in self._cache_items.values():
updater.update_item(cache_item)
def _merge_with_disk(self, disk_train_jobs: Iterable[_BasicTrainJob]):
"""
Merge train jobs in cache with train jobs from disk
This method will remove train jobs not on disk. Call this function with lock for thread safety.
Args:
disk_train_jobs (Iterable[_BasicTrainJob]): Basic train jobs info from disk.
Returns:
dict, a dict containing train jobs to be cached.
"""
new_cache_items = {}
for train_job in disk_train_jobs:
if train_job.train_id not in self._cache_items:
new_cache_items[train_job.train_id] = CachedTrainJob(train_job)
else:
reused_train_job = self._cache_items[train_job.train_id]
reused_train_job.basic_info = train_job
new_cache_items[train_job.train_id] = reused_train_job
return new_cache_items
def register_folder_analyzer(self, analyzer):
"""Register folder analyzer."""
self._summary_watcher.register_folder_analyzer(analyzer)
@property
def cache_items(self):
"""Get cache items."""
return self._cache_items
# Key for plugin tags.
DATAVISUAL_PLUGIN_KEY = "tag_mapping"
# Detail train job cache key for datavisual content.
DATAVISUAL_CACHE_KEY = "datavisual"
class _DetailCacheManager(_BaseCacheManager):
"""A cache manager that holds detailed info for most recently used train jobs."""
def __init__(self, summary_base_dir):
super().__init__(summary_base_dir)
self._loader_pool = {}
self._deleted_id_list = []
self._loader_pool_mutex = threading.Lock()
self._loader_generators = [DataLoaderGenerator(summary_base_dir)]
self._loading_mutex = threading.Lock()
def has_content(self):
"""Whether this cache manager has train jobs."""
return bool(self._loader_pool)
def register_folder_analyzer(self, analyzer):
"""Register folder analyzer."""
for generator in self._loader_generators:
generator.register_folder_analyzer(analyzer)
def size(self):
"""
Get the number of items in this cache manager.
To be implemented.
Returns:
int, the number of items in this cache manager.
"""
raise NotImplementedError()
def loader_pool_size(self):
"""Get loader pool size."""
return len(self._loader_pool)
def update_cache(self, executor):
"""
Update cache.
Will switch to using disk_train_jobs in the future.
Args:
executor (Executor): The Executor instance.
"""
with self._loading_mutex:
load_in_cache = exception_wrapper(self._execute_load_data)
try:
while not load_in_cache(executor):
yield
except UnknownError as ex:
logger.warning("Load event data failed. Detail: %s.", str(ex))
def cache_train_job(self, train_id):
"""Cache given train job."""
loader = None
need_reload = False
with self._loader_pool_mutex:
if self._is_loader_in_loader_pool(train_id, self._loader_pool):
loader = self._loader_pool.get(train_id)
if loader is None:
for generator in self._loader_generators:
tmp_loader = generator.generate_loader_by_train_id(train_id)
if loader and loader.latest_update_time > tmp_loader.latest_update_time:
continue
loader = tmp_loader
if loader is None:
raise TrainJobNotExistError(train_id)
self._add_loader(loader)
need_reload = True
self._update_loader_latest_update_time(loader.loader_id)
return need_reload
def get_train_jobs(self):
"""
Get train jobs
To be implemented.
"""
def _add_loader(self, loader):
"""
Add a loader to load data.
Args:
loader (LoaderStruct): A object of `Loader`.
"""
if len(self._loader_pool) >= MAX_DATA_LOADER_SIZE:
delete_number = len(self._loader_pool) - MAX_DATA_LOADER_SIZE + 1
sorted_loaders = sorted(self._loader_pool.items(),
key=lambda loader: loader[1].latest_update_time)
for index in range(delete_number):
delete_loader_id = sorted_loaders[index][0]
self._delete_loader(delete_loader_id)
self._loader_pool.update({loader.loader_id: loader})
def _delete_loader(self, loader_id):
"""
Delete loader from loader pool by loader id.
Args:
loader_id (str): ID of loader.
"""
if self._loader_pool.get(loader_id) is not None:
logger.debug("delete loader %s", loader_id)
self._loader_pool.pop(loader_id)
def _execute_loader(self, loader_id, executor):
"""
Load data form data_loader.
If there is something wrong by loading, add logs and delete the loader.
Args:
loader_id (str): An ID for `Loader`.
executor (Executor): The Executor instance.
Returns:
bool, True if the loader is finished loading.
"""
try:
with self._loader_pool_mutex:
loader = self._loader_pool.get(loader_id, None)
if loader is None:
logger.debug("Loader %r has been deleted, will not load data.", loader_id)
return True
loader.cache_status = CacheStatus.CACHING
if loader.data_loader.load(executor):
# Update loader cache status to CACHED.
# Loader with cache status CACHED should remain the same cache status.
loader.cache_status = CacheStatus.CACHED
return True
return False
except MindInsightException as ex:
logger.warning("Data loader %r load data failed. "
"Delete data_loader. Detail: %s", loader_id, ex)
with self._loader_pool_mutex:
self._delete_loader(loader_id)
return True
def _generate_loaders(self):
"""This function generates the loader from given path."""
loader_dict = {}
for generator in self._loader_generators:
loader_dict.update(generator.generate_loaders(self._loader_pool))
sorted_loaders = sorted(loader_dict.items(), key=lambda loader: loader[1].latest_update_time)
latest_loaders = sorted_loaders[-MAX_DATA_LOADER_SIZE:]
self._deal_loaders(latest_loaders)
def _deal_loaders(self, latest_loaders):
"""
This function determines which loaders to keep or remove or added.
It is based on the given dict of loaders.
Args:
latest_loaders (list[dict]): A list of <loader_id: LoaderStruct>.
"""
with self._loader_pool_mutex:
for loader_id, loader in latest_loaders:
if self._loader_pool.get(loader_id, None) is None:
self._add_loader(loader)
continue
# If this loader was updated manually before,
# its latest_update_time may bigger than update_time in summary.
if self._loader_pool[loader_id].latest_update_time < loader.latest_update_time:
self._update_loader_latest_update_time(loader_id, loader.latest_update_time)
def _execute_load_data(self, executor):
"""Load data through multiple threads."""
self._generate_loaders()
loader_pool = self._get_snapshot_loader_pool()
loaded = True
for loader_id in loader_pool:
loaded = self._execute_loader(loader_id, executor) and loaded
return loaded
def delete_train_job(self, train_id):
"""
Delete train job with a train id.
Args:
train_id (str): ID for train job.
"""
with self._loader_pool_mutex:
self._delete_loader(train_id)
def list_tensors(self, train_id, tag):
"""
List tensors of the given train job and tag.
If the tensor can not find by the given tag, will raise exception.
Args:
train_id (str): ID for train job.
tag (str): The tag name.
Returns:
list, the NameTuple format is `collections.namedtuple('_Tensor', ['wall_time', 'event_step', 'value'])`.
the value will contain the given tag data.
"""
loader_pool = self._get_snapshot_loader_pool()
if not self._is_loader_in_loader_pool(train_id, loader_pool):
raise TrainJobNotExistError("Can not find the given train job in cache.")
data_loader = loader_pool[train_id].data_loader
tensors = []
try:
events_data = data_loader.get_events_data()
tensors = events_data.tensors(tag)
except KeyError:
error_msg = "Can not find any data in this train job by given tag."
raise ParamValueError(error_msg)
except AttributeError:
logger.debug("Train job %r has been deleted or it has not loaded data, "
"and set tags to empty list.", train_id)
return tensors
def _check_train_job_exist(self, train_id, loader_pool):
"""
Check train job exist, if not exist, will raise exception.
Args:
train_id (str): The given train job id.
loader_pool (dict[str, LoaderStruct]): Refer to self._loader_pool.
Raises:
TrainJobNotExistError: Can not find train job in data manager.
"""
is_exist = False
if train_id in loader_pool:
return
for generator in self._loader_generators:
if generator.check_train_job_exist(train_id):
is_exist = True
break
if not is_exist:
raise TrainJobNotExistError("Can not find the train job in data manager.")
def _is_loader_in_loader_pool(self, train_id, loader_pool):
"""
Check train job exist, if not exist, return False. Else, return True.
Args:
train_id (str): The given train job id.
loader_pool (dict): See self._loader_pool.
Returns:
bool, if loader in loader pool, return True.
"""
if train_id in loader_pool:
return True
return False
def _get_snapshot_loader_pool(self):
"""
Create a snapshot of data loader pool to avoid concurrent mutation and iteration issues.
Returns:
dict, a copy of `self._loader_pool`.
"""
with self._loader_pool_mutex:
return dict(self._loader_pool)
def get_train_job(self, train_id):
"""
Get train job by train ID.
This method overrides parent method.
Args:
train_id (str): Train ID for train job.
Returns:
dict, single train job, if can not find any data, will return None.
"""
self._check_train_job_exist(train_id, self._loader_pool)
loader = self._get_loader(train_id)
if loader is None:
logger.info("No valid summary log in train job %s, or it is not in the cache.", train_id)
return None
train_job = loader.to_dict()
train_job.pop('data_loader')
plugin_data = {}
for plugin_name in PluginNameEnum.list_members():
job = self.get_train_job_by_plugin(train_id, plugin_name=plugin_name)
if job is None:
plugin_data[plugin_name] = []
else:
plugin_data[plugin_name] = job['tags']
train_job.update({DATAVISUAL_PLUGIN_KEY: plugin_data})
# Will fill basic_info value in future.
train_job_obj = CachedTrainJob(basic_info=None)
train_job_obj.set(DATAVISUAL_CACHE_KEY, train_job)
train_job_obj.cache_status = loader.cache_status
return train_job_obj
def _get_loader(self, train_id):
"""
Get loader by train id.
Args:
train_id (str): Train Id.
Returns:
LoaderStruct, the loader.
"""
loader = None
with self._loader_pool_mutex:
if self._is_loader_in_loader_pool(train_id, self._loader_pool):
loader = self._loader_pool.get(train_id)
return loader
def _update_loader_latest_update_time(self, loader_id, latest_update_time=None):
"""
Update loader with latest_update_time.
Args:
loader_id (str): ID of loader.
latest_update_time (float): Timestamp.
"""
if latest_update_time is None:
latest_update_time = time.time()
self._loader_pool[loader_id].latest_update_time = latest_update_time
def get_train_job_by_plugin(self, train_id, plugin_name):
"""
Get a train job by train job id.
If the given train job does not has the given plugin data, the tag list will be empty.
Args:
train_id (str): Get train job info by the given id.
plugin_name (str): Get tags by given plugin.
Returns:
TypedDict('TrainJobEntity', {'id': str, 'name': str, 'tags': List[str]}),
a train job object.
"""
self._check_train_job_exist(train_id, self._loader_pool)
loader = self._get_loader(train_id)
if loader is None:
logger.warning("No valid summary log in train job %s, "
"or it is not in the cache.", train_id)
return None
name = loader.name
data_loader = loader.data_loader
tags = []
try:
events_data = data_loader.get_events_data()
tags = events_data.list_tags_by_plugin(plugin_name)
except KeyError:
logger.debug("Plugin name %r does not exist "
"in train job %r, and set tags to empty list.", plugin_name, name)
except AttributeError:
logger.debug("Train job %r has been deleted or it has not loaded data, "
"and set tags to empty list.", name)
result = dict(id=train_id, name=name, tags=tags)
return result
class DataManager:
"""
    DataManager manages a pool of loaders which help access events data.
    Each loader handles the events data of one train job and corresponds
    to an events_data object.
    The DataManager builds a pool including all the data loaders.
    Each data_loader provides methods to extract the information
    of the events.
"""
def __init__(self, summary_base_dir):
"""
Initialize the pool of loader and the dict of name-to-path.
Args:
summary_base_dir (str): Base summary directory.
self._status: Refer `datavisual.common.enums.DataManagerStatus`.
"""
self._summary_base_dir = os.path.realpath(summary_base_dir)
self._status = DataManagerStatus.INIT.value
self._status_mutex = threading.Lock()
self._detail_cache = _DetailCacheManager(self._summary_base_dir)
self._brief_cache = _BriefCacheManager(self._summary_base_dir)
# This lock is used to make sure that only one self._load_data_in_thread() is running.
# Because self._load_data_in_thread() will create process pool when loading files, we can not
# afford to run multiple self._load_data_in_thread() simultaneously (will create too many processes).
self._load_data_lock = threading.Lock()
@property
def summary_base_dir(self):
"""Get summary base dir."""
return self._summary_base_dir
def start_load_data(self, reload_interval=0):
"""
Start threads for loading data.
Args:
reload_interval (int): Time to reload data again.
Returns:
Thread, the background Thread instance.
"""
logger.info("Start to load data")
DataManager.check_reload_interval(reload_interval)
thread = threading.Thread(target=self._load_data_in_thread,
name='start_load_data_thread',
args=(reload_interval,),
daemon=True)
thread.start()
return thread
@staticmethod
def check_reload_interval(reload_interval):
"""
Check reload interval is valid.
Args:
reload_interval (int): Reload interval >= 0.
"""
if not isinstance(reload_interval, int):
raise ParamValueError("The value of reload interval should be integer.")
if reload_interval < 0:
raise ParamValueError("The value of reload interval should be >= 0.")
def _load_data_in_thread(self, reload_interval):
"""Wrapper for load data in thread."""
if self._load_data_lock.locked():
return
with self._load_data_lock:
while True:
try:
exception_wrapper(self._load_data)()
except UnknownError as exc:
# Not raising the exception here to ensure that data reloading does not crash.
logger.warning(exc.message)
finally:
self._status = DataManagerStatus.DONE.value
if not reload_interval:
break
time.sleep(reload_interval)
def _load_data(self):
"""This function will load data once and ignore it if the status is loading."""
with self._status_mutex:
if self.status == DataManagerStatus.LOADING.value:
logger.debug("Current status is %s , will ignore to load data.", self.status)
return
self.status = DataManagerStatus.LOADING.value
with ComputingResourceManager.get_instance().get_executor(
max_processes_cnt=settings.MAX_PROCESSES_COUNT) as executor:
self._brief_cache.update_cache(executor)
brief_cache_update = time.time()
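            # While the detail cache loads, refresh the brief cache again whenever one round exceeds the threshold below.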
for _ in self._detail_cache.update_cache(executor):
update_interval = time.time() - brief_cache_update
logger.debug('Loading one round of detail cache taking %ss.', update_interval)
if update_interval > 3: # Use 3 seconds as threshold to avoid updating too often
self._brief_cache.update_cache(executor)
brief_cache_update += update_interval
with self._status_mutex:
if not self._brief_cache.has_content() and not self._detail_cache.has_content():
self.status = DataManagerStatus.INVALID.value
else:
self.status = DataManagerStatus.DONE.value
logger.info("Load brief data end, and loader pool size is %r.", self._detail_cache.loader_pool_size())
def get_train_job_by_plugin(self, train_id, plugin_name):
"""
Get a train job by train job id.
        If the given train job does not have the given plugin data, the tag list will be empty.
Args:
train_id (str): Get train job info by the given id.
plugin_name (str): Get tags by given plugin.
Returns:
TypedDict('TrainJobEntity', {'id': str, 'name': str, 'tags': List[str]}),
a train job object.
"""
self._check_status_valid()
return self._detail_cache.get_train_job_by_plugin(train_id, plugin_name)
def delete_train_job(self, train_id, only_delete_from_cache=True):
"""
Delete train job with a train id.
Args:
            train_id (str): ID for train job.
            only_delete_from_cache (bool): Whether to delete the train job from the cache only. Only True is supported.
        """
if not only_delete_from_cache:
raise NotImplementedError("Delete from both cache and disk is not supported.")
self._brief_cache.delete_train_job(train_id)
self._detail_cache.delete_train_job(train_id)
def list_tensors(self, train_id, tag):
"""
List tensors of the given train job and tag.
        If no tensor can be found for the given tag, an exception will be raised.
Args:
train_id (str): ID for train job.
tag (str): The tag name.
Returns:
            NamedTuple, the tuple format is `collections.namedtuple('_Tensor', ['wall_time', 'event_step', 'value'])`.
            The value field will contain the given tag data.
"""
self._check_status_valid()
return self._detail_cache.list_tensors(train_id, tag)
def _check_status_valid(self):
"""Check if the status is valid to load data."""
if self.status == DataManagerStatus.INIT.value:
raise exceptions.SummaryLogIsLoading("Data is being loaded, current status: %s." % self._status)
def get_train_job(self, train_id):
"""
Get train job by train ID.
Args:
train_id (str): Train ID for train job.
Returns:
            dict, a single train job; if no data can be found, None is returned.
"""
self._check_status_valid()
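        # Merge the brief and detail cache entries into a single TrainJob view.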
detail_train_job = self._detail_cache.get_train_job(train_id)
brief_train_job = self._brief_cache.get_train_job(train_id)
return TrainJob(brief_train_job, detail_train_job)
@property
def status(self):
"""
Get the status of data manager.
Returns:
DataManagerStatus, the status of data manager.
"""
return self._status
@status.setter
def status(self, status):
"""Set data manager status."""
self._status = status
def cache_train_job(self, train_id):
"""Cache given train job (async)."""
brief_need_reload = self._brief_cache.cache_train_job(train_id)
detail_need_reload = self._detail_cache.cache_train_job(train_id)
if brief_need_reload or detail_need_reload:
self.start_load_data()
def register_brief_cache_item_updater(self, updater: BaseCacheItemUpdater):
"""Register brief cache item updater for brief cache manager."""
self._brief_cache.register_cache_item_updater(updater)
def register_folder_analyzer(self, analyzer):
"""Register folder analyzer."""
self._brief_cache.register_folder_analyzer(analyzer)
self._detail_cache.register_folder_analyzer(analyzer)
def get_brief_cache(self):
"""Get brief cache."""
return self._brief_cache
def get_brief_train_job(self, train_id):
"""Get brief train job."""
return self._brief_cache.get_train_job(train_id)
DATA_MANAGER = DataManager(settings.SUMMARY_BASE_DIR)
|
sunpy/io/ana.py | mridullpandey/sunpy | 628 | 12680124 | """
This module provides an ANA file Reader.
This is a modified version of `pyana <https://github.com/tvwerkhoven/pyana>`__.
.. warning::
    The reading and writing of ANA files is not supported under Windows.
"""
import os
import collections
from sunpy.io.header import FileHeader
try:
from sunpy.io import _pyana
except ImportError:
_pyana = None
__all__ = ['read', 'get_header', 'write']
HDPair = collections.namedtuple('HDPair', ['data', 'header'])
def read(filename, debug=False, **kwargs):
"""
Loads an ANA file and returns the data and a header in a list of (data,
header) tuples.
Parameters
----------
filename : `str`
Name of file to be read.
debug : `bool`, optional
Prints verbose debug information.
Returns
-------
out : `list`
A list of (data, header) tuples
Examples
--------
>>> data = sunpy.io.ana.read(filename) # doctest: +SKIP
"""
if not os.path.isfile(filename):
raise OSError("File does not exist!")
if _pyana is None:
raise ImportError("C extension for ANA is missing, please rebuild.")
data = _pyana.fzread(filename, debug)
return [HDPair(data['data'], FileHeader(data['header']))]
def get_header(filename, debug=False):
"""
    Loads an ANA file and only returns the header, consisting of the dimensions,
    size (defined as the product of all dimensions times the size of the
    datatype, thus not relying on the actual file size) and comments.
Parameters
----------
filename : `str`
Name of file to be read.
debug : `bool`, optional
Prints verbose debug information.
Returns
-------
out : `list`
A list of `~sunpy.io.header.FileHeader` headers.
Examples
--------
>>> header = sunpy.io.ana.get_header(filename) # doctest: +SKIP
"""
if _pyana is None:
raise ImportError("C extension for ANA is missing, please rebuild")
data = _pyana.fzread(filename, debug)
return [FileHeader(data['header'])]
def write(filename, data, comments=False, compress=True, debug=False):
"""
Saves a 2D `numpy.array` as an ANA file and returns the bytes written or
``NULL``.
Parameters
----------
filename : `str`
Name of file to be created.
data : `numpy.ndarray`
The data to be stored.
comments : `~sunpy.io.header.FileHeader`, optional
The comments to be stored as a header.
compress : `bool`, optional
Compress the data with `True` (the default).
debug : `bool`, optional
Prints verbose debug information, defaults to `False`.
Returns
-------
    out : `int`
        The number of bytes written, or ``NULL``.
Examples
--------
>>> written = sunpy.io.ana.write(filename, data, comments=False, compress=True) # doctest: +SKIP
"""
if _pyana is None:
raise ImportError("C extension for ANA is missing, please rebuild")
if comments:
return _pyana.fzwrite(filename, data, int(compress), comments, debug)
else:
return _pyana.fzwrite(filename, data, int(compress), '', debug)
|
Python/tdw/FBOutput/EnvironmentCollision.py | felixbinder/tdw | 307 | 12680125 | <gh_stars>100-1000
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
class EnvironmentCollision(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsEnvironmentCollision(cls, buf, offset):
n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
x = EnvironmentCollision()
x.Init(buf, n + offset)
return x
# EnvironmentCollision
def Init(self, buf, pos):
self._tab = tdw.flatbuffers.table.Table(buf, pos)
# EnvironmentCollision
def ObjectId(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(tdw.flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# EnvironmentCollision
def State(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(tdw.flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 1
# EnvironmentCollision
def Contacts(self, j):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += tdw.flatbuffers.number_types.UOffsetTFlags.py_type(j) * 24
from .ContactPoint import ContactPoint
obj = ContactPoint()
obj.Init(self._tab.Bytes, x)
return obj
return None
# EnvironmentCollision
def ContactsLength(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# EnvironmentCollision
def Floor(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def EnvironmentCollisionStart(builder): builder.StartObject(4)
def EnvironmentCollisionAddObjectId(builder, objectId): builder.PrependInt32Slot(0, objectId, 0)
def EnvironmentCollisionAddState(builder, state): builder.PrependUint8Slot(1, state, 1)
def EnvironmentCollisionAddContacts(builder, contacts): builder.PrependUOffsetTRelativeSlot(2, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(contacts), 0)
def EnvironmentCollisionStartContactsVector(builder, numElems): return builder.StartVector(24, numElems, 4)
def EnvironmentCollisionAddFloor(builder, floor): builder.PrependBoolSlot(3, floor, 0)
def EnvironmentCollisionEnd(builder): return builder.EndObject()
|
scripts/extract_accuracy.py | awesome-archive/mixmatch | 1,124 | 12680132 | <reponame>awesome-archive/mixmatch
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract and save accuracy to 'stats/accuracy.json'.
The accuracy is extracted from the most recent eventfile.
"""
import glob
import json
import os.path
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
TAG = 'accuracy'
def summary_dict(accuracies):
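    """Summarize accuracies as the median over the last 1, 10, 20 and 50 recorded values."""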
return {
'last%02d' % x: np.median(accuracies[-x:]) for x in [1, 10, 20, 50]
}
def main(argv):
if len(argv) > 2:
raise app.UsageError('Too many command-line arguments.')
folder = argv[1]
matches = sorted(glob.glob(os.path.join(folder, 'tf/events.out.tfevents.*')))
assert matches, 'No events files found'
tags = set()
accuracies = []
for event_file in matches:
for e in tf.train.summary_iterator(event_file):
for v in e.summary.value:
if v.tag == TAG:
accuracies.append(v.simple_value)
break
elif not accuracies:
tags.add(v.tag)
assert accuracies, 'No "accuracy" tag found. Found tags = %s' % tags
target_dir = os.path.join(folder, 'stats')
target_file = os.path.join(target_dir, 'accuracy.json')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
with open(target_file, 'w') as f:
json.dump(summary_dict(accuracies), f, sort_keys=True, indent=4)
print('Saved: %s' % target_file)
if __name__ == '__main__':
app.run(main)
|
mindinsight/explainer/manager/explain_loader.py | lvyufeng/mindconverter_standalone | 216 | 12680144 | <filename>mindinsight/explainer/manager/explain_loader.py<gh_stars>100-1000
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ExplainLoader."""
import math
import os
import re
import threading
from collections import defaultdict
from datetime import datetime
from enum import Enum
from typing import Dict, Iterable, List, Optional, Union
from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
from mindinsight.datavisual.data_access.file_handler import FileHandler
from mindinsight.explainer.common.enums import ExplainFieldsEnum
from mindinsight.explainer.common.log import logger
from mindinsight.explainer.manager.explain_parser import ExplainParser
from mindinsight.utils.exceptions import ParamValueError, UnknownError
_NAN_CONSTANT = 'NaN'
_NUM_DIGITS = 6
_EXPLAIN_FIELD_NAMES = [
ExplainFieldsEnum.SAMPLE_ID,
ExplainFieldsEnum.BENCHMARK,
ExplainFieldsEnum.METADATA,
]
_SAMPLE_FIELD_NAMES = [
ExplainFieldsEnum.GROUND_TRUTH_LABEL,
ExplainFieldsEnum.INFERENCE,
ExplainFieldsEnum.EXPLANATION,
ExplainFieldsEnum.HIERARCHICAL_OCCLUSION
]
class _LoaderStatus(Enum):
STOP = 'STOP'
LOADING = 'LOADING'
PENDING = 'PENDING'
LOADED = 'LOADED'
def _round(score):
"""Take round of a number to given precision."""
try:
return round(score, _NUM_DIGITS)
except TypeError:
return score
class ExplainLoader:
"""ExplainLoader which manage the record in the summary file."""
def __init__(self,
loader_id: str,
summary_dir: str):
self._parser = ExplainParser(summary_dir)
self._loader_info = {
'loader_id': loader_id,
'summary_dir': summary_dir,
'create_time': os.stat(summary_dir).st_ctime,
'update_time': os.stat(summary_dir).st_mtime,
'query_time': os.stat(summary_dir).st_ctime,
'uncertainty_enabled': False,
}
self._samples = defaultdict(dict)
self._metadata = {'explainers': [], 'metrics': [], 'labels': [], 'min_confidence': 0.5}
self._benchmark = {'explainer_score': defaultdict(dict), 'label_score': defaultdict(dict)}
self._status = _LoaderStatus.PENDING.value
self._status_mutex = threading.Lock()
@property
def all_classes(self) -> List[Dict]:
"""
Return a list of detailed label information, including label id, label name and sample count of each label.
Returns:
list[dict], a list of dict, each dict contains:
- id (int): Label id.
- label (str): Label name.
- sample_count (int): Number of samples for each label.
"""
sample_count_per_label = defaultdict(int)
saliency_count_per_label = defaultdict(int)
hoc_count_per_label = defaultdict(int)
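        # Count, per label, the samples that have image data, plus those with saliency maps and HOC layers.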
for sample in self._samples.values():
if sample.get('image') and (sample.get('ground_truth_label') or sample.get('predicted_label')):
for label in set(sample['ground_truth_label'] + sample['predicted_label']):
sample_count_per_label[label] += 1
if sample['inferences'][label]['saliency_maps']:
saliency_count_per_label[label] += 1
if sample['inferences'][label]['hoc_layers']:
hoc_count_per_label[label] += 1
all_classes_return = [{'id': label_id,
'label': label_name,
'sample_count': sample_count_per_label[label_id],
'saliency_sample_count': saliency_count_per_label[label_id],
'hoc_sample_count': hoc_count_per_label[label_id]}
for label_id, label_name in enumerate(self._metadata['labels'])]
return all_classes_return
@property
def query_time(self) -> float:
"""Return query timestamp of explain loader."""
return self._loader_info['query_time']
@query_time.setter
def query_time(self, new_time: Union[datetime, float]):
"""
Update the query_time timestamp manually.
Args:
new_time (datetime.datetime or float): Updated query_time for the explain loader.
"""
if isinstance(new_time, datetime):
self._loader_info['query_time'] = new_time.timestamp()
elif isinstance(new_time, float):
self._loader_info['query_time'] = new_time
else:
raise TypeError('new_time should have type of datetime.datetime or float, but receive {}'
.format(type(new_time)))
@property
def create_time(self) -> float:
"""Return the create timestamp of summary file."""
return self._loader_info['create_time']
@create_time.setter
def create_time(self, new_time: Union[datetime, float]):
"""
        Update the create_time manually.
Args:
new_time (datetime.datetime or float): Updated create_time of summary_file.
"""
if isinstance(new_time, datetime):
self._loader_info['create_time'] = new_time.timestamp()
elif isinstance(new_time, float):
self._loader_info['create_time'] = new_time
else:
raise TypeError('new_time should have type of datetime.datetime or float, but receive {}'
.format(type(new_time)))
@property
def explainers(self) -> List[str]:
"""Return a list of explainer names recorded in the summary file."""
return self._metadata['explainers']
@property
def explainer_scores(self) -> List[Dict]:
"""
Return evaluation results for every explainer.
Returns:
list[dict], A list of evaluation results of each explainer. Each item contains:
- explainer (str): Name of evaluated explainer.
- evaluations (list[dict]): A list of evaluation results by different metrics.
- class_scores (list[dict]): A list of evaluation results on different labels.
Each item in the evaluations contains:
- metric (str): name of metric method
- score (float): evaluation result
Each item in the class_scores contains:
- label (str): Name of label
- evaluations (list[dict]): A list of evaluation results on different labels by different metrics.
Each item in evaluations contains:
- metric (str): Name of metric method
- score (float): Evaluation scores of explainer on specific label by the metric.
"""
explainer_scores = []
for explainer, explainer_score_on_metric in self._benchmark['explainer_score'].copy().items():
metric_scores = [{'metric': metric, 'score': _round(score)}
for metric, score in explainer_score_on_metric.items()]
label_scores = []
for label, label_score_on_metric in self._benchmark['label_score'][explainer].copy().items():
score_of_single_label = {
'label': self._metadata['labels'][label],
'evaluations': [
{'metric': metric, 'score': _round(score)} for metric, score in label_score_on_metric.items()
],
}
label_scores.append(score_of_single_label)
explainer_scores.append({
'explainer': explainer,
'evaluations': metric_scores,
'class_scores': label_scores,
})
return explainer_scores
@property
def labels(self) -> List[str]:
"""Return the label recorded in the summary."""
return self._metadata['labels']
@property
def metrics(self) -> List[str]:
"""Return a list of metric names recorded in the summary file."""
return self._metadata['metrics']
@property
def min_confidence(self) -> Optional[float]:
"""Return minimum confidence used to filter the predicted labels."""
return self._metadata['min_confidence']
@property
def sample_count(self) -> int:
"""
Return total number of samples in the loader.
        Since the loader only returns available samples (i.e. with original image data and ground_truth_label loaded in
        cache), the returned count only takes the available samples into account.
        Returns:
int, total number of available samples in the loading job.
"""
sample_count = 0
for sample in self._samples.values():
if sample.get('image', False):
sample_count += 1
return sample_count
@property
def samples(self) -> List[Dict]:
"""Return the information of all samples in the job."""
return self._samples
@property
def train_id(self) -> str:
"""Return ID of explain loader."""
return self._loader_info['loader_id']
@property
def uncertainty_enabled(self):
"""Whether uncertainty is enabled."""
return self._loader_info['uncertainty_enabled']
@property
def update_time(self) -> float:
"""Return latest modification timestamp of summary file."""
return self._loader_info['update_time']
@update_time.setter
def update_time(self, new_time: Union[datetime, float]):
"""
Update the update_time manually.
Args:
new_time (datetime.datetime or float): Updated time for the summary file.
"""
if isinstance(new_time, datetime):
self._loader_info['update_time'] = new_time.timestamp()
elif isinstance(new_time, float):
self._loader_info['update_time'] = new_time
else:
raise TypeError('new_time should have type of datetime.datetime or float, but receive {}'
.format(type(new_time)))
def load(self):
"""Start loading data from the latest summary file to the loader."""
if self.status != _LoaderStatus.LOADED.value:
self.status = _LoaderStatus.LOADING.value
filenames = []
for filename in FileHandler.list_dir(self._loader_info['summary_dir']):
if FileHandler.is_file(FileHandler.join(self._loader_info['summary_dir'], filename)):
filenames.append(filename)
filenames = ExplainLoader._filter_files(filenames)
if not filenames:
raise TrainJobNotExistError('No summary file found in %s, explain job will be delete.'
% self._loader_info['summary_dir'])
is_end = False
while not is_end and self.status != _LoaderStatus.STOP.value:
try:
file_changed, is_end, event_dict = self._parser.list_events(filenames)
except UnknownError:
is_end = True
break
if file_changed:
logger.info('Summary file in %s update, reload the data in the summary.',
self._loader_info['summary_dir'])
self._clear_job()
if self.status != _LoaderStatus.STOP.value:
self.status = _LoaderStatus.LOADING.value
if event_dict:
self._import_data_from_event(event_dict)
self._reform_sample_info()
if is_end:
self.status = _LoaderStatus.LOADED.value
@property
def status(self):
"""Get the status of this class with lock."""
with self._status_mutex:
return self._status
@status.setter
def status(self, status):
"""Set the status of this class with lock."""
with self._status_mutex:
self._status = status
def stop(self):
"""Stop load data."""
self.status = _LoaderStatus.STOP.value
def get_all_samples(self) -> List[Dict]:
"""
Return a list of sample information cached in the explain job.
Returns:
sample_list (list[SampleObj]): a list of sample objects, each object consists of:
- id (int): Sample id.
- name (str): Basename of image.
- inferences (list[dict]): List of inferences for all labels.
"""
returned_samples = [{'id': sample_id, 'name': info['name'], 'image': info['image'],
'inferences': list(info['inferences'].values())} for sample_id, info in
self._samples.items() if info.get('image', False)]
return returned_samples
def _import_data_from_event(self, event_dict: Dict):
"""Parse and import data from the event data."""
if 'metadata' not in event_dict and self._is_metadata_empty():
raise ParamValueError('metadata is incomplete, should write metadata first in the summary.')
for tag, event in event_dict.items():
if tag == ExplainFieldsEnum.METADATA.value:
self._import_metadata_from_event(event.metadata)
elif tag == ExplainFieldsEnum.BENCHMARK.value:
self._import_benchmark_from_event(event.benchmark)
elif tag == ExplainFieldsEnum.SAMPLE_ID.value:
self._import_sample_from_event(event)
else:
logger.info('Unknown ExplainField: %s.', tag)
def _is_metadata_empty(self):
"""Check whether metadata is completely loaded first."""
if not self._metadata['labels']:
return True
return False
def _import_metadata_from_event(self, metadata_event):
"""Import the metadata from event into loader."""
def take_union(existed_list, imported_data):
"""Take union of existed_list and imported_data."""
if isinstance(imported_data, Iterable):
for sample in imported_data:
if sample not in existed_list:
existed_list.append(sample)
take_union(self._metadata['explainers'], metadata_event.explain_method)
take_union(self._metadata['metrics'], metadata_event.benchmark_method)
take_union(self._metadata['labels'], metadata_event.label)
def _import_benchmark_from_event(self, benchmarks):
"""
Parse the benchmark event.
Benchmark data are separated into 'explainer_score' and 'label_score'. 'explainer_score' contains overall
evaluation results of each explainer by different metrics, while 'label_score' additionally divides the results
w.r.t different labels.
The structure of self._benchmark['explainer_score'] demonstrates below:
{
explainer_1: {metric_name_1: score_1, ...},
explainer_2: {metric_name_1: score_1, ...},
...
}
The structure of self._benchmark['label_score'] is:
{
explainer_1: {label_id: {metric_1: score_1, metric_2: score_2, ...}, ...},
explainer_2: {label_id: {metric_1: score_1, metric_2: score_2, ...}, ...},
...
}
Args:
benchmarks (BenchmarkContainer): Parsed benchmarks data from summary file.
"""
explainer_score = self._benchmark['explainer_score']
label_score = self._benchmark['label_score']
for benchmark in benchmarks:
explainer = benchmark.explain_method
metric = benchmark.benchmark_method
metric_score = benchmark.total_score
label_score_event = benchmark.label_score
explainer_score[explainer][metric] = _NAN_CONSTANT if math.isnan(metric_score) else metric_score
new_label_score_dict = ExplainLoader._score_event_to_dict(label_score_event, metric)
for label, scores_of_metric in new_label_score_dict.items():
if label not in label_score[explainer]:
label_score[explainer][label] = {}
label_score[explainer][label].update(scores_of_metric)
def _import_sample_from_event(self, sample):
"""
Parse the sample event.
Detailed data of each sample are store in self._samples, identified by sample_id. Each sample data are stored
in the following structure:
- ground_truth_labels (list[int]): A list of ground truth labels of the sample.
- ground_truth_probs (list[float]): A list of confidences of ground-truth label from black-box model.
- predicted_labels (list[int]): A list of predicted labels from the black-box model.
        - predicted_probs (list[float]): A list of confidences w.r.t. the predicted labels.
        - explanations (dict): Explanations is a dictionary where each explainer name maps to a dictionary
            of saliency maps. The structure of explanations is demonstrated below:
{
explainer_name_1: {label_1: saliency_id_1, label_2: saliency_id_2, ...},
explainer_name_2: {label_1: saliency_id_1, label_2: saliency_id_2, ...},
...
}
- hierarchical_occlusion (dict): A dictionary where each label is matched to a dictionary:
            {label_1: [{prob: layer1_prob, bbox: []}, {prob: layer2_prob, bbox: []}],
             label_2: [{prob: layer1_prob, bbox: []}, ...],
             ...}
"""
if getattr(sample, 'sample_id', None) is None:
raise ParamValueError('sample_event has no sample_id')
sample_id = sample.sample_id
if sample_id not in self._samples:
self._samples[sample_id] = {
'id': sample_id,
'name': str(sample_id),
'image': sample.image_path,
'ground_truth_label': [],
'predicted_label': [],
'inferences': defaultdict(dict),
'explanation': defaultdict(dict),
'hierarchical_occlusion': defaultdict(dict)
}
if sample.image_path:
self._samples[sample_id]['image'] = sample.image_path
for tag in _SAMPLE_FIELD_NAMES:
if tag == ExplainFieldsEnum.GROUND_TRUTH_LABEL:
if not self._samples[sample_id]['ground_truth_label']:
self._samples[sample_id]['ground_truth_label'].extend(list(sample.ground_truth_label))
elif tag == ExplainFieldsEnum.INFERENCE:
self._import_inference_from_event(sample, sample_id)
elif tag == ExplainFieldsEnum.EXPLANATION:
self._import_explanation_from_event(sample, sample_id)
elif tag == ExplainFieldsEnum.HIERARCHICAL_OCCLUSION:
self._import_hoc_from_event(sample, sample_id)
def _reform_sample_info(self):
"""Reform the sample info."""
for _, sample_info in self._samples.items():
inferences = sample_info['inferences']
res_dict = defaultdict(list)
for explainer, label_heatmap_path_dict in sample_info['explanation'].items():
for label, heatmap_path in label_heatmap_path_dict.items():
res_dict[label].append({'explainer': explainer, 'overlay': heatmap_path})
for label, item in inferences.items():
item['saliency_maps'] = res_dict[label]
for label, item in sample_info['hierarchical_occlusion'].items():
inferences[label]['hoc_layers'] = item['hoc_layers']
def _import_inference_from_event(self, event, sample_id):
"""Parse the inference event."""
inference = event.inference
if inference.ground_truth_prob_sd or inference.predicted_prob_sd:
self._loader_info['uncertainty_enabled'] = True
if not self._samples[sample_id]['predicted_label']:
self._samples[sample_id]['predicted_label'].extend(list(inference.predicted_label))
if not self._samples[sample_id]['inferences']:
inferences = {}
for label, prob in zip(list(event.ground_truth_label) + list(inference.predicted_label),
list(inference.ground_truth_prob) + list(inference.predicted_prob)):
inferences[label] = {
'label': self._metadata['labels'][label],
'confidence': _round(prob),
'saliency_maps': [],
'hoc_layers': {},
}
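                # When ground-truth labels exist, tag the prediction: FN below min_confidence, TP if the label is a ground truth, FP otherwise.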
if not event.ground_truth_label:
inferences[label]['prediction_type'] = None
else:
if prob < self.min_confidence:
inferences[label]['prediction_type'] = 'FN'
elif label in event.ground_truth_label:
inferences[label]['prediction_type'] = 'TP'
else:
inferences[label]['prediction_type'] = 'FP'
if self._loader_info['uncertainty_enabled']:
for label, std, low, high in zip(
list(event.ground_truth_label) + list(inference.predicted_label),
list(inference.ground_truth_prob_sd) + list(inference.predicted_prob_sd),
list(inference.ground_truth_prob_itl95_low) + list(inference.predicted_prob_itl95_low),
list(inference.ground_truth_prob_itl95_hi) + list(inference.predicted_prob_itl95_hi)):
inferences[label]['confidence_sd'] = _round(std)
inferences[label]['confidence_itl95'] = [_round(low), _round(high)]
self._samples[sample_id]['inferences'] = inferences
def _import_explanation_from_event(self, event, sample_id):
"""Parse the explanation event."""
if self._samples[sample_id]['explanation'] is None:
self._samples[sample_id]['explanation'] = defaultdict(dict)
sample_explanation = self._samples[sample_id]['explanation']
for explanation_item in event.explanation:
explainer = explanation_item.explain_method
label = explanation_item.label
sample_explanation[explainer][label] = explanation_item.heatmap_path
def _import_hoc_from_event(self, event, sample_id):
"""Parse the mango event."""
sample_hoc = self._samples[sample_id]['hierarchical_occlusion']
if event.hierarchical_occlusion:
for hoc_item in event.hierarchical_occlusion:
label = hoc_item.label
sample_hoc[label] = {}
sample_hoc[label]['label'] = label
sample_hoc[label]['mask'] = hoc_item.mask
sample_hoc[label]['confidence'] = self._samples[sample_id]['inferences'][label]['confidence']
sample_hoc[label]['hoc_layers'] = []
for hoc_layer in hoc_item.layer:
sample_hoc_dict = {'confidence': hoc_layer.prob}
box_lst = list(hoc_layer.box)
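                    # The box coordinates arrive as a flat list; regroup them into one box per four consecutive values.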
box = [box_lst[i: i + 4] for i in range(0, len(hoc_layer.box), 4)]
sample_hoc_dict['boxes'] = box
sample_hoc[label]['hoc_layers'].append(sample_hoc_dict)
def _clear_job(self):
"""Clear the cached data and update the time info of the loader."""
self._samples.clear()
self._loader_info['create_time'] = os.stat(self._loader_info['summary_dir']).st_ctime
self._loader_info['update_time'] = os.stat(self._loader_info['summary_dir']).st_mtime
self._loader_info['query_time'] = max(self._loader_info['update_time'], self._loader_info['query_time'])
def clear_inner_dict(outer_dict):
"""Clear the inner structured data of the given dict."""
for item in outer_dict.values():
item.clear()
map(clear_inner_dict, [self._metadata, self._benchmark])
@staticmethod
def _filter_files(filenames):
"""
Gets a list of summary files.
Args:
filenames (list[str]): File name list, like [filename1, filename2].
Returns:
list[str], filename list.
"""
return list(filter(
lambda filename: (re.search(r'summary\.\d+', filename) and filename.endswith("_explain")), filenames))
@staticmethod
def _is_inference_valid(sample):
"""
        Check whether the inference data is empty or has consistent lengths.
        If probs have a different length from the labels, it can be confusing when assigning each prob to a label.
        '_is_inference_valid' returns True only when the data sizes match each other. Note that prob data could be
        empty, so an empty prob will pass the check.
"""
ground_truth_len = len(sample['ground_truth_label'])
for name in ['ground_truth_prob', 'ground_truth_prob_sd',
'ground_truth_prob_itl95_low', 'ground_truth_prob_itl95_hi']:
if sample[name] and len(sample[name]) != ground_truth_len:
logger.info('Length of %s not match the ground_truth_label. Length of ground_truth_label: %d,'
'length of %s: %d', name, ground_truth_len, name, len(sample[name]))
return False
predicted_len = len(sample['predicted_label'])
for name in ['predicted_prob', 'predicted_prob_sd',
'predicted_prob_itl95_low', 'predicted_prob_itl95_hi']:
if sample[name] and len(sample[name]) != predicted_len:
logger.info('Length of %s not match the predicted_labels. Length of predicted_label: %d,'
'length of %s: %d', name, predicted_len, name, len(sample[name]))
return False
return True
@staticmethod
def _score_event_to_dict(label_score_event, metric) -> Dict:
"""Transfer metric scores per label to pre-defined structure."""
new_label_score_dict = defaultdict(dict)
for label_id, label_score in enumerate(label_score_event):
new_label_score_dict[label_id][metric] = _NAN_CONSTANT if math.isnan(label_score) else label_score
return new_label_score_dict
|
h2o-py/tests/testdir_misc/pyunit_factoring.py | ahmedengu/h2o-3 | 6,098 | 12680169 | <filename>h2o-py/tests/testdir_misc/pyunit_factoring.py
from __future__ import print_function
from builtins import zip
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
from h2o.exceptions import H2OTypeError, H2OValueError
def compare_frames(expected, actual):
assert actual.shape == expected.shape
    assert actual.columns == expected.columns, "Columns differ: %r vs %r" % (actual.columns, expected.columns)
for i in range(len(actual.columns)):
colname = actual.columns[i]
t1 = expected.types[colname]
t2 = actual.types[colname]
assert t1 == t2, ("Bad types %s: expected %s, got %s" %(colname, t1, t2))
col1 = expected[colname]
s1 = str(h2o.as_list(col1))
col2 = actual[colname]
s2 = str(h2o.as_list(col2))
assert s1 == s2, ("bad values: expected[%d] = %r, actual[%d] = %r"
% (i, s1, i, s2))
def test1():
badFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
badClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
compare_frames(badFrame, badClone)
try:
badFrame.asfactor()
assert False, "The frame contaied a real number, an error should be thrown"
except H2OValueError: # as designed
pass
compare_frames(badFrame, badClone)
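    # Re-fetch the frame from the backend to verify the failed asfactor() call did not modify it.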
originalAfterOp = H2OFrame.get_frame(badFrame.frame_id)
compare_frames(badFrame, originalAfterOp)
goodFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
goodClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
compare_frames(goodFrame, goodClone)
factoredFrame = goodFrame.asfactor()
originalAfterOp = H2OFrame.get_frame(goodFrame.frame_id)
compare_frames(goodFrame, originalAfterOp)
expectedFactoredFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]}, column_types={"one":"categorical", "two": "enum"})
compare_frames(expectedFactoredFrame, factoredFrame)
refactoredFrame = expectedFactoredFrame.asfactor()
factoredAfterOp = H2OFrame.get_frame(refactoredFrame.frame_id)
compare_frames(expectedFactoredFrame, factoredAfterOp)
if __name__ == "__main__":
pyunit_utils.standalone_test(test1)
else:
test1()
|
vega/algorithms/nas/sp_nas/spnas_trainer_callback.py | This-50m/vega | 724 | 12680200 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The trainer program for Auto Lane."""
import logging
import os
import time
import numpy as np
from pycocotools.coco import COCO
from vega.common import ClassFactory, ClassType
from vega.trainer.trainer_ms import TrainerMs
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
import mindspore.common.dtype as mstype
from mindspore.train import Model as MsModel
from mindspore import Tensor
from mindspore.nn import SGD
from .src.model_utils.config import config
from .src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset
from .src.lr_schedule import dynamic_lr
from .src.network_define import WithLossCell, TrainOneStepCell, LossNet
from .src.util import coco_eval, bbox2result_1image, results2json
from vega.datasets.conf.dataset import DatasetConfig
logger = logging.getLogger(__name__)
def valid():
"""Construct the trainer of SpNas."""
config = DatasetConfig().to_dict()
config = config['_class_data'].val
prefix = "FasterRcnn_eval.mindrecord"
mindrecord_dir = config.mindrecord_dir
mindrecord_file = os.path.join(mindrecord_dir, prefix)
if not os.path.exists(mindrecord_file):
if not os.path.isdir(mindrecord_dir):
os.makedirs(mindrecord_dir)
if config.dataset == "coco":
if os.path.isdir(config.coco_root):
data_to_mindrecord_byte_image(config, "coco", False, prefix, file_num=1)
else:
logging.info("coco_root not exits.")
else:
if os.path.isdir(config.IMAGE_DIR) and os.path.exists(config.ANNO_PATH):
data_to_mindrecord_byte_image(config, "other", False, prefix, file_num=1)
else:
logging.info("IMAGE_DIR or ANNO_PATH not exits.")
dataset = create_fasterrcnn_dataset(config, mindrecord_file, batch_size=config.test_batch_size, is_training=False)
return dataset
def train():
"""Train fasterrcnn dataset."""
config = DatasetConfig().to_dict()
config = config['_class_data'].train
prefix = "FasterRcnn.mindrecord"
mindrecord_dir = config.mindrecord_dir
mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
print("CHECKING MINDRECORD FILES ...")
rank = int(os.getenv('RANK_ID', '0'))
device_num = int(os.getenv('RANK_SIZE', '1'))
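    # Only rank 0 generates the MindRecord files; all ranks then wait for the index (.db) file to appear.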
if rank == 0 and not os.path.exists(mindrecord_file):
if not os.path.isdir(mindrecord_dir):
os.makedirs(mindrecord_dir)
if config.dataset == "coco":
if os.path.isdir(config.coco_root):
if not os.path.exists(config.coco_root):
logging.info("Please make sure config:coco_root is valid.")
raise ValueError(config.coco_root)
data_to_mindrecord_byte_image(config, "coco", True, prefix)
else:
logging.info("coco_root not exits.")
else:
if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
if not os.path.exists(config.image_dir):
logging.info("Please make sure config:image_dir is valid.")
raise ValueError(config.image_dir)
data_to_mindrecord_byte_image(config, "other", True, prefix)
else:
logging.info("image_dir or anno_path not exits.")
while not os.path.exists(mindrecord_file + ".db"):
time.sleep(5)
dataset = create_fasterrcnn_dataset(config, mindrecord_file, batch_size=config.batch_size,
device_num=device_num, rank_id=rank,
num_parallel_workers=config.num_parallel_workers,
python_multiprocessing=config.python_multiprocessing)
return dataset
@ClassFactory.register(ClassType.TRAINER)
class SpNasTrainerCallback(TrainerMs):
"""Construct the trainer of SpNas."""
disable_callbacks = ['ProgressLogger']
def build(self):
"""Construct the trainer of SpNas."""
logging.debug("Trainer Config: {}".format(self.config))
self._init_hps()
self.use_syncbn = self.config.syncbn
if not self.train_loader:
self.train_loader = train()
if not self.valid_loader:
self.valid_loader = valid()
self.batch_num_train = self.train_loader.get_dataset_size()
self.batch_num_valid = self.valid_loader.get_dataset_size()
self.valid_metrics = self._init_metrics()
def _train_epoch(self):
"""Construct the trainer of SpNas."""
dataset = self.train_loader
dataset_size = dataset.get_dataset_size()
self.model = self.model.set_train()
self.model.to_float(mstype.float16)
self.loss = LossNet()
lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)
self.optimizer = SGD(params=self.model.trainable_params(), learning_rate=lr, momentum=config.momentum,
weight_decay=config.weight_decay, loss_scale=config.loss_scale)
net_with_loss = WithLossCell(self.model, self.loss)
net = TrainOneStepCell(net_with_loss, self.optimizer, sens=config.loss_scale)
config_ck = CheckpointConfig(save_checkpoint_steps=self.config.save_steps, keep_checkpoint_max=1)
save_path = self.get_local_worker_path(self.step_name, self.worker_id)
ckpoint_cb = ModelCheckpoint(config=config_ck, directory=save_path)
loss_cb = LossMonitor(per_print_times=1)
callback_list = [ckpoint_cb, loss_cb]
self.ms_model = MsModel(net)
try:
self.ms_model.train(epoch=self.config.epochs,
train_dataset=dataset,
callbacks=callback_list,
dataset_sink_mode=False)
except RuntimeError as e:
logging.warning(f"failed to train the model, skip it, message: {str(e)}")
def _valid_epoch(self):
"""Construct the trainer of SpNas."""
dataset = self.valid_loader
self.model.set_train(False)
self.model.to_float(mstype.float16)
outputs = []
dataset_coco = COCO(self.config.metric.params.anno_path)
max_num = 128
for data in dataset.create_dict_iterator(num_epochs=1):
img_data = data['image']
img_metas = data['image_shape']
gt_bboxes = data['box']
gt_labels = data['label']
gt_num = data['valid_num']
output = self.model(img_data, img_metas, gt_bboxes, gt_labels, gt_num)
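            # The network outputs predicted boxes, labels and a validity mask for each image in the batch.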
all_bbox = output[0]
all_label = output[1]
all_mask = output[2]
for j in range(config.test_batch_size):
all_bbox_squee = np.squeeze(all_bbox.asnumpy()[j, :, :])
all_label_squee = np.squeeze(all_label.asnumpy()[j, :, :])
all_mask_squee = np.squeeze(all_mask.asnumpy()[j, :, :])
all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
all_labels_tmp_mask = all_label_squee[all_mask_squee]
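                # Keep at most max_num detections per image, ranked by descending score.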
if all_bboxes_tmp_mask.shape[0] > max_num:
inds = np.argsort(-all_bboxes_tmp_mask[:, -1])
inds = inds[:max_num]
all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
all_labels_tmp_mask = all_labels_tmp_mask[inds]
outputs_tmp = bbox2result_1image(all_bboxes_tmp_mask, all_labels_tmp_mask, config.num_classes)
outputs.append(outputs_tmp)
eval_types = ["bbox"]
result_files = results2json(dataset_coco, outputs, "./results.pkl")
metrics = coco_eval(result_files, eval_types, dataset_coco, single_result=True)
self.valid_metrics.update(metrics)
valid_logs = dict()
valid_logs['cur_valid_perfs'] = self.valid_metrics.results
self.callbacks.after_valid(valid_logs)
|
third_party/blink/renderer/build/scripts/blinkbuild/name_style_converter.py | zealoussnow/chromium | 14,668 | 12680201 | <reponame>zealoussnow/chromium
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=import-error,print-statement,relative-import
import copy
import re
SPECIAL_TOKENS = [
# This list should be sorted by length.
'WebCodecs',
'WebSocket',
'String16',
'Float32',
'Float64',
'Base64',
'IFrame',
'Latin1',
'MathML',
'PlugIn',
'SQLite',
'Uint16',
'Uint32',
'WebGL2',
'webgl2',
'WebGPU',
'ASCII',
'CSSOM',
'CType',
'DList',
'Int16',
'Int32',
'MPath',
'OList',
'TSpan',
'UList',
'UTF16',
'Uint8',
'WebGL',
'XPath',
'ETC1',
'etc1',
'HTML',
'Int8',
'S3TC',
's3tc',
'SPv2',
'UTF8',
'sRGB',
'URLs',
'API',
'CSS',
'DNS',
'DOM',
'EXT',
'RTC',
'SVG',
'XSS',
'2D',
'AX',
'FE',
'JS',
'V0',
'V8',
'v8',
'XR',
]
_SPECIAL_TOKENS_WITH_NUMBERS = [
token for token in SPECIAL_TOKENS if re.search(r'[0-9]', token)
]
# Applying _TOKEN_PATTERNS repeatedly should capture any sequence of a-z, A-Z,
# 0-9.
_TOKEN_PATTERNS = [
# 'Foo' 'foo'
'[A-Z]?[a-z]+',
# The following pattern captures only 'FOO' in 'FOOElement'.
'[A-Z]+(?![a-z])',
# '2D' '3D', but not '2Dimension'
'[0-9][Dd](?![a-z])',
'[0-9]+',
]
_TOKEN_RE = re.compile(r'(' + '|'.join(SPECIAL_TOKENS + _TOKEN_PATTERNS) +
r')')
def tokenize_name(name):
"""Tokenize the specified name.
A token consists of A-Z, a-z, and 0-9 characters. Other characters work as
token delimiters, and the resultant list won't contain such characters.
Capital letters also work as delimiters. E.g. 'FooBar-baz' is tokenized to
['Foo', 'Bar', 'baz']. See _TOKEN_PATTERNS for more details.
This function detects special cases that are not easily discernible without
additional knowledge, such as recognizing that in SVGSVGElement, the first
two SVGs are separate tokens, but WebGL is one token.
Returns:
A list of token strings.
"""
# In case |name| is written in lowerCamelCase, we try to match special
    # tokens that contain numbers, ignoring case, only at the first step.
tokens = []
match = re.search(r'^(' + '|'.join(_SPECIAL_TOKENS_WITH_NUMBERS) + r')',
name, re.IGNORECASE)
if match:
tokens.append(match.group(0))
name = name[match.end(0):]
return tokens + _TOKEN_RE.findall(name)
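# Illustrative examples of the tokenizer's behavior:
#   tokenize_name('FooBar-baz')    -> ['Foo', 'Bar', 'baz']
#   tokenize_name('SVGSVGElement') -> ['SVG', 'SVG', 'Element']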
class NameStyleConverter(object):
"""Converts names from camelCase to various other styles.
"""
def __init__(self, name):
self.tokens = tokenize_name(name)
self._original = name
@property
def original(self):
return self._original
def __str__(self):
return self._original
# Make this class workable with sort().
def __lt__(self, other):
return self.original < other.original
# Make this class workable with groupby().
def __eq__(self, other):
return self.original == other.original
# If __eq__() is defined then a custom __hash__() needs to be defined.
def __hash__(self):
return hash(self.original)
def to_snake_case(self):
"""Snake case is the file and variable name style per Google C++ Style
Guide:
https://google.github.io/styleguide/cppguide.html#Variable_Names
Also known as the hacker case.
https://en.wikipedia.org/wiki/Snake_case
"""
return '_'.join([token.lower() for token in self.tokens])
def to_upper_camel_case(self):
"""Upper-camel case is the class and function name style per
Google C++ Style Guide:
https://google.github.io/styleguide/cppguide.html#Function_Names
Also known as the PascalCase.
https://en.wikipedia.org/wiki/Camel_case.
"""
tokens = self.tokens
# If the first token is one of SPECIAL_TOKENS, we should replace the
# token with the matched special token.
# e.g. ['css', 'External', 'Scanner', 'Preload'] => 'CSSExternalScannerPreload'
if tokens and tokens[0].lower() == tokens[0]:
for special in SPECIAL_TOKENS:
if special.lower() == tokens[0]:
tokens = copy.deepcopy(tokens)
tokens[0] = special
break
return ''.join([token[0].upper() + token[1:] for token in tokens])
def to_lower_camel_case(self):
"""Lower camel case is the name style for attribute names and operation
names in web platform APIs.
e.g. 'addEventListener', 'documentURI', 'fftSize'
https://en.wikipedia.org/wiki/Camel_case.
"""
if not self.tokens:
return ''
return self.tokens[0].lower() + ''.join(
[token[0].upper() + token[1:] for token in self.tokens[1:]])
def to_macro_case(self):
"""Macro case is the macro name style per Google C++ Style Guide:
https://google.github.io/styleguide/cppguide.html#Macro_Names
"""
return '_'.join([token.upper() for token in self.tokens])
def to_all_cases(self):
return {
'snake_case': self.to_snake_case(),
'upper_camel_case': self.to_upper_camel_case(),
'macro_case': self.to_macro_case(),
}
# Use the following high level naming functions which describe the semantics
# of the name, rather than a particular style.
def to_class_name(self, prefix=None, suffix=None):
"""Represents this name as a class name in Chromium C++ style.
i.e. UpperCamelCase.
"""
camel_prefix = prefix[0].upper() + prefix[1:].lower() if prefix else ''
camel_suffix = suffix[0].upper() + suffix[1:].lower() if suffix else ''
return camel_prefix + self.to_upper_camel_case() + camel_suffix
def to_class_data_member(self, prefix=None, suffix=None):
"""Represents this name as a data member name in Chromium C++ style.
i.e. snake_case_with_trailing_underscore_.
"""
lower_prefix = prefix.lower() + '_' if prefix else ''
lower_suffix = suffix.lower() + '_' if suffix else ''
return lower_prefix + self.to_snake_case() + '_' + lower_suffix
def to_function_name(self, prefix=None, suffix=None):
"""Represents this name as a function name in Blink C++ style.
i.e. UpperCamelCase
Note that this function should not be used for IDL operation names and
C++ functions implementing IDL operations and attributes.
"""
camel_prefix = prefix[0].upper() + prefix[1:].lower() if prefix else ''
camel_suffix = ''
if type(suffix) is list:
for item in suffix:
camel_suffix += item[0].upper() + item[1:].lower()
elif suffix:
camel_suffix = suffix[0].upper() + suffix[1:].lower()
return camel_prefix + self.to_upper_camel_case() + camel_suffix
def to_enum_value(self):
"""Represents this name as an enum value in Blink C++ style.
i.e. kUpperCamelCase
"""
return 'k' + self.to_upper_camel_case()
def to_header_guard(self):
"""Represents this name as a header guard style in Chromium C++ style.
i.e. THIRD_PARTY_BLINK_RENDERER_MODULES_MODULES_EXPORT_H_
"""
return re.sub(r'[-/.]', '_', self.to_macro_case()) + '_'
|
aztk/client/cluster/helpers/wait_for_task_to_complete.py | Geims83/aztk | 161 | 12680205 | <gh_stars>100-1000
import time
import azure.batch.models as batch_models
def wait_for_task_to_complete(core_cluster_operations, job_id: str, task_id: str):
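    """Block until the given Batch task reaches the completed state, polling every 2 seconds."""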
while True:
task = core_cluster_operations.batch_client.task.get(job_id=job_id, task_id=task_id)
if task.state != batch_models.TaskState.completed:
time.sleep(2)
else:
return
|
grove/display/jhd1802.py | Mehmet-Erkan/grove.py | 122 | 12680230 | <filename>grove/display/jhd1802.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2018 Seeed Technology Co.,Ltd.
#
# This is the library for Grove Base Hat
# which used to connect grove sensors for Raspberry Pi.
'''
This is the code for
- `Grove - 16 x 2 LCD (Black on Red) <https://www.seeedstudio.com/Grove-16-x-2-LCD-Black-on-Re-p-3197.html>`_
- `Grove - 16 x 2 LCD (Black on Yellow) <https://www.seeedstudio.com/Grove-16-x-2-LCD-Black-on-Yello-p-3198.html>`_
- `Grove - 16 x 2 LCD (White on Blue) <https://www.seeedstudio.com/Grove-16-x-2-LCD-White-on-Blu-p-3196.html>`_
Examples:
.. code-block:: python
import time
from grove.factory import Factory
# LCD 16x2 Characters
lcd = Factory.getDisplay("JHD1802")
rows, cols = lcd.size()
print("LCD model: {}".format(lcd.name))
print("LCD type : {} x {}".format(cols, rows))
lcd.setCursor(0, 0)
lcd.write("hello world!")
lcd.setCursor(0, cols - 1)
lcd.write('X')
lcd.setCursor(rows - 1, 0)
for i in range(cols):
lcd.write(chr(ord('A') + i))
time.sleep(3)
lcd.clear()
'''
import upm.pyupm_jhd1313m1 as upmjhd
from grove.display.base import *
import sys, mraa
# sphinx autoapi required
__all__ = ["JHD1802"]
class JHD1802(Display):
'''
Grove - 16 x 2 LCD, using chip JHD1802.
- Grove - 16 x 2 LCD (Black on Yellow)
- Grove - 16 x 2 LCD (Black on Red)
- Grove - 16 x 2 LCD (White on Blue)
Also, it's our class name,
which could drive the above three LCDs.
Args:
address(int): I2C device address, default to 0x3E.
'''
def __init__(self, address = 0x3E):
self._bus = mraa.I2c(0)
self._addr = address
self._bus.address(self._addr)
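        # Probe the device: a non-zero write status means no LCD acknowledged at this address.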
if self._bus.writeByte(0):
print("Check if the LCD {} inserted, then try again"
.format(self.name))
sys.exit(1)
self.jhd = upmjhd.Jhd1313m1(0, address, address)
@property
def name(self):
'''
Get device name
Returns:
string: JHD1802
'''
return "JHD1802"
def type(self):
'''
Get device type
Returns:
int: ``TYPE_CHAR``
'''
return TYPE_CHAR
def size(self):
'''
Get display size
Returns:
(Rows, Columns): the display size, in characters.
'''
        # Character 16x2
# return (Rows, Columns)
return 2, 16
def clear(self):
'''
Clears the screen and positions the cursor in the upper-left corner.
'''
self.jhd.clear()
def draw(self, data, bytes):
'''
Not implement for char type display device.
'''
return False
def home(self):
'''
Positions the cursor in the upper-left of the LCD.
That is, use that location in outputting subsequent text to the display.
'''
self.jhd.home()
def setCursor(self, row, column):
'''
Position the LCD cursor; that is, set the location
at which subsequent text written to the LCD will be displayed.
Args:
row (int): the row at which to position cursor, with 0 being the first row
column(int): the column at which to position cursor, with 0 being the first column
Returns:
None
'''
self.jhd.setCursor(row, column)
def write(self, msg):
'''
Write character(s) to the LCD.
Args:
msg (string): the character(s) to write to the display
Returns:
None
'''
self.jhd.write(msg)
def _cursor_on(self, enable):
if enable:
self.jhd.cursorOn()
else:
self.jhd.cursorOff()
def main():
import time
lcd = JHD1802()
rows, cols = lcd.size()
print("LCD model: {}".format(lcd.name))
print("LCD type : {} x {}".format(cols, rows))
lcd.backlight(False)
time.sleep(1)
lcd.backlight(True)
lcd.setCursor(0, 0)
lcd.write("hello world!")
lcd.setCursor(0, cols - 1)
lcd.write('X')
lcd.setCursor(rows - 1, 0)
for i in range(cols):
lcd.write(chr(ord('A') + i))
time.sleep(3)
lcd.clear()
if __name__ == '__main__':
main()
|
tests/routers/test_routers_social.py | theoohoho/authx | 141 | 12680248 | <filename>tests/routers/test_routers_social.py
from unittest import mock
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from starlette.middleware.sessions import SessionMiddleware
from authx import get_social_router
from tests.utils import ACCESS_COOKIE_NAME, REFRESH_COOKIE_NAME, MockAuthBackend
app = FastAPI()
app.add_middleware(SessionMiddleware, secret_key="SECRET", max_age=10)
router = get_social_router(
None,
MockAuthBackend(None, None, None, None, None),
False,
"http://127.0.0.1",
ACCESS_COOKIE_NAME,
REFRESH_COOKIE_NAME,
None,
None,
["google", "facebook"],
{
"google": {
"id": "id",
"secret": "secret",
},
"facebook": {
"id": "id",
"secret": "secret",
},
},
)
app.include_router(router, prefix="/auth")
test_client = TestClient(app)
ACCESS_TOKEN = "<PASSWORD>"
REFRESH_TOKEN = "<PASSWORD>"
@pytest.mark.parametrize("provider", ["google", "facebook"])
def test_login(provider: str):
"""
Test login with social provider
Args:
provider (str): social provider
"""
url = app.url_path_for("social:login", provider=provider)
with mock.patch(
f"authx.routers.social.SocialService.login_{provider}",
mock.Mock(return_value="/"),
) as mock_method:
response = test_client.get(url, allow_redirects=False)
mock_method.assert_called_once()
assert response.status_code == 307
@pytest.mark.parametrize("provider", ["google", "facebook"])
@mock.patch(
"authx.routers.social.check_state",
mock.Mock(return_value=True),
)
def test_callback(provider: str):
"""
Test callback with social provider
Args:
provider (str): social provider
"""
patcher_callback = mock.patch(
f"authx.routers.social.SocialService.callback_{provider}",
mock.AsyncMock(
return_value=(
None,
None,
)
),
)
mock_callback = patcher_callback.start()
patcher_resolve_user = mock.patch(
"authx.routers.social.SocialService.resolve_user",
mock.AsyncMock(return_value={"access": ACCESS_TOKEN, "refresh": REFRESH_TOKEN}),
)
mock_resolve_user = patcher_resolve_user.start()
url = app.url_path_for("social:callback", provider=provider)
response = test_client.get(url, allow_redirects=False)
assert response.status_code == 307
assert response.cookies.get(ACCESS_COOKIE_NAME) == ACCESS_TOKEN
assert response.cookies.get(REFRESH_COOKIE_NAME) == REFRESH_TOKEN
mock_callback.assert_awaited_once()
mock_resolve_user.assert_awaited_once_with(provider, None, None)
patcher_callback.stop()
patcher_resolve_user.stop()
|
src/third_party/swiftshader/third_party/llvm-7.0/llvm/utils/lit/tests/discovery.py | rhencke/engine | 171 | 12680264 | # Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
#
# CHECK-BASIC-ERR: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-BASIC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-BASIC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-BASIC-OUT: -- Test Suites --
# CHECK-BASIC-OUT: sub-suite - 2 tests
# CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-BASIC-OUT: top-level-suite - 3 tests
# CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery$}}
#
# CHECK-BASIC-OUT: -- Available Tests --
# CHECK-BASIC-OUT: sub-suite :: test-one
# CHECK-BASIC-OUT: sub-suite :: test-two
# CHECK-BASIC-OUT: top-level-suite :: subdir/test-three
# CHECK-BASIC-OUT: top-level-suite :: test-one
# CHECK-BASIC-OUT: top-level-suite :: test-two
# Check discovery when providing the special builtin 'config_map'
# RUN: %{python} %{inputs}/config-map-discovery/driver.py \
# RUN: %{inputs}/config-map-discovery/main-config/lit.cfg \
# RUN: %{inputs}/config-map-discovery/lit.alt.cfg \
# RUN: --single-process --debug --show-tests --show-suites > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-ERR < %t.err %s
# CHECK-CONFIG-MAP-OUT-NOT: ERROR: lit.cfg invoked
# CHECK-CONFIG-MAP-OUT: -- Test Suites --
# CHECK-CONFIG-MAP-OUT: config-map - 2 tests
# CHECK-CONFIG-MAP-OUT: Source Root: {{.*[/\\]config-map-discovery[/\\]tests}}
# CHECK-CONFIG-MAP-OUT: Exec Root : {{.*[/\\]tests[/\\]Inputs[/\\]config-map-discovery}}
# CHECK-CONFIG-MAP-OUT: -- Available Tests --
# CHECK-CONFIG-MAP-OUT-NOT: invalid-test.txt
# CHECK-CONFIG-MAP-OUT: config-map :: test1.txt
# CHECK-CONFIG-MAP-OUT: config-map :: test2.txt
# CHECK-CONFIG-MAP-ERR: loading suite config '{{.*}}lit.alt.cfg'
# CHECK-CONFIG-MAP-ERR: loaded config '{{.*}}lit.alt.cfg'
# CHECK-CONFIG-MAP-ERR: resolved input '{{.*(/|\\\\)config-map-discovery(/|\\\\)main-config}}' to 'config-map'::()
# Check discovery when exact test names are given.
#
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-EXACT-TEST < %t.out %s
#
# CHECK-EXACT-TEST: -- Available Tests --
# CHECK-EXACT-TEST: sub-suite :: test-one
# CHECK-EXACT-TEST: top-level-suite :: subdir/test-three
# Check discovery when config files end in .py
# RUN: %{lit} %{inputs}/py-config-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-ERR < %t.err %s
#
# CHECK-PYCONFIG-ERR: loading suite config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}'
# CHECK-PYCONFIG-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}'
# CHECK-PYCONFIG-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-PYCONFIG-OUT: -- Test Suites --
# CHECK-PYCONFIG-OUT: sub-suite - 2 tests
# CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-PYCONFIG-OUT: top-level-suite - 3 tests
# CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]py-config-discovery$}}
#
# CHECK-PYCONFIG-OUT: -- Available Tests --
# CHECK-PYCONFIG-OUT: sub-suite :: test-one
# CHECK-PYCONFIG-OUT: sub-suite :: test-two
# CHECK-PYCONFIG-OUT: top-level-suite :: subdir/test-three
# CHECK-PYCONFIG-OUT: top-level-suite :: test-one
# CHECK-PYCONFIG-OUT: top-level-suite :: test-two
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
# CHECK-ASEXEC-ERR: loading suite config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}'
# CHECK-ASEXEC-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}'
# CHECK-ASEXEC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-ASEXEC-OUT: -- Test Suites --
# CHECK-ASEXEC-OUT: sub-suite - 2 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-ASEXEC-OUT: top-level-suite - 3 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]exec-discovery$}}
#
# CHECK-ASEXEC-OUT: -- Available Tests --
# CHECK-ASEXEC-OUT: sub-suite :: test-one
# CHECK-ASEXEC-OUT: sub-suite :: test-two
# CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three
# CHECK-ASEXEC-OUT: top-level-suite :: test-one
# CHECK-ASEXEC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
# FIXME: Note that using a path into a subsuite doesn't work correctly here.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-EXACT-TEST < %t.out %s
#
# CHECK-ASEXEC-EXACT-TEST: -- Available Tests --
# CHECK-ASEXEC-EXACT-TEST: top-level-suite :: subdir/test-three
# Check that we don't recurse infinitely when loading an site specific test
# suite located inside the test source root.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# Try it again after cd'ing into the test suite using a short relative path.
#
# RUN: cd %{inputs}/exec-discovery-in-tree/obj/
# RUN: %{lit} . \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*[/\\]exec-discovery-in-tree$}}
# CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*[/\\]exec-discovery-in-tree[/\\]obj$}}
# CHECK-ASEXEC-INTREE-NEXT: -- Available Tests --
# CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
|
eval/multi_bin/all_bins/run_single.py | cpbscholten/karonte | 294 | 12680265 | <gh_stars>100-1000
# Arguments:
# 1) JSON config file (contains the firmware path under "fw_path")
# 2) log directory
import os
import sys
import json
class Runner:
def __init__(self, cmd):
self.cmd = cmd
def run_it(self):
os.system(self.cmd)
def run(config, log_dir):
jconfig = json.load(open(config, 'r'))
core_script = '/'.join(__file__.split('/')[:-1]) + '/run_core.py'
cmd = 'python ./' + core_script + ' -d ' + jconfig['fw_path'] + ' -l ' + log_dir
obj = Runner(cmd)
obj.run_it()
if __name__ == '__main__':
run(sys.argv[1], sys.argv[2])
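# Illustrative invocation (paths are hypothetical, not part of the repo):
#   python run_single.py ./configs/sample_fw.json ./logs/
# The JSON config is expected to contain at least a "fw_path" entry, e.g.
#   {"fw_path": "/path/to/unpacked_firmware"}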
|
icevision/models/mmdet/fastai/__init__.py | ai-fast-track/mantisshrimp | 580 | 12680290 | <filename>icevision/models/mmdet/fastai/__init__.py
from icevision.models.mmdet.fastai.callbacks import *
from icevision.models.mmdet.fastai.learner import *
|
examples/global_code.py | quynhanh-ngx/pytago | 206 | 12680313 | A = [1, 2, 3]
for i, x in enumerate(A):
A[i] += x
B = A[0]
C = A[0]
D: int = 3
while C < A[2]:
C += 1
if C == A[2]:
print('True')
def main():
print("Main started")
print(A)
print(B)
print(C)
print(D)
if __name__ == '__main__':
main()
|
.travis/run-make-gateware-filter.py | auscompgeek/litex-buildenv | 198 | 12680332 | <gh_stars>100-1000
#!/usr/bin/python
import collections
import os
import pprint
import re
import signal
import subprocess
import sys
import threading
import time
log_file = open(sys.argv[1], 'w+')
# Suppressions for warning / info messages
#suppressions = [x.strip() for x in open(sys.argv[2], 'r').readlines() if not x.startswith('#')]
#suppressions = [re.compile(x) for x in suppressions]
top_path = os.path.normpath(os.getcwd())
def output(s, *args, **kw):
with keepalive_thread.lock:
if "before_next_output" in kw:
before_next_output = kw["before_next_output"]
del kw["before_next_output"]
else:
before_next_output = ""
keepalive_thread.output = True
if args:
assert not kw
data = (s % args).encode('utf-8')
elif kw:
data = (s % kw).encode('utf-8')
else:
data = s.encode('utf-8')
if data:
sys.stdout.write(keepalive_thread.before_next_output)
keepalive_thread.before_next_output = before_next_output
sys.stdout.flush()
sys.stdout.write(data)
keepalive_thread.last_output_time = time.time()
keepalive_thread.last_output = data
sys.stdout.flush()
class KeepAliveThread(threading.Thread):
ROTATE = [" - ", " \\ ", " | ", " / "]
def __init__(self):
threading.Thread.__init__(self)
self.lock = threading.RLock()
self.daemon = True
self.pos = 0
self.output = False
self.before_next_output = ""
self.last_output_time = time.time()
self.last_output = ""
def run(self):
while True:
if (time.time() - self.last_output_time) > 1:
output(self.ROTATE[self.pos], before_next_output="\b\b\b")
self.pos = (self.pos + 1) % len(self.ROTATE)
time.sleep(1)
keepalive_thread = KeepAliveThread()
keepalive_thread.start()
BUFFER_SIZE=200
DELIM_MAJOR = "========================================================================="
ERROR = "*~"*35+"*"
linesbuffer = collections.deque()
for i in range(0, BUFFER_SIZE):
linesbuffer.appendleft('')
def shorten_path(line):
outputline = line
for path in re.finditer('"([^"]+)"', line):
pathname = os.path.normpath(path.group(1))
if os.path.exists(pathname) and os.path.isfile(pathname):
common_path = os.path.commonprefix([top_path, pathname])
relative_path = pathname[len(common_path)+1:]
outputline = outputline.replace(path.group(1), relative_path)
return outputline
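# Illustrative example (path is hypothetical): given a line containing
#   "/home/user/litex-buildenv/build/top.v"
# and a current working directory of /home/user/litex-buildenv, shorten_path()
# rewrites the quoted path to "build/top.v", provided the file exists on disk.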
fsm_triggered = False
found_specials = []
last_path = None
for lineno, rawline in enumerate(sys.stdin.xreadlines()):
log_file.write(rawline)
log_file.flush()
line = rawline.strip('\n\r')
sline = line.strip()
linesbuffer.appendleft(line)
while len(linesbuffer) > BUFFER_SIZE:
linesbuffer.pop()
if line.startswith("make"):
output('\n%s ', line)
continue
if not sline:
continue
if line.startswith("ERROR:"):
linesbuffer.popleft()
output("\n\nError detected! - %s\n%s\n%s\n%s\n%s", line, ERROR, "\n".join(reversed(linesbuffer)), ERROR, rawline)
break
# WARNING:HDLCompiler:1016 - "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/build/atlys_hdmi2usb-hdmi2usbsoc-atlys.v" Line 24382: Port I_LOCK_O is not connected to this instance
# WARNING:HDLCompiler:1016 - "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/build/atlys_hdmi2usb-hdmi2usbsoc-atlys.v" Line 24475: Port IOCLK is not connected to this instance
# WARNING:Xst:3035 - Index value(s) does not match array range for signal <storage_20>, simulation mismatch.
# INFO:Xst:2774 - HDL ADVISOR - KEEP property attached to signal eth_rx_clk may hinder XST clustering optimizations.
keepalive_thread.output = False
if line.startswith("WARNING:"):
output("w")
continue
elif line.startswith("INFO:"):
output("i")
continue
# Different tools start with the following
# Release 14.7 - Map P.20131013 (lin64)
# Copyright (c) 1995-2013 Xilinx, Inc. All rights reserved.
if sline.startswith("Copyright (c)"):
output("\n\n\n%s\n# %-66s #\n%s\n", "#"*70, linesbuffer[1], "#"*70)
continue
######################################################################
# Release 14.7 - xst P.20131013 (lin64) #
######################################################################
# Output a header which looks like this unless it is a summary header
# =========================================================================
# * HDL Parsing *
# =========================================================================
if linesbuffer[0] == DELIM_MAJOR and \
linesbuffer[1].startswith("*") and \
linesbuffer[2] == DELIM_MAJOR:
if "Summary" in linesbuffer[1]:
continue
else:
output("\n\n%s\n* %-66s *\n%s\n", '*'*70, linesbuffer[1][2:-2].strip(), '*'*70)
continue
# When we see a filename, output it. Examples;
# Analyzing Verilog file "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/extcores/lm32/submodule/rtl/lm32_dp_ram.v" into library work
# Parsing verilog file "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/extcores/lm32/submodule/rtl/lm32_include.v" included at line 31.
# Parsing module <lm32_dp_ram>.
# Analyzing Verilog file "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/extcores/lm32/submodule/rtl/lm32_shifter.v" into library work
# Parsing verilog file "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/extcores/lm32/submodule/rtl/lm32_include.v" included at line 50.
# Parsing module <lm32_shifter>.
#for path in re.finditer('"([^"]+)"', line):
# pathname = os.path.normpath(path.group(1))
# if os.path.exists(pathname) and os.path.isfile(pathname):
# if pathname != last_path:
# last_path = pathname
#
# common_path = os.path.commonprefix([top_path, pathname])
# relative_path = pathname[len(common_path)+1:]
#
# output("\n %s ", relative_path)
#########################################################################
# Synthesis Options Summary && 8) Design Summary
###########################################################################
# =========================================================================
# * Synthesis Options Summary *
# =========================================================================
# ---- Source Parameters
# Input File Name : "atlys_hdmi2usb-hdmi2usbsoc-atlys.prj"
# ...
# ...
# =========================================================================
###########################################################################
# =========================================================================
# * Design Summary *
# =========================================================================
# ...
#
# =========================================================================
if line == DELIM_MAJOR:
summary_start = None
for bufferno, bufline in enumerate(linesbuffer):
if bufferno == 0 or bufferno+1 == len(linesbuffer):
continue
sbufline = bufline.strip()
if sbufline != DELIM_MAJOR:
continue
summaryline = linesbuffer[bufferno+1].strip()
if not summaryline.startswith("*") or "Summary" not in summaryline:
break
summary_start = bufferno+2
break
if summary_start:
output("\n\n")
for i in range(summary_start, 0, -1):
sbufline = linesbuffer[i].strip()
if sbufline == DELIM_MAJOR:
output("%s\n", "*"*70)
elif sbufline.startswith('*'):
output("* %-66s *\n", sbufline[2:-2].strip())
else:
if linesbuffer[i].startswith('#'):
output("%s\n", linesbuffer[i][1:])
else:
output("%s\n", linesbuffer[i])
#########################################################################
# 2) HDL Parsing
# -----------------------------------------------------------------------
# Analyzing Verilog file "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/extcores/lm32/submodule/rtl/lm32_dtlb.v" into library work
ANALYZING_VERILOG = "Analyzing Verilog"
if sline.startswith(ANALYZING_VERILOG):
output("\n%s ", shorten_path(sline))
# Parsing package <MDCT_PKG>.
# Parsing module <lm32_addsub>.
# Parsing entity <ByteStuffer>.
# Parsing architecture <RTL> of entity <bytestuffer>.
PARSING = "Parsing "
if sline.startswith(PARSING):
if sline.startswith("Parsing VHDL"):
output("\n%s ", shorten_path(sline))
else:
if sline.endswith('.'):
sline = sline[:-1]
output("\n %s ", shorten_path(sline))
#bits = re.match(PARSING+"([^ ]+) .*<([^>]+)>\\.", sline)
#if bits:
# output("\nParsing %s %s ", bits.group(1), bits.group(2))
#########################################################################
# 3) HDL Elaboration
# -----------------------------------------------------------------------
# Elaborating module <$unit_1>.
ELABORATION = "Elaborating "
if sline.startswith(ELABORATION):
if sline.endswith('.'):
sline = sline[:-1]
output("\n%s ", sline)
#bits = re.match(ELABORATION+"([^ ]+) .*?<([^>]+)>(.+from library <[^>]+>)?\\.", sline)
#if bits:
# output("\nElaborating %s %s ", bits.group(1), bits.group(2))
# Reading initialization file
READING_INIT = "Reading initialization file"
if sline.startswith(READING_INIT):
if sline.endswith('.'):
sline = sline[:-1]
output("\n %s ", sline.replace('\\"', '"'))
#########################################################################
# 4) HDL Synthesis
# -----------------------------------------------------------------------
# Synthesizing Unit <top>.
# Related source file is "/home/tansell/foss/timvideos/hdmi2usb/HDMI2USB-misoc-firmware/build/misoc/build/atlys_hdmi2usb-hdmi2usbsoc-atlys.v".
# Set property "register_balancing = no" for signal <ethmac_tx_cdc_graycounter0_q>.
# Set property "register_balancing = no" for signal <ethmac_tx_cdc_graycounter1_q>.
# Set property "register_balancing = no" for signal <ethmac_rx_cdc_graycounter0_q>.
SYNTH = "Synthesizing Unit"
if sline.startswith(SYNTH):
if sline.endswith('.'):
sline = sline[:-1]
output("\n%s ", sline)
RELATED_SOURCE = "Related source file is"
if sline.startswith(RELATED_SOURCE):
if sline.endswith('.'):
sline = sline[:-1]
output("\n %s ", shorten_path(sline))
# Collect special blocks for output in summary.
# Found 128x24-bit dual-port RAM <Mram_mem> for signal <mem>.
SPECIALS = ["RAM"]
if sline.startswith("Found"):
special_found = False
for special in SPECIALS:
if special in sline:
special_found = True
break
if special_found:
found_specials.append(sline)
# Summary:
# inferred 64 RAM(s).
# inferred 16 Multiplier(s).
# inferred 409 Adder/Subtractor(s).
# inferred 8597 D-type flip-flop(s).
# inferred 140 Comparator(s).
# inferred 1536 Multiplexer(s).
# inferred 6 Combinational logic shifter(s).
# inferred 2 Tristate(s).
# inferred 32 Finite State Machine(s).
# Unit <top> synthesized.
SUMMARY = "Summary:"
SYTH = "synthesized."
if sline.endswith(SYTH):
summary_start = None
for bufferno, bufline in enumerate(linesbuffer):
sbufline = bufline.strip()
if sbufline.endswith(SUMMARY):
summary_start = bufferno
break
if summary_start:
output("\n Summary:\n")
for special in sorted(found_specials):
output(" %s\n", special)
if found_specials:
output(" %s\n", "--")
found_specials = []
for bufferno in range(summary_start-1, 0, -1):
sbufline = linesbuffer[bufferno].strip()
output(" %s\n", sbufline[0].upper()+sbufline[1:-1])
#########################################################################
# 5) Advanced HDL Synthesis
# -----------------------------------------------------------------------
SYNTH_ADV = "Synthesizing (advanced) Unit"
if sline.startswith(SYNTH_ADV):
if sline.endswith('.'):
sline = sline[:-1]
output("\n%s ", sline)
#########################################################################
# 6) Low Level Synthesis
# -----------------------------------------------------------------------
# Analyzing FSM <MFsm> for best encoding.
# Optimizing FSM <FSM_0> on signal <clockdomainsrenamer3_state[1:2]> with user encoding.
if sline.startswith("Analyzing FSM") or sline.startswith("Optimizing FSM"):
if not fsm_triggered:
output("\nAnalyzing and optimizing FSMs ")
fsm_triggered = True
else:
output(".")
continue
# Optimizing unit <JpegEnc> ...
# Optimizing unit <RAMZ_1> ...
OPTIMIZING = "Optimizing unit"
if sline.startswith(OPTIMIZING):
if sline.endswith('.'):
sline = sline[:-1]
output("\n%s ", sline)
# Processing Unit <top> :
PROCESSING = "Processing Unit "
if sline.startswith(PROCESSING):
if sline.endswith(' :'):
sline = sline[:-2]
output("\n%s ", sline)
# Final Macro Processing ...
if sline == "Final Macro Processing ...":
output("\n%s" % sline)
#########################################################################
# 7) Partition Report
# -----------------------------------------------------------------------
# Nothing?
#########################################################################
# 8) Design Summary
# -----------------------------------------------------------------------
######################################################################
# Release 14.7 - ngdbuild P.20131013 (lin64) #
######################################################################
######################################################################
# Release 14.7 - Map P.20131013 (lin64) #
######################################################################
if sline.startswith("Peak Memory Usage"):
summary_start = None
for bufferno, bufline in enumerate(linesbuffer):
sbufline = bufline.strip()
if sbufline.startswith("Design Summary:"):
summary_start = bufferno
break
if summary_start:
output("""\n
**********************************************************************
* Design Summary *
**********************************************************************
""")
for bufferno in range(summary_start-1, 0, -1):
bufline = linesbuffer[bufferno]
output("%s\n", bufline)
output("%s\n", "*" * 70)
######################################################################
# Release 14.7 - par P.20131013 (lin64) #
######################################################################
if sline.startswith("Starting initial Timing Analysis"):
summary_start = None
for bufferno, bufline in enumerate(linesbuffer):
sbufline = bufline.strip()
if sbufline.startswith("Device Utilization Summary:"):
summary_start = bufferno
break
if summary_start:
output("""\n
**********************************************************************
* Device Utilization Summary *
**********************************************************************
""")
for bufferno in range(summary_start-1, 0, -1):
bufline = linesbuffer[bufferno]
output("%s\n", bufline)
output("%s\n", "*" * 70)
if sline.startswith("Phase"):
if "REAL time:" in sline and not "unrouted" in sline:
phase, rtime = sline.split("REAL time:")
output(" (completed in %s)\n", rtime.strip())
else:
bits = sline.split()
if keepalive_thread.last_output[-1] != '\n':
output('\n')
output("%5s %-5s - %s ", bits[0], bits[1], " ".join(bits[2:]))
# Saving bit stream in
if sline.startswith("Saving bit stream in"):
output("\n%s", sline)
# If the line wasn't caught elsewhere, just output a dot.
if not keepalive_thread.output:
output(".")
for lineno, rawline in enumerate(sys.stdin.xreadlines()):
output(rawline)
output("\n\n")
output("Raw output saved in %r\n", sys.argv[1])
|
proto/ptf/l3_host_fwd/test/l3_host_fwd.py | mkruskal-google/PI | 149 | 12680352 | #!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# <NAME> (<EMAIL>)
#
#
import ptf
import os
from ptf import config
import ptf.testutils as testutils
from google.rpc import code_pb2
from base_test import P4RuntimeTest, autocleanup, stringify, ipv4_to_binary
class L3HostFwdTest(P4RuntimeTest):
pass
class FwdTest(L3HostFwdTest):
@autocleanup
def runTest(self):
ip_dst_addr = "10.0.0.1"
ip_dst_addr_str = ipv4_to_binary(ip_dst_addr)
ig_port = self.swports(1)
eg_port = self.swports(2)
# port is 9-bit in v1model, i.e. 2 bytes
eg_port_str = stringify(eg_port, 2)
smac = "\xee\xcd\x00\x7e\x70\x00"
dmac = "\xee\x30\xca\x9d\x1e\x00"
# we do not care about the src mac address or the src IP address
pkt = testutils.simple_tcp_packet(
eth_dst=smac, ip_dst=ip_dst_addr, ip_ttl=64)
# no forwarding entry: packet must be dropped
testutils.send_packet(self, ig_port, pkt)
testutils.verify_no_other_packets(self)
# add a forwarding entry
self.send_request_add_entry_to_action(
"l3_host_fwd", [self.Exact("hdr.ipv4.dst_addr", ip_dst_addr_str)],
"set_nexthop",
[("port", eg_port_str), ("smac", smac), ("dmac", dmac)])
# check that the entry is hit and that no other packets are received
exp_pkt = testutils.simple_tcp_packet(
eth_src=smac, eth_dst=dmac, ip_dst=ip_dst_addr, ip_ttl=63)
testutils.send_packet(self, ig_port, pkt)
testutils.verify_packets(self, exp_pkt, [eg_port])
class DupEntryTest(L3HostFwdTest):
@autocleanup
def runTest(self):
ip_dst_addr_str = "\x0a\x00\x00\x01"
eg_port = self.swports(2)
eg_port_str = stringify(eg_port, 2)
smac = "\xee\xcd\x00\x7e\x70\x00"
dmac = "\xee\x30\xca\x9d\x1e\x00"
def add_entry_once():
self.send_request_add_entry_to_action(
"l3_host_fwd",
[self.Exact("hdr.ipv4.dst_addr", ip_dst_addr_str)],
"set_nexthop",
[("port", eg_port_str), ("smac", smac), ("dmac", dmac)])
add_entry_once()
with self.assertP4RuntimeError():
add_entry_once()
class BadMatchKeyTest(L3HostFwdTest):
@autocleanup
def runTest(self):
ip_dst_addr_str = "\x0a\x00\x00\x01"
bad_ip_dst_addr_str = "\x0a\x00\x00" # missing one byte
eg_port = self.swports(2)
eg_port_str = stringify(eg_port, 2)
smac = "\xee\xcd\x00\x7e\x70\x00"
dmac = "\xee\x30\xca\x9d\x1e\x00"
# missing one byte
with self.assertP4RuntimeError(code_pb2.INVALID_ARGUMENT):
self.send_request_add_entry_to_action(
"l3_host_fwd",
[self.Exact("hdr.ipv4.dst_addr", bad_ip_dst_addr_str)],
"set_nexthop",
[("port", eg_port_str), ("smac", smac), ("dmac", dmac)])
# unexpected match type
with self.assertP4RuntimeError(code_pb2.INVALID_ARGUMENT):
self.send_request_add_entry_to_action(
"l3_host_fwd",
[self.Lpm("hdr.ipv4.dst_addr", ip_dst_addr_str, 24)],
"set_nexthop",
[("port", eg_port_str), ("smac", smac), ("dmac", dmac)])
class BadChecksumTest(L3HostFwdTest):
@autocleanup
def runTest(self):
# TODO
pass
|
dataviva/utils/profanities_filter.py | dogobox/datavivamaster | 126 | 12680358 | """
Module that provides a class that filters profanities
f = ProfanitiesFilter(['bad', 'un\w+'], replacements="-")
example = "I am doing bad ungood badlike things."
print f.clean(example)
# Returns "I am doing --- ------ badlike things."
f.inside_words = True
print f.clean(example)
# Returns "I am doing --- ------ ---like things."
f.complete = False
print f.clean(example)
# Returns "I am doing b-d u----d b-dlike things."
"""
import random
import re
class ProfanitiesFilter(object):
def __init__(self, filterlist, ignore_case=True, replacements="$@%-?!",
complete=True, inside_words=False):
"""
Inits the profanity filter.
filterlist -- a list of regular expressions that
matches words that are forbidden
ignore_case -- ignore capitalization
replacements -- string with characters to replace the forbidden word
complete -- completely remove the word or keep the first and last char?
inside_words -- search inside other words?
"""
self.badwords = filterlist
self.ignore_case = ignore_case
self.replacements = replacements
self.complete = complete
self.inside_words = inside_words
def _make_clean_word(self, length):
"""
Generates a random replacement string of a given length
using the chars in self.replacements.
"""
return ''.join([random.choice(self.replacements) for i in
range(length)])
def __replacer(self, match):
value = match.group()
if self.complete:
return self._make_clean_word(len(value))
else:
return value[0]+self._make_clean_word(len(value)-2)+value[-1]
def clean(self, text):
"""Cleans a string from profanity."""
regexp_insidewords = {
True: r'(%s)',
False: r'\b(%s)\b',
}
regexp = (regexp_insidewords[self.inside_words] %
'|'.join(self.badwords))
r = re.compile(regexp, re.IGNORECASE if self.ignore_case else 0)
return r.sub(self.__replacer, text)
|
example-import-image/script.py | teachthenet/TeachCraft-Challenges | 503 | 12680375 | <filename>example-import-image/script.py
"""
This script does the following:
- Take an image as input, and read it pixel by pixel.
- For each pixel in the image, set a block in the minecraft world closest to the color of that pixel.
- End result should be pixel art in minecraft world.
- Use mario.gif in this directory to start with.
Try to understand how this script works - it uses various data structures and functions.
Try to figure out how it takes an image as input, and change the image to something else you'd like to insert into the minecraft world.
NOTE! This script requires installing the Python library 'pillow' - instructions located here:
https://pillow.readthedocs.org/en/latest/installation.html
"""
import sys
sys.path.append("../")
import mcpi.minecraft as minecraft
mc = minecraft.Minecraft.create(address="172.16.31.10", name="seanybob") #NOTE - replace "seanybob" with your name
from math import sqrt
from PIL import Image
# Possible blocks in (Name, ID, (RGB1,RGB2,..),Data)
#RGBs are used to color match.
possibleBlocks = (
("Air", 0, ( (0, 136, 255) ,),0),
("Smooth Stone", 1, ( (125,125, 125) ,),0),
("Dirt", 3, ( (133,96,66),),0),
("Cobblestone", 4, ( (117,117,117),),0),
("Wooden Plank", 5, ( (156,127,78),),0),
("Bedrock", 7, ( (83,83,83),),0),
("Sand", 12, ( (217,210,158),),0),
("Gravel", 13, ( (136, 126, 125),),0),
("Gold Ore", 14, ( (143,139,124),),0),
("Iron Ore", 15, ( (135,130,126),),0),
("Coal Ore", 16, ( (115,115,115),),0),
("Wood", 17, ( (154,125,77),),0),
("Sponge", 19, ( (182,182,57),),0),
("White Wool", 35, ( (221,221,221),),0),
("Orange Wool", 35, ( (233,126,55),),1),
("Magenta Wool", 35, ( (179,75,200),),2),
("Light Blue Wool", 35, ( (103,137,211),),3),
("Yellow Wool", 35, ( (192,179,28),),4),
("Light Green Wool", 35, ( (59,187,47),),5),
("Pink Wool", 35, ( (217,132,153),),6),
("Dark Gray Wool", 35, ( (66,67,67),),7),
("Gray Wool", 35, ( (157,164,165),),8),
("Cyan Wool", 35, ( (39,116,148),),9),
("Purple Wool", 35, ( (128,53,195),),10),
("Blue Wool", 35, ( (39,51,153),),11),
("Brown Wool", 35, ( (85,51,27),),12),
("Dark Green Wool", 35, ( (55,76,24),),13),
("Red Wool", 35, ( (162,44,42),),14),
("Black Wool", 35, ( (26,23,23),),15),
("Gold", 41, ( (249,236,77),),0),
("Iron", 42, ( (230,230,230),),0),
("TwoHalves", 43, ( (159,159,159),),0),
("Brick", 45, ( (155,110,97),),0),
("Mossy Cobblestone", 48, ( (90,108,90),),0),
("Obsidian", 49, ( (20,18,29),),0),
("Diamond Ore", 56, ( (129,140,143),),0),
("Diamond Block", 57, ( (99,219,213),),0),
("Workbench", 58, ( (107,71,42),),0),
("Redstone Ore", 73, ( (132,107,107),),0),
("Snow Block", 80, ( (239,251,251),),0),
("Clay", 82, ( (158,164,176),),0),
("Jukebox", 84, ( (107,73,55),),0),
("Pumpkin", 86, ( (192,118,21),),0),
("Netherrack", 87, ( (110,53,51),),0),
("Soul Sand", 88, ( (84,64,51),),0),
("Glowstone", 89, ( (137,112,64),),0)
)
def getBlockFromColor(RGB):
smallestDistIndex = -1
smallestDist = 300000
curIndex = 0
for block in possibleBlocks:
for blockRGB in block[2]:
curDist = getColorDist(RGB, blockRGB)
if (curDist < smallestDist):
smallestDist = curDist
smallestDistIndex = curIndex
curIndex = curIndex + 1
if (smallestDistIndex == -1):
return -1
return possibleBlocks[smallestDistIndex]
def getColorDist(colorRGB, blockRGB):
return sqrt( pow(colorRGB[0]-blockRGB[0],2) + pow(colorRGB[1]-blockRGB[1],2) + pow(colorRGB[2]-blockRGB[2],2))
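# Worked example (illustrative pixel value): an RGB of (250, 235, 80) lies
# closest, by the Euclidean distance above, to the "Gold" entry (249, 236, 77),
# so getBlockFromColor((250, 235, 80)) returns ("Gold", 41, ((249, 236, 77),), 0).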
pos = mc.player.getPos()
maxsize = (100, 100)
im = Image.open('mario.gif')
im.thumbnail(maxsize, Image.ANTIALIAS)
rgb_im = im.convert('RGB')
rows, columns = rgb_im.size
print rows, columns
for r in range(rows):
for c in range(columns):
rgb = rgb_im.getpixel((r, c))
mc_block = getBlockFromColor(rgb)
mc.setBlock(pos.x+r, pos.y, pos.z+c, mc_block[1])
|
LeetCode/0064_Minimum_Path_Sum.py | Achyut-sudo/PythonAlgorithms | 144 | 12680383 | <gh_stars>100-1000
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
rows=len(grid)
columns=len(grid[0])
for i in range(1,columns):
grid[0][i]+=grid[0][i-1]
for j in range(1,rows):
grid[j][0]+=grid[j-1][0]
for k in range(1,rows):
for l in range(1,columns):
grid[k][l]+=min(grid[k][l-1],grid[k-1][l])
return grid[-1][-1]
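# The method above does in-place dynamic programming: every cell is updated to
# hold the minimum path sum from the top-left corner, so the bottom-right cell
# ends up with the answer in O(rows * columns) time and O(1) extra space.
# Illustrative check (classic example input, not part of the submission):
#   Solution().minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # -> 7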
|
InvenTree/build/migrations/0006_auto_20190913_1407.py | ArakniD/InvenTree | 656 | 12680387 | <filename>InvenTree/build/migrations/0006_auto_20190913_1407.py
# Generated by Django 2.2.5 on 2019-09-13 14:07
import InvenTree.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('build', '0005_auto_20190604_2217'),
]
operations = [
migrations.AlterField(
model_name='build',
name='URL',
field=InvenTree.fields.InvenTreeURLField(blank=True, help_text='Link to external URL'),
),
]
|
modoboa/policyd/core.py | HarshCasper/modoboa | 1,602 | 12680388 | <filename>modoboa/policyd/core.py
"""Core components of the policy daemon."""
import asyncio
import concurrent.futures
from email.message import EmailMessage
import logging
import aiosmtplib
from dateutil.relativedelta import relativedelta
import aioredis
from django.conf import settings
from django.db import connections
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils import translation
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.admin import constants as admin_constants
from modoboa.admin import models as admin_models
from modoboa.core import models as core_models
from modoboa.lib.email_utils import split_mailbox
from . import constants
logger = logging.getLogger("modoboa.policyd")
SUCCESS_ACTION = b"dunno"
FAILURE_ACTION = b"defer_if_permit Daily limit reached, retry later"
def close_db_connections(func, *args, **kwargs):
"""
Make sure to close all connections to DB.
To use in threads.
"""
def _close_db_connections(*args, **kwargs):
ret = None
try:
ret = func(*args, **kwargs)
finally:
for conn in connections.all():
conn.close()
return ret
return _close_db_connections
async def wait_for(dt):
"""sleep until the specified datetime."""
one_day = 86400
while True:
now = timezone.now()
remaining = (dt - now).total_seconds()
if remaining < one_day:
break
# asyncio.sleep doesn't like long sleeps, so don't sleep more
# than a day at a time
await asyncio.sleep(one_day)
await asyncio.sleep(remaining)
async def run_at(dt, coro, *args):
"""Run coroutine at given datetime."""
await wait_for(dt)
return await coro(*args)
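# Illustrative scheduling call (this is exactly how reset_counters is
# rescheduled further below):
#   asyncio.ensure_future(run_at(get_next_execution_dt(), reset_counters))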
@close_db_connections
def get_local_config():
"""Return local configuration."""
return core_models.LocalConfig.objects.first()
@close_db_connections
def get_notification_recipients():
"""Return superadmins with a mailbox."""
return (
core_models.User.objects
.filter(is_superuser=True, mailbox__isnull=False)
)
@close_db_connections
def create_alarm(ltype, name):
"""Create a new alarm."""
title = _("Daily sending limit reached")
internal_name = "sending_limit"
if ltype == "domain":
domain = admin_models.Domain.objects.get(name=name)
domain.alarms.create(title=title, internal_name=internal_name)
else:
localpart, domain = split_mailbox(name)
mailbox = admin_models.Mailbox.objects.get(
address=localpart, domain__name=domain)
mailbox.alarms.create(
domain=mailbox.domain, title=title, internal_name=internal_name)
async def notify_limit_reached(ltype, name):
"""Send a notification to super admins about item."""
ltype_translations = {
"account": ugettext_lazy("account"),
"domain": ugettext_lazy("domain")
}
# We're going to execute sync code so we need an executor
executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(executor, get_local_config),
loop.run_in_executor(executor, get_notification_recipients),
loop.run_in_executor(executor, create_alarm, ltype, name),
]
lc, recipients, junk = await asyncio.gather(*futures)
sender = lc.parameters.get_value("sender_address", app="core")
for recipient in recipients:
with translation.override(recipient.language):
content = render_to_string(
"policyd/notifications/limit_reached.html", {
"ltype": ltype_translations[ltype], "name": name
})
subject = _("[modoboa] Sending limit reached")
msg = EmailMessage()
msg["From"] = sender
msg["To"] = recipient.email
msg["Subject"] = subject
msg.set_content(content)
await aiosmtplib.send(msg)
async def decrement_limit(rclient, ltype, name):
"""Decrement the given limit by one."""
new_counter = await rclient.hincrby(constants.REDIS_HASHNAME, name, -1)
if new_counter <= 0:
logger.info("Limit reached for {} {}".format(ltype, name))
asyncio.ensure_future(notify_limit_reached(ltype, name))
async def apply_policies(attributes):
"""Apply defined policies to received request."""
sasl_username = attributes.get("sasl_username")
if not sasl_username:
return SUCCESS_ACTION
rclient = await aioredis.create_redis_pool(settings.REDIS_URL)
decr_domain = False
decr_user = False
localpart, domain = split_mailbox(sasl_username)
if await rclient.hexists(constants.REDIS_HASHNAME, domain):
counter = await rclient.hget(constants.REDIS_HASHNAME, domain)
logger.info("Domain {} current counter: {}".format(domain, counter))
if int(counter) <= 0:
return FAILURE_ACTION
decr_domain = True
if await rclient.hexists(constants.REDIS_HASHNAME, sasl_username):
counter = await rclient.hget(constants.REDIS_HASHNAME, sasl_username)
logger.info("Account {} current counter: {}".format(
sasl_username, counter))
if int(counter) <= 0:
return FAILURE_ACTION
decr_user = True
if decr_domain:
await decrement_limit(rclient, "domain", domain)
if decr_user:
await decrement_limit(rclient, "account", sasl_username)
rclient.close()
await rclient.wait_closed()
logger.debug("Let it pass")
return SUCCESS_ACTION
async def handle_connection(reader, writer):
"""Coroutine to handle a new connection to the server."""
action = SUCCESS_ACTION
try:
logger.debug("Reading data")
data = await reader.readuntil(b"\n\n")
except asyncio.IncompleteReadError:
pass
else:
attributes = {}
for line in data.decode().split("\n"):
if not line:
continue
try:
name, value = line.split("=")
except ValueError:
continue
attributes[name] = value
state = attributes.get("protocol_state")
if state == "RCPT":
logger.debug("Applying policies")
action = await apply_policies(attributes)
logger.debug("Done")
logger.debug("Sending action %s", action)
writer.write(b"action=" + action + b"\n\n")
await writer.drain()
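# Example of a request this coroutine parses (Postfix policy delegation
# protocol; the attribute values are illustrative):
#   request=smtpd_access_policy
#   protocol_state=RCPT
#   sasl_username=user@example.com
#   (an empty line terminates the request)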
async def new_connection(reader, writer):
try:
await asyncio.wait_for(handle_connection(reader, writer), timeout=5)
except asyncio.TimeoutError as err:
logger.warning("Timeout received while handling connection: %s", err)
finally:
writer.close()
if hasattr(writer, "wait_closed"):
# Python 3.7+ only
await writer.wait_closed()
logger.info("exit")
def get_next_execution_dt():
"""Return next execution date and time."""
return (timezone.now() + relativedelta(days=1)).replace(
hour=0, minute=0, second=0)
@close_db_connections
def get_domains_to_reset():
"""
Return a list of domain to reset.
We also close all associated alarms.
"""
qset = admin_models.Domain.objects.filter(message_limit__isnull=False)
admin_models.Alarm.objects.filter(
internal_name="limit_reached", domain__in=qset,
status=admin_constants.ALARM_OPENED
).update(
status=admin_constants.ALARM_CLOSED, closed=timezone.now()
)
return qset
@close_db_connections
def get_mailboxes_to_reset():
"""
Return a list of mailboxes to reset.
We also close all associated alarms.
"""
qset = (
admin_models.Mailbox.objects.filter(message_limit__isnull=False)
.select_related("domain")
)
admin_models.Alarm.objects.filter(
internal_name="limit_reached", mailbox__in=qset,
status=admin_constants.ALARM_OPENED
).update(
status=admin_constants.ALARM_CLOSED, closed=timezone.now()
)
return qset
async def reset_counters():
"""Reset all counters."""
rclient = await aioredis.create_redis_pool(settings.REDIS_URL)
logger.info("Resetting all counters")
# We're going to execute sync code so we need an executor
executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(executor, get_domains_to_reset),
loop.run_in_executor(executor, get_mailboxes_to_reset)
]
domains, mboxes = await asyncio.gather(*futures)
for domain in domains:
rclient.hset(
constants.REDIS_HASHNAME, domain.name, domain.message_limit)
for mb in mboxes:
rclient.hset(
constants.REDIS_HASHNAME, mb.full_address, mb.message_limit)
rclient.close()
await rclient.wait_closed()
# reschedule
asyncio.ensure_future(run_at(get_next_execution_dt(), reset_counters))
def start_reset_counters_coro():
"""Start coroutine."""
first_time = (timezone.now() + relativedelta(days=1)).replace(
hour=0, minute=0, second=0)
asyncio.ensure_future(run_at(first_time, reset_counters))
|
tests/test_lr_schedulers.py | breezedeus/cnstd | 266 | 12680395 | import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
from cnstd.lr_scheduler import WarmupCosineAnnealingRestarts
class NullModule(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(1, 1)
ori_lr = 5e-4
model = NullModule()
optimizer = torch.optim.Adam(model.parameters())
def plot_lr(scheduler, step=900):
lrs = []
for i in range(step):
lr = optimizer.param_groups[0]['lr']
scheduler.step()
lrs.append(lr)
plt.plot(lrs)
plt.show()
def test_CosineAnnealingWarmRestarts():
CAW = lr_scheduler.CosineAnnealingWarmRestarts(
optimizer, T_0=200, T_mult=1, eta_min=ori_lr / 10.0
)
plot_lr(CAW, step=1000)
def test_WarmupCosineAnnealingRestarts():
CAW = WarmupCosineAnnealingRestarts(
optimizer,
first_cycle_steps=95600,
cycle_mult=1.0,
max_lr=0.001,
min_lr=0.0001,
warmup_steps=100,
gamma=1.0,
)
plot_lr(CAW, step=95600)
def test_CyclicLR():
Cyc = lr_scheduler.CyclicLR(
optimizer,
base_lr=ori_lr / 10.0,
max_lr=ori_lr,
step_size_up=200,
cycle_momentum=False,
)
plot_lr(Cyc, 1000)
def test_OneCycleLR():
Cyc = lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, epochs=20, steps_per_epoch=50,
)
plot_lr(Cyc, 1000)
|
5]. Projects/Desktop Development/GUI Projects/05). Advanced Notepad/TextEditor.py | MLinesCode/The-Complete-FAANG-Preparation | 6,969 | 12680408 | <gh_stars>1000+
import tkinter as tk
from tkinter import font, colorchooser, filedialog, messagebox
from tkinter import ttk
import os
main_application = tk.Tk()
main_application.geometry("1450x700+50+40")
# main_application.geometry("1530x800+0+0")
main_application.title("Text Editor:- By <NAME>")
# MAIN MENU
main_menu = tk.Menu()
# FILE ICONS
new_icon = tk.PhotoImage(file='icons/new.png')
open_icon = tk.PhotoImage(file='icons/open.png')
save_icon = tk.PhotoImage(file='icons/save.png')
save_as_icon = tk.PhotoImage(file='icons/save_as.png')
exit_icon = tk.PhotoImage(file='icons/exit.png')
file = tk.Menu(main_menu, tearoff=False)
# EDIT
copy_icon = tk.PhotoImage(file='icons/copy.png')
paste_icon = tk.PhotoImage(file='icons/paste.png')
cut_icon = tk.PhotoImage(file='icons/cut.png')
clear_all_icon = tk.PhotoImage(file='icons/clear_all.png')
find_icon = tk.PhotoImage(file='icons/find.png')
edit = tk.Menu(main_menu, tearoff=False)
# VIEW ICONS
tool_bar_icon = tk.PhotoImage(file='icons/tool_bar.png')
status_bar_icon = tk.PhotoImage(file='icons/status_bar.png')
view = tk.Menu(main_menu, tearoff=False)
# COLOR THEME
light_default_icon = tk.PhotoImage(file='icons/light_default.png')
light_plus_icon = tk.PhotoImage(file='icons/light_plus.png')
dark_icon = tk.PhotoImage(file='icons/dark.png')
red_icon = tk.PhotoImage(file='icons/red.png')
monokai_icon = tk.PhotoImage(file='icons/monokai.png')
night_blue_icon = tk.PhotoImage(file='icons/night_blue.png')
color_theme = tk.Menu(main_menu, tearoff=False)
theme_choice = tk.StringVar()
color_icons = (light_default_icon, light_plus_icon, dark_icon, red_icon, monokai_icon, night_blue_icon)
color_dict = {
'light_default_icon' : ('#000000', '#ffffff'),
'light_plus_icon' : ('#474747', '#e0e0e0'),
'dark_icon' : ('#c4c4c4', '#2d2d2d'),
'red_icon' : ('#2d2d2d', '#ffe8e8'),
'monokai_icon' : ('#d3b774', '#474747'),
'night_blue_icon' : ('#ededed', '#6b9dc2')
}
# CASCADE
main_menu.add_cascade(label='File', menu=file)
main_menu.add_cascade(label='Edit', menu=edit)
main_menu.add_cascade(label='View', menu=view)
main_menu.add_cascade(label='Color Theme', menu=color_theme)
# TOOLBAR
tool_bar = ttk.Label(main_application)
tool_bar.pack(side=tk.TOP, fill=tk.X)
#FONT BOX
font_tuple = tk.font.families()
font_family = tk.StringVar()
font_box = ttk.Combobox(tool_bar, width=30, textvariable=font_family, state='readonly')
font_box['values'] = font_tuple
font_box.current(font_tuple.index('Arial'))
font_box.grid(row=0, column=0, padx=5)
# SIZE BOX
size_var = tk.IntVar()
font_size = ttk.Combobox(tool_bar, width=14, textvariable=size_var, state='readonly')
font_size['values'] = tuple(range(8, 81))
font_size.current(4)
font_size.grid(row=0, column=1, padx=5)
# BOLD BUTTON
bold_icon = tk.PhotoImage(file='icons/bold.png')
bold_btn = ttk.Button(tool_bar, image=bold_icon)
bold_btn.grid(row=0, column=2, padx=5)
# ITALIC BUTTON
italic_icon = tk.PhotoImage(file='icons/italic.png')
italic_btn = ttk.Button(tool_bar, image=italic_icon)
italic_btn.grid(row=0, column=3, padx=5)
# UNDERLINE BUTTON
underline_icon = tk.PhotoImage(file='icons/underline.png')
underline_btn = ttk.Button(tool_bar, image=underline_icon)
underline_btn.grid(row=0, column=4, padx=5)
# FONT COLOR BUTTON
font_color_icon = tk.PhotoImage(file='icons/font_color.png')
font_color_btn = ttk.Button(tool_bar, image=font_color_icon)
font_color_btn.grid(row=0, column=5, padx=5)
# ALIGN LEFT BUTTON
align_left_icon = tk.PhotoImage(file='icons/align_left.png')
align_left_btn = ttk.Button(tool_bar, image=align_left_icon)
align_left_btn.grid(row=0, column=6, padx=5)
# ALIGN CENTER BUTTON
align_center_icon = tk.PhotoImage(file='icons/align_center.png')
align_center_btn = ttk.Button(tool_bar, image=align_center_icon)
align_center_btn.grid(row=0, column=7, padx=5)
# ALIGN RIGFT BUTTON
align_right_icon = tk.PhotoImage(file='icons/align_right.png')
align_right_btn = ttk.Button(tool_bar, image=align_right_icon)
align_right_btn.grid(row=0, column=8, padx=5)
# TEXT EDITOR
text_editor = tk.Text(main_application)
text_editor.config(wrap='word', relief=tk.FLAT)
scroll_bar = tk.Scrollbar(main_application)
text_editor.focus_set()
scroll_bar.pack(side=tk.RIGHT, fill=tk.Y)
text_editor.pack(fill=tk.BOTH, expand=True)
scroll_bar.config(command=text_editor.yview)
text_editor.config(yscrollcommand=scroll_bar.set)
current_font_family = 'Arial'
current_font_size = 12
def change_font(event=None):
global current_font_family
current_font_family = font_family.get()
text_editor.configure(font=(current_font_family, current_font_size))
def change_fontsize(event=None):
global current_font_size
current_font_size = size_var.get()
text_editor.configure(font=(current_font_family, current_font_size))
font_box.bind("<<ComboboxSelected>>", change_font)
font_size.bind("<<ComboboxSelected>>", change_fontsize)
# BUTTONS FUNCTIONALITY
# BOLD
def change_bold():
text_property = tk.font.Font(font=text_editor['font'])
if text_property.actual()['weight'] == 'normal':
text_editor.configure(font=(current_font_family, current_font_size, 'bold'))
if text_property.actual()['weight'] == 'bold':
text_editor.configure(font=(current_font_family, current_font_size, 'normal'))
bold_btn.configure(command=change_bold)
# ITALIC
def change_italic():
text_property = tk.font.Font(font=text_editor['font'])
if text_property.actual()['slant'] == 'roman':
text_editor.configure(font=(current_font_family, current_font_size, 'italic'))
if text_property.actual()['slant'] == 'italic':
text_editor.configure(font=(current_font_family, current_font_size, 'normal'))
italic_btn.configure(command=change_italic)
# UNDERSCORE
def change_underline():
text_property = tk.font.Font(font=text_editor['font'])
if text_property.actual()['underline'] == 0:
text_editor.configure(font=(current_font_family, current_font_size, 'underline'))
if text_property.actual()['underline'] == 1:
text_editor.configure(font=(current_font_family, current_font_size, 'normal'))
underline_btn.configure(command=change_underline)
# FONT COLOR
def change_font_color():
color_var = tk.colorchooser.askcolor()
text_editor.configure(fg=color_var[1])
font_color_btn.configure(command=change_font_color)
# ALIGN LEFT
def align_left():
text_content = text_editor.get(1.0, 'end')
text_editor.tag_config('left', justify=tk.LEFT)
text_editor.delete(1.0, tk.END)
text_editor.insert(tk.INSERT, text_content, 'left')
align_left_btn.configure(command=align_left)
# ALIGN CENTER
def align_center():
text_content = text_editor.get(1.0, 'end')
text_editor.tag_config('center', justify=tk.CENTER)
text_editor.delete(1.0, tk.END)
text_editor.insert(tk.INSERT, text_content, 'center')
align_center_btn.configure(command=align_center)
# ALIGN RIGHT
def align_right():
text_content = text_editor.get(1.0, 'end')
text_editor.tag_config('right', justify=tk.RIGHT)
text_editor.delete(1.0, tk.END)
text_editor.insert(tk.INSERT, text_content, 'right')
align_right_btn.configure(command=align_right)
text_editor.configure(font=('Arial', 12))
# STATUS BAR
status_bar = ttk.Label(main_application, text='Status Bar')
status_bar.pack(side=tk.BOTTOM)
text_changed = False
def changed(event=None):
global text_changed
if text_editor.edit_modified():
text_changed = True
words = len(text_editor.get(1.0, 'end-1c').split())
characters = len(text_editor.get(1.0, 'end-1c'))
        status_bar.config(text=f'Characters : {characters} Words : {words}')
text_editor.edit_modified(False)
text_editor.bind('<<Modified>>', changed)
# MAIN MENU FUNCTIONALITY
url = ''
def new_file(event=None):
global url
url = ''
text_editor.delete(1.0, tk.END)
file.add_command(label='New', image=new_icon, compound=tk.LEFT, accelerator='Ctrl+N', command=new_file)
def open_file(event=None):
global url
url = filedialog.askopenfilename(initialdir=os.getcwd(), title='Select File', filetypes=(('Text File', '*.txt'), ('All Files', '*.*')))
try:
with open(url, 'r') as fr:
text_editor.delete(1.0, tk.END)
            text_editor.insert(1.0, fr.read())
except FileNotFoundError:
return
except:
return
main_application.title(os.path.basename(url))
file.add_command(label='Open', image=open_icon, compound=tk.LEFT, accelerator='Ctrl+O', command=open_file)
def save_file(event=None):
global url
try:
if url:
content = str(text_editor.get(1.0, tk.END))
with open(url, 'w', encoding='utf-8') as fw:
fw.write(content)
else:
url = filedialog.asksaveasfile(mode = 'w', defaultextension='.txt', filetypes=(('Text File', '*.txt'), ('All Files', '*.*')))
content2 = text_editor.get(1.0, tk.END)
url.write(content2)
url.close()
except:
return
file.add_command(label='Save', image=save_icon, compound=tk.LEFT, accelerator='Ctrl+S', command=save_file)
def save_as(event=None):
global url
try:
content = text_editor.get(1.0, tk.END)
url = filedialog.asksaveasfile(mode = 'w', defaultextension='.txt', filetypes=(('Text File', '*.txt'), ('All Files', '*.*')))
url.write(content)
url.close()
except:
return
file.add_command(label='Save As', image=save_as_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+S', command=save_as)
def exit_func(event=None):
global url, text_changed
try:
if text_changed:
mbox = messagebox.askyesnocancel('Warning', 'Do you want to Save the file ?')
if mbox is True:
if url:
content = text_editor.get(1.0, tk.END)
with open(url, 'w', encoding='utf-8') as fw:
fw.write(content)
main_application.destroy()
else:
content2 = str(text_editor.get(1.0, tk.END))
url = filedialog.asksaveasfile(mode = 'w', defaultextension='.txt', filetypes=(('Text File', '*.txt'), ('All Files', '*.*')))
url.write(content2)
url.close()
main_application.destroy()
elif mbox is False:
main_application.destroy()
else:
main_application.destroy()
except:
return
file.add_command(label='Exit', image=exit_icon, compound=tk.LEFT, accelerator='Ctrl+Q', command=exit_func)
def find_func(event=None):
def find():
word = find_input.get()
text_editor.tag_remove('match', '1.0', tk.END)
matches=0
if word:
start_pos = '1.0'
while True:
start_pos = text_editor.search(word, start_pos, stopindex=tk.END)
if not start_pos:
break
end_pos = f'{start_pos}+{len(word)}c'
text_editor.tag_add('match', start_pos, end_pos)
matches += 1
start_pos = end_pos
text_editor.tag_config('match', foreground='red', background='yellow')
def replace():
word = find_input.get()
        replace_text = replace_input.get()
content = text_editor.get(1.0, tk.END)
new_content = content.replace(word, replace_text)
text_editor.delete(1.0, tk.END)
text_editor.insert(1.0, new_content)
find_dialogue = tk.Toplevel()
find_dialogue.geometry('450x250+500+200')
find_dialogue.title('Find')
find_dialogue.resizable(0, 0)
# FRAME
find_frame = ttk.LabelFrame(find_dialogue, text='Find/Replace')
find_frame.pack(pady=20)
# LABELS
text_find_label = ttk.Label(find_frame, text='Find: ')
text_replace_label = ttk.Label(find_frame, text='Replace: ')
# ENTRY
find_input = ttk.Entry(find_frame, width=30)
    replace_input = ttk.Entry(find_frame, width=30)
# BUTTON
find_button = ttk.Button(find_frame, text='Find', command=find)
replace_button = ttk.Button(find_frame, text='Replace', command=replace)
# LABEL GRID
text_find_label.grid(row=0, column=0, padx=4, pady=4)
text_replace_label.grid(row=1, column=0, padx=4, pady=4)
# ENTRY GRID
find_input.grid(row=0, column=1, padx=4, pady=4)
replace_input.grid(row=1, column=1, padx=4, pady=4)
# BUTTON GRID
find_button.grid(row=2, column=0, padx=4, pady=4)
replace_button.grid(row=2, column=1, padx=4, pady=4)
find_dialogue.mainloop()
# EDIT COMMAND
edit.add_command(label='Copy', image=copy_icon, compound=tk.LEFT, accelerator='Ctrl+C', command=lambda:text_editor.event_generate("<Control-c>"))
edit.add_command(label='Paste', image=paste_icon, compound=tk.LEFT, accelerator='Ctrl+V', command=lambda:text_editor.event_generate("<Control-v>"))
edit.add_command(label='Cut', image=cut_icon, compound=tk.LEFT, accelerator='Ctrl+X', command=lambda:text_editor.event_generate("<Control-x>"))
edit.add_command(label='Clear All', image=clear_all_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+X', command=lambda:text_editor.delete(1.0, tk.END))
edit.add_command(label='Find', image=find_icon, compound=tk.LEFT, accelerator='Ctrl+F', command=find_func)
# VIEW CHECK BUTTON
show_statusbar = tk.BooleanVar()
show_statusbar.set(True)
show_toolbar = tk.BooleanVar()
show_toolbar.set(True)
def hide_toolbar():
global show_toolbar
if show_toolbar:
tool_bar.pack_forget()
show_toolbar = False
else:
text_editor.pack_forget()
status_bar.pack_forget()
tool_bar.pack(side=tk.TOP, fill=tk.X)
text_editor.pack(fill=tk.BOTH, expand=True)
status_bar.pack(side=tk.BOTTOM)
show_toolbar = True
def hide_statusbar():
global show_statusbar
if show_statusbar:
status_bar.pack_forget()
show_statusbar = False
else:
status_bar.pack(side=tk.BOTTOM)
show_statusbar = True
view.add_checkbutton(label='Tool Bar', onvalue=True, offvalue=0, variable=show_toolbar, image=tool_bar_icon, compound=tk.LEFT, command=hide_toolbar)
view.add_checkbutton(label='Status Bar', onvalue=1, offvalue=False, variable=show_statusbar, image=status_bar_icon, compound=tk.LEFT, command=hide_statusbar)
# COLOR THEME
def change_theme():
chosen_theme = theme_choice.get()
color_tuple = color_dict.get(chosen_theme)
fg_color, bg_color = color_tuple[0], color_tuple[1]
text_editor.config(background=bg_color, fg=fg_color)
count=0
for i in color_dict:
color_theme.add_radiobutton(label=i, image=color_icons[count], variable=theme_choice, compound=tk.LEFT, command=change_theme)
count += 1
main_application.config(menu=main_menu)
# BIND SHORTCUT KEYS
main_application.bind("<Control-n>", new_file)
main_application.bind("<Control-o>", open_file)
main_application.bind("<Control-s>", save_file)
main_application.bind("<Control-Alt-s>", save_as)
main_application.bind("<Control-q>", exit_func)
main_application.bind("<Control-f>", find_func)
main_application.mainloop() |
oracle_analysis/motion_pattern_analysis.py | noahcao/DanceTrack | 137 | 12680422 | <reponame>noahcao/DanceTrack
"""
Script to calculate the average IoU of the same obejct on consecutive
frames, and the relative switch frequency (Figure3(b) and Figure3(c)).
The original data in paper is calculated on all sets: train+val+test.
On the train-set:
* Average IoU on consecutive frames = 0.894
* Relative Position Switch frequency = 0.031
On the val-set:
* Average IoU on consecutive frames = 0.909
* Relative Position Switch frequency = 0.030
The splitting of subsets is
"""
import numpy as np
import os
source_dir = "train"
# source_dir = "val"
def box_area(arr):
# arr: np.array([[x1, y1, x2, y2]])
width = arr[:, 2] - arr[:, 0]
height = arr[:, 3] - arr[:, 1]
return width * height
def _box_inter_union(arr1, arr2):
# arr1 of [N, 4]
# arr2 of [N, 4]
area1 = box_area(arr1)
area2 = box_area(arr2)
# Intersection
top_left = np.maximum(arr1[:, :2], arr2[:, :2]) # [[x, y]]
bottom_right = np.minimum(arr1[:, 2:], arr2[:, 2:]) # [[x, y]]
wh = bottom_right - top_left
# clip: if boxes not overlap then make it zero
intersection = wh[:, 0].clip(0) * wh[:, 1].clip(0)
#union
union = area1 + area2 - intersection
return intersection, union
def box_iou(arr1, arr2):
# arr1[N, 4]
# arr2[N, 4]
# N = number of bounding boxes
assert(arr1[:, 2:] > arr1[:, :2]).all()
assert(arr2[:, 2:] > arr2[:, :2]).all()
inter, union = _box_inter_union(arr1, arr2)
iou = inter / union
return iou
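# Worked example (illustrative boxes in [x1, y1, x2, y2] format):
#   box_iou(np.array([[0, 0, 10, 10]]), np.array([[5, 5, 15, 15]]))
# intersects over 5 x 5 = 25, unions over 100 + 100 - 25 = 175,
# giving an IoU of 25 / 175 ~= 0.143.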
def consecutive_iou(annos):
"""
calculate the IoU over bboxes on the consecutive frames
"""
max_frame = int(annos[:, 0].max())
min_frame = int(annos[:, 0].min())
total_iou = 0
total_frequency = 0
for find in range(min_frame, max_frame):
anno_cur = annos[np.where(annos[:,0]==find)]
anno_next = annos[np.where(annos[:,0]==find+1)]
ids_cur = np.unique(anno_cur[:,1])
ids_next = np.unique(anno_next[:,1])
common_ids = np.intersect1d(ids_cur, ids_next)
for tid in common_ids:
cur_box = anno_cur[np.where(anno_cur[:,1]==tid)][:, 2:6]
next_box = anno_next[np.where(anno_next[:,1]==tid)][:, 2:6]
cur_box[:, 2:] += cur_box[:, :2]
next_box[:, 2:] += next_box[:, :2]
iou = box_iou(cur_box, next_box).item()
total_iou += iou
total_frequency += 1
return total_iou, total_frequency
def center(box):
return (box[0]+0.5*box[2], box[1]+0.5*box[3])
def relative_switch(annos):
"""
calculate the frequency of relative position switch regarding center location
"""
max_frame = int(annos[:, 0].max())
min_frame = int(annos[:, 0].min())
switch = 0
sw_freq = 0
for find in range(min_frame, max_frame):
anno_cur = annos[np.where(annos[:,0]==find)]
anno_next = annos[np.where(annos[:,0]==find+1)]
ids_cur = np.unique(anno_cur[:,1])
ids_next = np.unique(anno_next[:,1])
common_ids = np.intersect1d(ids_cur, ids_next)
for id1 in common_ids:
for id2 in common_ids:
sw_freq += 1
if id1 == id2:
continue
box_cur_1 = anno_cur[np.where(anno_cur[:,1]==id1)][0][2:6]
box_cur_2 = anno_cur[np.where(anno_cur[:,1]==id2)][0][2:6]
box_next_1 = anno_next[np.where(anno_next[:,1]==id1)][0][2:6]
box_next_2 = anno_next[np.where(anno_next[:,1]==id2)][0][2:6]
left_right_cur = center(box_cur_1)[0] >= center(box_cur_2)[0]
left_right_next = center(box_next_1)[0] >= center(box_next_2)[0]
top_down_cur = center(box_cur_1)[1] >= center(box_cur_2)[1]
top_down_next = center(box_next_1)[1] >= center(box_next_2)[1]
if (left_right_cur != left_right_next) or (top_down_cur != top_down_next):
switch += 1
return switch, sw_freq
if __name__ == "__main__":
seqs = os.listdir(source_dir)
all_iou, all_freq = 0, 0
all_switch, all_sw_freq = 0, 0
for seq in seqs:
if seq == ".DS_Store":
continue
anno_file = os.path.join(source_dir, seq, "gt/gt.txt")
annos = np.loadtxt(anno_file, delimiter=",")
seq_iou, seq_freq = consecutive_iou(annos)
seq_switch, seq_sw_freq = relative_switch(annos)
all_iou += seq_iou
all_freq += seq_freq
all_switch += seq_switch
all_sw_freq += seq_sw_freq
print("Average IoU on consecutive frames = {}".format(all_iou / all_freq))
print("Relative Position Switch frequency = {}".format(all_switch / all_sw_freq)) |
tests/python/unittest/test_auto_scheduler_search_task.py | XiaoSong9905/tvm | 4,640 | 12680428 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def test_search_task_add_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
test_input_1 = tvm.runtime.ndarray.empty((10, 20))
test_input_2 = tvm.runtime.ndarray.empty((30, 40, 50))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
"test_input_2": test_input_2,
},
task_inputs_overwrite=True,
)
assert len(task.task_input_names) == 3
assert task.task_input_names[0] == "test_input_0"
assert task.task_input_names[1] == "test_input_1"
assert task.task_input_names[2] == "test_input_2"
def test_search_task_record():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
# Log with no task input
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test", args=(N, N, N), target=target
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
# TODO(jcf94): Check the compute dag & hardware parameter
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
# Log with 1 task input
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={"test_input_0": test_input_0},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
# Log with multiple task inputs
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
# Log with version 0.5
v5_log = """["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
def test_recover_measure_input_with_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
# Since this file is tests for search_task, we only check the search_task here
# Log with no task input
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
# Log with 1 task input
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
# Log with multiple task inputs
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
# Log with version 0.5
v5_log = """{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""
measure_log = auto_scheduler.measure_record.load_record_from_string(v5_log)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
if __name__ == "__main__":
test_search_task_add_task_input()
test_search_task_record()
test_recover_measure_input_with_task_input()
|
predictions/pred_constants.py | bfortuner/VOCdetect | 336 | 12680433 | <filename>predictions/pred_constants.py<gh_stars>100-1000
PRED_TYPE = 'Basic'
TTA_PRED_TYPE = 'TTA'
ENS_TYPE = 'Ens'
MEGA_ENS_TYPE = 'MegaEns'
|
tests/python/test_bls_assume_in_range.py | kxxt/taichi | 11,699 | 12680435 | <filename>tests/python/test_bls_assume_in_range.py<gh_stars>1000+
import taichi as ti
from .bls_test_template import bls_particle_grid
@ti.test(require=ti.extension.bls)
def test_scattering():
bls_particle_grid(N=128,
ppc=10,
block_size=8,
scatter=True,
use_offset=False)
@ti.test(require=ti.extension.bls)
def test_scattering_offset():
bls_particle_grid(N=128,
ppc=10,
block_size=8,
scatter=True,
use_offset=True)
@ti.test(require=ti.extension.bls)
def test_scattering_two_pointer_levels():
bls_particle_grid(N=128,
ppc=10,
block_size=8,
scatter=True,
pointer_level=2,
use_offset=False)
@ti.test(require=ti.extension.bls)
def test_gathering():
bls_particle_grid(N=128,
ppc=10,
block_size=8,
scatter=False,
use_offset=False)
@ti.test(require=ti.extension.bls)
def test_gathering_offset():
bls_particle_grid(N=128,
ppc=10,
block_size=8,
scatter=False,
use_offset=True)
# TODO: debug mode behavior of assume_in_range
|
Code/Chenglong/gen_best_ensemble_model.py | ChenglongChen/Kaggle_Homedepot | 465 | 12680439 | <filename>Code/Chenglong/gen_best_ensemble_model.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief: script for generating the best ensemble model from Chenglong's side
@note: 1. make sure you have run `python run_data.py` first
2. make sure you have built `some diverse` 1st level models first (see `./Log/level1_models` for example)
"""
import os
cmd = "python run_stacking_ridge.py -l 2 -d 0 -t 10 -c 1 -L reg_ensemble -o"
os.system(cmd)
|
py/clam.py | seahorn/crab-llvm | 161 | 12680472 | <reponame>seahorn/crab-llvm
#!/usr/bin/env python3
"""
Entry point to Clam Abstract Interpreter
"""
import argparse as a
import atexit
#from datetime import datetime
import errno
import io
import os
import os.path
import platform
import resource
import shutil
import subprocess as sub
import signal
import stats
import sys
import tempfile
import threading
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
verbose = True
running_process = None
####### SPECIAL ERROR CODES USEFUL FOR DEBUGGING ############
# Exit codes are between 0 and 255.
# Do not use 1, 2, 126, 127, 128 and negative integers.
## special error codes for the frontend (clang + opt + pp)
FRONTEND_TIMEOUT=20
FRONTEND_MEMORY_OUT=21
#### specific errors for each frontend component
CLANG_ERROR = 22
OPT_ERROR = 23
PP_ERROR = 24
### special error codes for crab
CRAB_ERROR = 25 ## errors caught by crab
CRAB_TIMEOUT = 26
CRAB_MEMORY_OUT = 27
CRAB_SEGFAULT = 28 ## unexpected segfaults
#############################################################
llvm_version = "10.0"
def isexec(fpath):
if fpath is None:
return False
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
if isinstance(program, str):
choices = [program]
else:
choices = program
for p in choices:
fpath, _ = os.path.split(p)
if fpath:
if isexec(p):
return p
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, p)
if isexec(exe_file):
return exe_file
return None
# Return a tuple (returnvalue:int, timeout:bool, out_of_memory:bool, segfault:bool, unknown:bool)
# - Only one boolean flag can be enabled at any time.
# - If all flags are false then returnvalue cannot be None.
def run_command_with_limits(cmd, cpu, mem, out = None):
timeout = False
out_of_memory = False
segfault = False
unknown_error = False
returnvalue = 0
def set_limits():
if mem > 0:
mem_bytes = mem * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, [mem_bytes, mem_bytes])
def kill(proc):
try:
proc.terminate()
proc.kill()
proc.wait()
global running_process
running_process = None
except OSError:
pass
if out is not None:
p = sub.Popen(cmd, stdout = out, preexec_fn=set_limits)
else:
p = sub.Popen(cmd, preexec_fn=set_limits)
global running_process
running_process = p
timer = threading.Timer(cpu, kill, [p])
if cpu > 0:
timer.start()
try:
(_, status, _) = os.wait4(p.pid, 0)
signalvalue = status & 0xff
returnvalue = status >> 8
if signalvalue != 0:
returnvalue = None
if signalvalue > 127:
segfault = True
else:
print("** Killed by signal " + str(signalvalue))
# 9: 'SIGKILL', 14: 'SIGALRM', 15: 'SIGTERM'
if signalvalue in (9, 14, 15):
## kill sends SIGTERM by default.
## The timer set above uses kill to stop the process.
timeout = True
else:
unknown_error = True
running_process = None
except OSError as e:
returnvalue = None
print("** OS Error: " + str(e))
if errno.errorcode[e.errno] == 'ECHILD':
            ## The child has been killed. We assume it was killed by the timer,
            ## although other causes are possible.
timeout = True
elif errno.errorcode[e.errno] == 'ENOMEM':
out_of_memory = True
else:
unknown_error = True
finally:
## kill the timer if the process has terminated already
if timer.is_alive():
timer.cancel()
return (returnvalue, timeout, out_of_memory, segfault, unknown_error)
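# Illustrative sketch (not part of the original driver): how a caller can interpret
# the tuple returned by run_command_with_limits. 'true' is just a placeholder command.
def _run_with_limits_example():
    rv, timed_out, oom, segv, unknown = run_command_with_limits(['true'], 5, 512)
    if timed_out:
        return 'timeout'
    if oom:
        return 'out of memory'
    if segv or unknown or rv != 0:
        return 'error'
    return 'ok'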
def loadEnv(filename):
if not os.path.isfile(filename): return
f = open(filename)
for line in f:
sl = line.split('=', 1)
# skip lines without equality
if len(sl) != 2:
continue
(key, val) = sl
os.environ [key] = os.path.expandvars(val.rstrip())
def parseArgs(argv):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
if v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
raise a.ArgumentTypeError('Boolean value expected.')
def add_bool_argument(parser, name, default,
help=None, dest=None, **kwargs):
"""
Add boolean option that can be turned on and off
"""
dest_name = dest if dest else name
mutex_group = parser.add_mutually_exclusive_group(required=False)
mutex_group.add_argument('--' + name, dest=dest_name, type=str2bool,
nargs='?', const=True, help=help,
metavar='BOOL', **kwargs)
mutex_group.add_argument('--no-' + name, dest=dest_name,
type=lambda v: not(str2bool(v)),
nargs='?', const=False,
help=a.SUPPRESS, **kwargs)
default_value = {dest_name : default}
parser.set_defaults(**default_value)
    p = a.ArgumentParser(description='Abstract Interpretation-based Analyzer for LLVM bitcode',
formatter_class=a.RawTextHelpFormatter)
p.add_argument ('-oll', '--oll', dest='asm_out_name', metavar='FILE',
                    help='Output analyzed bitcode')
p.add_argument ('-ocrab', '--ocrab', dest='crab_out_name', metavar='FILE',
                    help='Output analyzed CrabIR with (optional) annotations')
p.add_argument('--log', dest='log', default=None,
metavar='STR', help='Log level for clam')
p.add_argument('-o', dest='out_name', metavar='FILE',
help='Output file name')
p.add_argument("--save-temps", dest="save_temps",
help="Do not delete temporary files",
action="store_true",
default=False)
p.add_argument("--temp-dir", dest="temp_dir", metavar='DIR',
help="Temporary directory",
default=None)
p.add_argument('-g', default=False, action='store_true', dest='debug_info',
help='Compile with debug information')
p.add_argument('-m', type=int, dest='machine',
help='Machine architecture MACHINE:[32,64]', default=32)
p.add_argument ('-I', default=None, dest='include_dir', help='Include')
p.add_argument("--no-preprocess", dest="preprocess",
help='Skip compilation and preprocessing', action='store_false',
default=True)
p.add_argument("--only-preprocess", dest="only_preprocess",
help='Run only the preprocessor', action='store_true',
default=False)
p.add_argument('-O', type=int, dest='L', metavar='INT',
help='Optimization level L:[0,1,2,3]', default=0)
p.add_argument('--cpu', type=int, dest='cpu', metavar='SEC',
help='CPU time limit (seconds)', default=-1)
p.add_argument('--mem', type=int, dest='mem', metavar='MB',
help='MEM limit (MB)', default=-1)
p.add_argument('--llvm-version',
help='Print llvm version', dest='llvm_version',
default=False, action='store_true')
p.add_argument('--clang-version',
help='Print clang version', dest='clang_version',
default=False, action='store_true')
p.add_argument('--llvm-dot-cfg',
help='Print LLVM CFG of function to dot file',
dest='dot_cfg', default=False, action='store_true')
##---------------------------------------------------------------------##
## Optimizations/transformations that take place at LLVM IR level
##---------------------------------------------------------------------##
p.add_argument('--llvm-inline-threshold', dest='inline_threshold',
type=int, metavar='NUM',
help='Inline threshold (default = 255)')
p.add_argument('--llvm-pp-loops',
help='Optimizing loops',
dest='pp_loops', default=False, action='store_true')
p.add_argument('--llvm-peel-loops', dest='peel_loops',
type=int, metavar='NUM', default=0,
help='Number of iterations to peel (default = 0)')
# p.add_argument('--llvm-unroll-threshold', type=int,
# help='Unrolling threshold (default = 150)',
# dest='unroll_threshold',
# default=150, metavar='NUM')
p.add_argument('--inline', dest='inline', help='Inline all functions',
default=False, action='store_true')
p.add_argument('--turn-undef-nondet',
help='Turn undefined behaviour into non-determinism',
dest='undef_nondet', default=False, action='store_true')
add_bool_argument(p, 'promote-malloc',
help='Promote top-level malloc to alloca',
dest='promote_malloc', default=True)
p.add_argument('--lower-select',
help='Lower select instructions',
dest='lower_select', default=False, action='store_true')
p.add_argument('--lower-unsigned-icmp',
help='Lower ULT and ULE instructions',
dest='lower_unsigned_icmp', default=False, action='store_true')
p.add_argument('--disable-scalarize',
help='Disable lowering of vector operations into scalar ones',
dest='disable_scalarize', default=False, action='store_true')
p.add_argument('--disable-lower-constant-expr',
help='Disable lowering of constant expressions to instructions',
dest='disable_lower_cst_expr', default=False, action='store_true')
p.add_argument('--disable-lower-switch',
help='Disable lowering of switch instructions',
dest='disable_lower_switch', default=False, action='store_true')
p.add_argument('--devirt-functions',
help="Resolve indirect calls (needed for soundness):\n"
"- none : do not resolve indirect calls (default)\n"
"- types: select all functions with same type signature\n"
"- sea-dsa: use sea-dsa analysis to select the callees\n",
dest='devirt',
choices=['none','types','sea-dsa'],
default='none')
p.add_argument ('--externalize-functions',
help='Externalize these functions',
dest='extern_funcs', type=str, metavar='str,...')
p.add_argument('--externalize-addr-taken-functions',
help='Externalize uses of address-taken functions (potentially unsound)',
dest='extern_addr_taken_funcs', default=False,
action='store_true')
p.add_argument('--print-after-all',
help='Print IR after each pass (for debugging)',
dest='print_after_all', default=False,
action='store_true')
p.add_argument('--debug-pass',
help='Print all LLVM passes executed (--debug-pass=Structure)',
dest='debug_pass', default=False,
action='store_true')
p.add_argument('file', metavar='FILE', help='Input file')
##---------------------------------------------------------------------##
### BEGIN CRAB
## Here options that are passed to Crab or transformations that
## take place at CrabIR level
##---------------------------------------------------------------------##
p.add_argument('--crab-verbose', type=int,
help='Enable verbose messages',
dest='crab_verbose',
default=0, metavar='UINT')
p.add_argument("--crab-only-cfg", dest="crab_only_cfg",
help='Build only the Crab CFG', action='store_true',
default=False)
p.add_argument('--crab-cfg-simplify',
help='Perform some crab CFG transformations',
dest='crab_cfg_simplify', default=False, action='store_true')
p.add_argument('--crab-dom',
help="Choose abstract domain:\n"
"- int: intervals\n"
"- sign-const: reduced product of sign and constant domains\n"
"- ric: reduced product of intervals and congruences\n"
"- term-int: int with uninterpreted functions\n"
"- dis-int: disjunctive intervals based on Clousot's DisInt domain\n"
"- term-dis-int: dis-int with uninterpreted functions\n"
"- boxes: disjunctive intervals based on LDDs\n"
"- zones: zones domain using sparse DBM in Split Normal Form\n"
"- oct: octagons domain\n"
"- pk: polyhedra domain\n"
"- rtz: reduced product of term-dis-int with zones\n"
"- w-int: wrapped intervals\n",
choices=['int', 'sign-const', 'ric', 'term-int',
'dis-int', 'term-dis-int', 'boxes',
'zones', 'oct', 'pk', 'rtz',
'w-int'],
dest='crab_dom', default='zones')
p.add_argument('--crab-dom-params', dest='crab_dom_params', default=None,
help="Set abstract domain options STR=\"param1=val1:param2=val2:...\"",
metavar='STR')
p.add_argument('--crab-widening-delay',
type=int, dest='widening_delay',
help='Max number of iterations until performing widening', default=1)
p.add_argument('--crab-widening-jump-set',
type=int, dest='widening_jump_set',
help='Size of the jump set used in widening', default=0)
p.add_argument('--crab-narrowing-iterations',
type=int, dest='narrowing_iterations',
help='Max number of narrowing iterations', default=3)
p.add_argument('--crab-relational-threshold',
type=int, dest='num_threshold',
help='Max number of live vars per block before switching to a non-relational domain',
default=10000)
p.add_argument('--crab-track',
help='Track integers (num), num + singleton memory objects (sing-mem), and num + all memory objects (mem)',
choices=['num', 'sing-mem', 'mem'], dest='track', default='num')
p.add_argument('--crab-heap-analysis',
help="Heap analysis used for memory disambiguation (if --crab-track != num):\n"
"- ci-sea-dsa: context-insensitive sea-dsa\n"
"- cs-sea-dsa: context-sensitive sea-dsa\n"
"- ci-sea-dsa-types: context-insensitive sea-dsa with types (default)\n"
"- cs-sea-dsa-types: context-sensitive sea-dsa with types\n",
choices=['none', 'ci-sea-dsa', 'cs-sea-dsa', 'ci-sea-dsa-types', 'cs-sea-dsa-types'],
dest='crab_heap_analysis',
default='ci-sea-dsa-types')
p.add_argument('--crab-singleton-aliases',
help='Translate singleton alias sets (mostly globals) as scalar values',
dest='crab_singleton_aliases', default=False, action='store_true')
p.add_argument('--crab-inter',
help='Run summary-based, inter-procedural analysis',
dest='crab_inter', default=False, action='store_true')
p.add_argument('--crab-inter-max-summaries',
type=int, dest='inter_max_summaries',
help='Max number of summaries per function',
default=1000000)
add_bool_argument(p, 'crab-inter-recursive-functions', default=False,
help='Precise analysis of recursive functions (more expensive). False by default',
dest='crab_inter_recursive')
add_bool_argument(p, 'crab-inter-exact-summary-reuse', default=True,
help='Reuse summaries without losing precision (more expensive). True by default',
dest='crab_inter_exact_summary_reuse')
add_bool_argument(p, 'crab-inter-entry-main', default=False,
help='Start analysis only from main (not applicable to libraries). False by default',
dest='crab_inter_entry_main')
p.add_argument('--crab-backward',
help='Run iterative forward/backward analysis for proving assertions (only intra version available and very experimental)',
dest='crab_backward', default=False, action='store_true')
# WARNING: --crab-live may lose precision.
# If x=z in bb1 and y=z in bb2 and z is dead after bb1 and bb2 then
# the equality x=y is lost.
p.add_argument('--crab-live',
help='Delete dead symbols: may lose precision with relational domains.',
dest='crab_live', default=False, action='store_true')
add_bool_argument(p, 'crab-lower-unsigned-icmp', default=False,
help='Replace unsigned comparison with signed comparisons in CrabIR',
dest='crab_lower_unsigned_icmp')
add_bool_argument(p, 'crab-lower-with-overflow-intrinsics', default=False,
help='Replace llvm.OP.with.overflow.* in CrabIR with OP assuming no overflow occurs.\n'
'This option should be only used if the arithmetic operations are known to not overflow',
dest='crab_lower_with_overflow_intrinsics')
p.add_argument('--crab-opt',
help='Optimize LLVM bitcode using invariants',
choices=['none',
'dce',
'add-invariants',
'replace-with-constants',
'all'],
dest='crab_optimizer', default='none')
p.add_argument('--crab-opt-invariants-loc',
help='Specify the location where invariants are added (only if crab-opt=add-invariants)',
choices=['none',
'block-entry',
'loop-header',
'after-load',
'all'],
dest='crab_optimizer_inv_loc', default='none')
add_bool_argument(p, 'crab-preserve-invariants',
help='Preserve invariants for queries after analysis has finished',
dest='store_invariants', default=True)
p.add_argument('--crab-promote-assume',
help='Promote verifier.assume calls to llvm.assume intrinsics',
dest='crab_promote_assume', default=False, action='store_true')
p.add_argument('--crab-check',
help='Check properties (default no check)',
choices=['none', 'assert', 'null', 'uaf', 'bounds', 'null-legacy', 'uaf-legacy', 'is-deref'],
dest='crab_check', default='none')
add_bool_argument(p, 'crab-check-only-typed', default=False,
help='Add checks only on typed regions (only for null and uaf). False by default',
dest='crab_check_only_typed')
add_bool_argument(p, 'crab-check-only-noncyclic', default=False,
help='Add checks only on noncyclic regions (only for null and uaf). False by default',
dest='crab_check_only_noncyclic')
p.add_argument('--crab-check-verbose', metavar='INT',
help='Print verbose information about checks\n' +
'>=1: only error checks\n' +
'>=2: error and warning checks\n' +
'>=3: error, warning, and safe checks',
dest='check_verbose', type=int, default=0)
p.add_argument('--crab-print-summaries',
#help='Display computed summaries (if --crab-inter)',
help=a.SUPPRESS,
dest='print_summs', default=False, action='store_true')
add_bool_argument(p, 'crab-print-cfg',
help='Display Crab CFG',
dest='print_cfg', default=False)
add_bool_argument(p, 'crab-dot-cfg', default=False,
help='Print Crab CFG of function to dot file',
dest='crab_dot_cfg')
add_bool_argument(p, 'crab-print-invariants',
help='Print invariants',
dest='crab_print_invariants', default=True)
add_bool_argument(p, 'crab-print-unjustified-assumptions',
help='Print unjustified assumptions done by the analyzer (experimental, only for integer overflows)',
dest='print_assumptions', default=False)
add_bool_argument(p, 'crab-print-voi',
help='Print variables-of-influence of assertions',
dest='print_voi', default=False)
p.add_argument('--crab-stats',
help='Display crab statistics',
dest='print_stats', default=False, action='store_true')
p.add_argument('--crab-disable-warnings',
help='Disable clam and crab warnings',
dest='crab_disable_warnings', default=False, action='store_true')
p.add_argument('--crab-sanity-checks',
help='Enable clam and crab sanity checks',
dest='crab_sanity_checks', default=False, action='store_true')
######################################################################
# Hidden options
######################################################################
## These three might be unsound if enabled
p.add_argument('--crab-dsa-disambiguate-unknown',
help=a.SUPPRESS,
dest='crab_dsa_unknown', default=False, action='store_true')
p.add_argument('--crab-dsa-disambiguate-ptr-cast',
help=a.SUPPRESS,
dest='crab_dsa_ptr_cast', default=False, action='store_true')
p.add_argument('--crab-dsa-disambiguate-external',
help=a.SUPPRESS,
dest='crab_dsa_external', default=False, action='store_true')
# Choose between own crab way of naming values and instnamer
add_bool_argument(p, 'crab-name-values', default=True,
help=a.SUPPRESS, dest='crab_name_values')
add_bool_argument(p, 'crab-keep-shadows', default=False,
help=a.SUPPRESS, dest='crab_keep_shadows')
add_bool_argument(p, 'crab-enable-bignums', default=False,
help=a.SUPPRESS, dest='crab_enable_bignums')
#### END CRAB
args = p.parse_args(argv)
if args.L < 0 or args.L > 3:
p.error("Unknown option: -O%s" % args.L)
if args.machine != 32 and args.machine != 64:
p.error("Unknown option -m%s" % args.machine)
return args
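# Illustrative (not part of the original script): a typical end-to-end invocation of
# this driver from the shell, using option names defined in parseArgs() above; file.c
# is a placeholder input.
#
#   clam.py -O1 --crab-dom=zones --crab-inter --crab-check=assert file.c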
def createWorkDir(dname = None, save = False):
if dname is None:
workdir = tempfile.mkdtemp(prefix='clam-')
else:
workdir = dname
if False: #verbose:
print("Working directory {0}".format(workdir))
if not save:
atexit.register(shutil.rmtree, path=workdir)
return workdir
def getClam():
clam_cmd = None
if 'CLAM' in os.environ:
clam_cmd = os.environ ['CLAM']
if not isexec(clam_cmd):
clam_cmd = os.path.join(root, "bin/clam")
if not isexec(clam_cmd):
clam_cmd = which('clam')
if not isexec(clam_cmd):
raise IOError("Cannot find clam")
return clam_cmd
def getClamPP():
crabpp_cmd = None
if 'CLAMPP' in os.environ:
crabpp_cmd = os.environ ['CLAMPP']
if not isexec(crabpp_cmd):
crabpp_cmd = os.path.join(root, "bin/clam-pp")
if not isexec(crabpp_cmd): crabpp_cmd = which('clam-pp')
if not isexec(crabpp_cmd):
raise IOError("Cannot find clam pre-processor")
return crabpp_cmd
def getClangVersion(clang_cmd):
    # universal_newlines=True so communicate() returns str (not bytes) under Python 3
    p = sub.Popen([clang_cmd, '--version'], stdout=sub.PIPE, universal_newlines=True)
out, _ = p.communicate()
clang_version = "not-found"
found = False # true if string 'version' is found
tokens = out.split()
for t in tokens:
if found is True:
clang_version = t
break
if t == 'version':
found = True
return clang_version
def getClang(is_plus_plus):
cmd_name = None
if is_plus_plus:
cmd_name = which (['clang++-mp-' + llvm_version, 'clang++-' + llvm_version, 'clang++'])
else:
cmd_name = which (['clang-mp-' + llvm_version, 'clang-' + llvm_version, 'clang'])
if cmd_name is None:
raise IOError('clang was not found')
return cmd_name
# return a pair: the first element is the command and the second is a
# bool that it is true if seaopt has been found.
def getOptLlvm ():
cmd_name = which (['seaopt'])
if cmd_name is not None:
return (cmd_name, True)
cmd_name = which (['opt-mp-' + llvm_version, 'opt-' + llvm_version, 'opt'])
if cmd_name is None:
raise IOError ('neither seaopt nor opt where found')
return (cmd_name, False)
### Passes
def defBCName(name, wd=None):
base = os.path.basename(name)
if wd is None:
wd = os.path.dirname (name)
fname = os.path.splitext(base)[0] + '.bc'
return os.path.join(wd, fname)
def defPPName(name, wd=None):
base = os.path.basename(name)
if wd is None:
wd = os.path.dirname (name)
fname = os.path.splitext(base)[0] + '.pp.bc'
return os.path.join(wd, fname)
def defOptName(name, wd=None):
base = os.path.basename(name)
if wd is None:
wd = os.path.dirname (name)
fname = os.path.splitext(base)[0] + '.o.bc'
return os.path.join(wd, fname)
def defOutPPName(name, wd=None):
base = os.path.basename(name)
if wd is None:
wd = os.path.dirname (name)
fname = os.path.splitext(base)[0] + '.ll'
return os.path.join(wd, fname)
def _bc_or_ll_file (name):
ext = os.path.splitext (name)[1]
return ext == '.bc' or ext == '.ll'
def _plus_plus_file(name):
ext = os.path.splitext(name)[1]
return ext in ('.cpp', '.cc')
# Run Clang
def clang(in_name, out_name, args, arch=32, extra_args=[]):
if _bc_or_ll_file(in_name):
if verbose:
            print('--- Clang skipped: input file is already bitcode')
shutil.copy2(in_name, out_name)
return
if out_name in ('', None):
out_name = defBCName(in_name)
clang_cmd = getClang(_plus_plus_file(in_name))
clang_version = getClangVersion(clang_cmd)
if clang_version != "not-found":
if not clang_version.startswith(llvm_version):
print("WARNING clam.py: clang version " + clang_version + \
" different from " + llvm_version)
clang_args = [clang_cmd, '-emit-llvm', '-o', out_name, '-c', in_name ]
    # New for clang >= 5.0: avoid adding optnone when compiling with -O0.
# Otherwise, seaopt cannot optimize.
clang_args.append('-Xclang')
clang_args.append('-disable-O0-optnone')
clang_args.extend (extra_args)
clang_args.append ('-m{0}'.format (arch))
if args.include_dir is not None:
if ':' in args.include_dir:
idirs = ["-I{}".format(x.strip()) \
for x in args.include_dir.split(":") if x.strip() != '']
clang_args.extend(idirs)
else:
clang_args.append ('-I' + args.include_dir)
include_dir = os.path.dirname (sys.argv[0])
include_dir = os.path.dirname (include_dir)
include_dir = os.path.join (include_dir, 'include')
clang_args.append ('-I' + include_dir)
# Disable always vectorization
if not args.disable_scalarize:
clang_args.append('-fno-vectorize') ## disable loop vectorization
clang_args.append('-fno-slp-vectorize') ## disable store/load vectorization
## Hack for OSX Mojave that no longer exposes libc and libstd headers by default
osx_sdk_dirs = ['/Applications/Xcode.app/Contents/Developer/Platforms/' + \
'MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk',
'/Applications/Xcode.app/Contents/Developer/Platforms/' + \
'MacOSX.platform/Developer/SDKs/MacOSX10.15.sdk'] + \
['/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk']
for osx_sdk_dir in osx_sdk_dirs:
if os.path.isdir(osx_sdk_dir):
clang_args.append('--sysroot=' + osx_sdk_dir)
break
if verbose:
print('Clang command: ' + ' '.join(clang_args))
returnvalue, timeout, out_of_mem, segfault, unknown = \
run_command_with_limits(clang_args, -1, -1)
if timeout:
sys.exit(FRONTEND_TIMEOUT)
elif out_of_mem:
sys.exit(FRONTEND_MEMORY_OUT)
elif segfault or unknown or returnvalue != 0:
sys.exit(CLANG_ERROR)
# Run llvm optimizer
def optLlvm(in_name, out_name, args, extra_args=[], cpu = -1, mem = -1):
if out_name in ('', None):
out_name = defOptName(in_name)
opt_cmd, is_seaopt = getOptLlvm()
opt_args = [opt_cmd, '-f']
if out_name is not None: opt_args.extend(['-o', out_name])
opt_args.append('-O{0}'.format(args.L))
# disable sinking instructions to end of basic block
# this might create unwanted aliasing scenarios
# for now, there is no option to undo this switch
opt_args.append('--simplifycfg-sink-common=false')
# disable always vectorization
## With LLVM 10: loop vectorization must be enabled
# opt_args.append('--disable-loop-vectorization')
opt_args.append('--disable-slp-vectorization')
## Unavailable after porting to LLVM10
# if is_seaopt:
# # disable always loop rotation. Loop rotation converts to loops
# # that are much harder to reason about them using crab due to
# # several reasons:
# #
# # 1. Complex loops that break widening heuristics
# # 2. Rewrite loop exits by adding often disequalities
# # 3. Introduce new *unsigned* loop variables.
# opt_args.append('--disable-loop-rotate')
# These two should be optional
#opt_args.append('--enable-indvar=true')
#opt_args.append('--enable-loop-idiom=true')
## Unavailable after porting to LLVM10
# if is_seaopt:
# if args.undef_nondet:
# opt_args.append('--enable-nondet-init=true')
# else:
# opt_args.append('--enable-nondet-init=false')
if args.inline_threshold is not None:
opt_args.append('--inline-threshold={t}'.format
(t=args.inline_threshold))
# if args.unroll_threshold is not None:
# opt_args.append('--unroll-threshold={t}'.format
# (t=args.unroll_threshold))
if args.print_after_all: opt_args.append('--print-after-all')
if args.debug_pass: opt_args.append('--debug-pass=Structure')
opt_args.extend(extra_args)
opt_args.append(in_name)
if verbose:
print('seaopt command: ' + ' '.join(opt_args))
returnvalue, timeout, out_of_mem, _, unknown = \
run_command_with_limits(opt_args, cpu, mem)
if timeout:
sys.exit(FRONTEND_TIMEOUT)
elif out_of_mem:
sys.exit(FRONTEND_MEMORY_OUT)
elif unknown or returnvalue != 0:
sys.exit(OPT_ERROR)
# Generate dot files for each LLVM function.
def dot(in_name, view_dot = False, cpu = -1, mem = -1):
fnull = open(os.devnull, 'w')
    opt_cmd, _ = getOptLlvm()  # getOptLlvm() returns a (command, is_seaopt) pair
    args = [opt_cmd, in_name, '-dot-cfg']
if view_dot: args.append('-view-cfg')
if verbose:
print(' '.join(args))
## We don't bother here analyzing the exit code
run_command_with_limits(args, cpu, mem, fnull)
# Run crabpp
def crabpp(in_name, out_name, args, extra_args=[], cpu = -1, mem = -1):
if out_name in ('', None):
out_name = defPPName(in_name)
crabpp_args = [getClamPP(), '-o', out_name, in_name ]
# disable sinking instructions to end of basic block
# this might create unwanted aliasing scenarios
# for now, there is no option to undo this switch
crabpp_args.append('--simplifycfg-sink-common=false')
if args.promote_malloc:
crabpp_args.append('--clam-promote-malloc=true')
else:
crabpp_args.append('--clam-promote-malloc=false')
if args.inline:
crabpp_args.append('--clam-inline-all')
if args.pp_loops:
crabpp_args.append('--clam-pp-loops')
if args.peel_loops > 0:
crabpp_args.append('--clam-peel-loops={0}'.format(args.peel_loops))
if args.undef_nondet:
crabpp_args.append('--clam-turn-undef-nondet')
if args.disable_scalarize:
crabpp_args.append('--clam-scalarize=false')
else:
        # Force to scalarize everything
crabpp_args.append('--scalarize-load-store=true')
if args.disable_lower_cst_expr:
crabpp_args.append('--clam-lower-constant-expr=false')
if args.disable_lower_switch:
crabpp_args.append('--clam-lower-switch=false')
# Postponed until clam is run, otherwise it can be undone by the optLlvm
# if args.lower_unsigned_icmp:
# crabpp_args.append( '--clam-lower-unsigned-icmp')
if args.devirt != 'none':
crabpp_args.append('--clam-devirt')
if args.devirt == 'types':
crabpp_args.append('--devirt-resolver=types')
elif args.devirt == 'sea-dsa':
crabpp_args.append('--devirt-resolver=sea-dsa')
crabpp_args.append('--sea-dsa-type-aware=true')
elif args.devirt == 'dsa':
crabpp_args.append('--devirt-resolver=dsa')
if args.extern_funcs:
for f in args.extern_funcs.split(','):
crabpp_args.append('--clam-externalize-function={0}'.format(f))
if args.extern_addr_taken_funcs:
crabpp_args.append('--clam-externalize-addr-taken-funcs')
if args.print_after_all: crabpp_args.append('--print-after-all')
if args.debug_pass: crabpp_args.append('--debug-pass=Structure')
crabpp_args.extend(extra_args)
if verbose:
print('clam-pp command: ' + ' '.join(crabpp_args))
returnvalue, timeout, out_of_mem, segfault, unknown = \
run_command_with_limits(crabpp_args, cpu, mem)
if timeout:
sys.exit(FRONTEND_TIMEOUT)
elif out_of_mem:
sys.exit(FRONTEND_MEMORY_OUT)
elif segfault or unknown or returnvalue != 0:
sys.exit(PP_ERROR)
# Run clam
def clam(in_name, out_name, args, extra_opts, cpu = -1, mem = -1):
clam_args = [ getClam(), in_name, '-oll', out_name]
clam_args = clam_args + extra_opts
if args.log is not None:
for l in args.log.split(':'):
clam_args.extend(['-crab-log', l])
if args.crab_dom_params is not None:
for l in args.crab_dom_params.split(':'):
clam_args.extend(['-crab-dom-param', l])
# disable sinking instructions to end of basic block
# this might create unwanted aliasing scenarios
# for now, there is no option to undo this switch
clam_args.append('--simplifycfg-sink-common=false')
if args.only_preprocess:
clam_args.append('-no-crab')
if args.crab_verbose:
clam_args.append('--crab-verbose={0}'.format(args.crab_verbose))
if args.crab_only_cfg:
clam_args.append('--crab-only-cfg')
if args.dot_cfg:
clam_args.append('--clam-llvm-cfg-dot')
## This option already run in crabpp
if args.undef_nondet: clam_args.append( '--clam-turn-undef-nondet')
if args.lower_unsigned_icmp:
clam_args.append('--clam-lower-unsigned-icmp')
if args.lower_select:
clam_args.append('--clam-lower-select')
if args.disable_lower_cst_expr:
clam_args.append('--clam-lower-constant-expr=false')
if args.disable_lower_switch:
clam_args.append('--clam-lower-switch=false')
if args.crab_lower_unsigned_icmp:
clam_args.append('--crab-lower-unsigned-icmp')
if args.crab_lower_with_overflow_intrinsics:
clam_args.append('--crab-lower-with-overflow-intrinsics')
clam_args.append('--crab-dom={0}'.format(args.crab_dom))
clam_args.append('--crab-widening-delay={0}'.format(args.widening_delay))
clam_args.append('--crab-widening-jump-set={0}'.format(args.widening_jump_set))
clam_args.append('--crab-narrowing-iterations={0}'.format(args.narrowing_iterations))
clam_args.append('--crab-relational-threshold={0}'.format(args.num_threshold))
clam_args.append('--crab-track={0}'.format(args.track))
if args.crab_heap_analysis == 'none' or \
args.crab_heap_analysis == 'ci-sea-dsa' or \
args.crab_heap_analysis == 'cs-sea-dsa':
clam_args.append('--crab-heap-analysis={0}'.format(args.crab_heap_analysis))
elif args.crab_heap_analysis == 'ci-sea-dsa-types':
clam_args.append('--crab-heap-analysis=ci-sea-dsa')
clam_args.append('--sea-dsa-type-aware=true')
elif args.crab_heap_analysis == 'cs-sea-dsa-types':
clam_args.append('--crab-heap-analysis=cs-sea-dsa')
clam_args.append('--sea-dsa-type-aware=true')
# if context-sensitive then we run the analysis on a callgraph
# where indirect calls have been resolved already by seadsa. This
# is important among other things to avoid errors with callee/caller
# simulation relation.
if args.crab_heap_analysis == 'cs-sea-dsa' or \
args.crab_heap_analysis == 'cs-sea-dsa-types':
clam_args.append('--sea-dsa-devirt')
if args.crab_singleton_aliases: clam_args.append('--crab-singleton-aliases')
if args.crab_inter:
clam_args.append('--crab-inter')
clam_args.append('--crab-inter-max-summaries={0}'.format(args.inter_max_summaries))
if args.crab_inter_recursive:
clam_args.append('--crab-inter-recursive=true')
else:
clam_args.append('--crab-inter-recursive=false')
if args.crab_inter_exact_summary_reuse:
clam_args.append('--crab-inter-exact-summary-reuse=true')
else:
clam_args.append('--crab-inter-exact-summary-reuse=false')
if args.crab_inter_entry_main:
clam_args.append('--crab-inter-entry-main=true')
else:
clam_args.append('--crab-inter-entry-main=false')
if args.crab_backward: clam_args.append('--crab-backward')
if args.crab_live: clam_args.append('--crab-live')
if args.crab_optimizer != 'none':
clam_args.append('--crab-opt')
if args.crab_optimizer == 'dce' or args.crab_optimizer == 'all':
clam_args.append('--crab-opt-dce')
if args.crab_optimizer == 'replace-with-constants' or args.crab_optimizer == 'all':
clam_args.append('--crab-opt-replace-with-constants')
if args.crab_optimizer == 'add-invariants' or args.crab_optimizer == 'all':
clam_args.append('--crab-opt-add-invariants={0}'.format(args.crab_optimizer_inv_loc))
if args.crab_promote_assume:
clam_args.append('--crab-promote-assume')
if args.crab_check != 'none':
clam_args.append('--crab-check')
if args.crab_check == 'null-legacy':
clam_args.append('--clam-null-check-legacy')
elif args.crab_check == 'uaf-legacy':
clam_args.append('--clam-uaf-check-legacy')
elif args.crab_check == 'null':
clam_args.append('--crab-null-check')
elif args.crab_check == 'uaf':
clam_args.append('--crab-uaf-check')
elif args.crab_check == 'bounds':
clam_args.append('--crab-bounds-check')
elif args.crab_check == 'is-deref':
clam_args.append('--crab-is-deref-check')
if args.crab_check in ['null-legacy', 'uaf-legacy', 'null', 'uaf', 'bounds']:
if args.crab_check_only_typed:
clam_args.append('--crab-check-only-typed-regions=true')
else:
clam_args.append('--crab-check-only-typed-regions=false')
if args.crab_check_only_noncyclic:
clam_args.append('--crab-check-only-noncyclic-regions=true')
else:
clam_args.append('--crab-check-only-noncyclic-regions=false')
if args.check_verbose:
clam_args.append('--crab-check-verbose={0}'.format(args.check_verbose))
if args.print_summs: clam_args.append('--crab-print-summaries')
if args.print_cfg:
clam_args.append('--crab-print-cfg=true')
else:
clam_args.append('--crab-print-cfg=false')
if args.print_stats: clam_args.append('--crab-stats')
if args.print_assumptions: clam_args.append('--crab-print-unjustified-assumptions')
if args.print_voi: clam_args.append('--crab-print-voi')
if args.crab_disable_warnings:
clam_args.append('--crab-enable-warnings=false')
if args.crab_sanity_checks: clam_args.append('--crab-sanity-checks')
if args.crab_cfg_simplify: clam_args.append('--crab-cfg-simplify')
if args.crab_print_invariants:
clam_args.append('--crab-print-invariants=true')
else:
clam_args.append('--crab-print-invariants=false')
if args.store_invariants:
clam_args.append('--crab-store-invariants=true')
else:
clam_args.append('--crab-store-invariants=false')
if args.crab_dot_cfg:
clam_args.append('--crab-dot-cfg=true')
else:
clam_args.append('--crab-dot-cfg=false')
if args.crab_out_name is not None:
clam_args.append('--ocrab={0}'.format(args.crab_out_name))
# begin hidden options
if args.crab_dsa_unknown: clam_args.append('--crab-dsa-disambiguate-unknown')
if args.crab_dsa_ptr_cast: clam_args.append('--crab-dsa-disambiguate-ptr-cast')
if args.crab_dsa_external: clam_args.append('--crab-dsa-disambiguate-external')
if args.crab_keep_shadows: clam_args.append('--crab-keep-shadows')
if args.crab_name_values:
clam_args.append('--crab-name-values=true')
else:
clam_args.append('--crab-name-values=false')
if args.crab_enable_bignums:
clam_args.append('--crab-enable-bignums=true')
else:
clam_args.append('--crab-enable-bignums=false')
# end hidden options
if verbose:
print('clam command: ' + ' '.join(clam_args))
if args.out_name is not None:
clam_args.append('-o={0}'.format(args.out_name))
if args.print_after_all:
clam_args.append('--print-after-all')
if args.debug_pass:
clam_args.append('--debug-pass=Structure')
returnvalue, timeout, out_of_mem, segfault, unknown = \
run_command_with_limits(clam_args, cpu, mem)
if timeout:
sys.exit(CRAB_TIMEOUT)
elif out_of_mem:
sys.exit(CRAB_MEMORY_OUT)
elif segfault:
sys.exit(CRAB_SEGFAULT)
elif unknown or returnvalue != 0:
# crab returns EXIT_FAILURE which in most platforms is 1 but not in all.
sys.exit(CRAB_ERROR)
def main(argv):
#def stat(key, val): stats.put(key, val)
os.setpgrp()
loadEnv(os.path.join(root, "env.common"))
## add directory containing this file to the PATH
os.environ ['PATH'] = os.path.dirname(os.path.realpath(__file__)) + \
os.pathsep + os.environ['PATH']
if '--llvm-version' in argv[1:] or '-llvm-version' in argv[1:]:
print("LLVM version " + llvm_version)
return 0
if '--clang-version' in argv[1:] or '-clang-version' in argv[1:]:
print("Clang version " + getClangVersion(getClang(False)))
return 0
print("Platform: {0} {1}".format(platform.system(), platform.release()))
print("LLVM version: {0}".format(llvm_version))
#print("Clam started at {0}\n\n".format(datetime.now().strftime("%H:%M:%S")))
args = parseArgs(argv[1:])
workdir = createWorkDir(args.temp_dir, args.save_temps)
in_name = args.file
if args.preprocess:
bc_out = defBCName(in_name, workdir)
if bc_out != in_name:
extra_args = []
if args.debug_info: extra_args.append('-g')
with stats.timer('Clang'):
clang(in_name, bc_out, args, arch=args.machine, extra_args=extra_args)
#stat('Progress', 'Clang')
in_name = bc_out
pp_out = defPPName(in_name, workdir)
if pp_out != in_name:
with stats.timer('ClamPP'):
crabpp(in_name, pp_out, args=args, cpu=args.cpu, mem=args.mem)
#stat('Progress', 'Clam preprocessor')
in_name = pp_out
if args.L > 0:
o_out = defOptName(in_name, workdir)
if o_out != in_name:
extra_args = []
with stats.timer('CrabOptLlvm'):
optLlvm(in_name, o_out, args, extra_args, cpu=args.cpu, mem=args.mem)
#stat('Progress', 'Llvm optimizer')
in_name = o_out
pp_out = defOutPPName(in_name, workdir)
with stats.timer('Clam'):
extra_opts = []
clam(in_name, pp_out, args, extra_opts, cpu=args.cpu, mem=args.mem)
#stat('Progress', 'Clam')
if args.asm_out_name is not None and args.asm_out_name != pp_out:
if False: #verbose:
print('cp {0} {1}'.format(pp_out, args.asm_out_name))
shutil.copy2(pp_out, args.asm_out_name)
#print("\nClam finished at {0}\n".format(datetime.now().strftime("%H:%M:%S")))
return 0
def killall():
global running_process
if running_process is not None:
try:
running_process.terminate()
running_process.kill()
running_process.wait()
running_process = None
except OSError: pass
if __name__ == '__main__':
# unbuffered output
sys.stdout = io.TextIOWrapper(open(sys.stdout.fileno(), 'wb', 0), write_through=True)
try:
signal.signal(signal.SIGTERM, lambda x, y: killall())
sys.exit(main(sys.argv))
except KeyboardInterrupt: pass
finally:
killall()
stats.brunch_print()
|
hvad/test_utils/context_managers.py | Kunpors/dr.pors- | 341 | 12680504 | <reponame>Kunpors/dr.pors-
# -*- coding: utf-8 -*-
import shutil
import tempfile
import warnings
#===============================================================================
try:
from tempfile import TemporaryDirectory
except ImportError:
class TemporaryDirectory(object):
def __init__(self, suffix='', prefix='tmp', dir=None):
self.name = tempfile.mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
try:
shutil.rmtree(self.name)
except OSError as err:
                if err.errno != 2:  # ENOENT: directory already removed
raise
#===============================================================================
class UserLoginContext(object):
def __init__(self, testcase, **kwargs):
self.testcase = testcase
self.kwargs = kwargs
def __enter__(self):
self.testcase.assertTrue(self.testcase.client.login(**self.kwargs))
def __exit__(self, exc, value, tb):
self.testcase.client.logout()
#===============================================================================
class AssertThrowsWarningContext(object):
def __init__(self, test_case, klass, number):
self.test_case = test_case
self.klass = klass
self.number = number
self.ctx = warnings.catch_warnings(record=True)
def __enter__(self):
self.warnings = self.ctx.__enter__()
warnings.resetwarnings()
warnings.simplefilter('always')
def __exit__(self, type, value, traceback):
self.test_case.assertEqual(
len(self.warnings), self.number, "%d warnings thrown, %d expected" % (
len(self.warnings), self.number
)
)
for warning in self.warnings:
self.test_case.assertTrue(issubclass(warning.category, self.klass),
'%s warning thrown, %s expected' %
(warning.category.__name__, self.klass.__name__))
self.ctx.__exit__(type, value, traceback)
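# Illustrative usage (not part of the original module; 'self' is assumed to be a
# Django TestCase instance):
#
#   with UserLoginContext(self, username='admin', password='secret'):
#       response = self.client.get('/admin/')
#
#   with AssertThrowsWarningContext(self, DeprecationWarning, 1):
#       some_deprecated_helper()  # hypothetical call expected to emit one warning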
|
tests/conftest.py | dgilland/fnc | 152 | 12680511 | from unittest import mock
import pytest
@pytest.fixture
def mocksleep():
with mock.patch("time.sleep") as mocked:
yield mocked
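# Illustrative usage (not part of the original conftest): a test function can request
# the fixture by name to assert sleeping behaviour without actually sleeping, e.g.
#
#   def test_backoff_sleeps(mocksleep):
#       code_that_retries_with_backoff()  # hypothetical code under test
#       mocksleep.assert_called()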
|
tests/client/construct_request_test.py | educatedguessing/bravado | 600 | 12680536 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import mock
import pytest
from bravado_core.operation import Operation
from bravado_core.request import IncomingRequest
from bravado_core.request import unmarshal_request
from bravado_core.spec import Spec
from typing import Any
from typing import Dict
from bravado.client import CallableOperation
from bravado.client import construct_request
def build_swagger_spec(swagger_dict):
spec = Spec(swagger_dict)
spec.api_url = 'http://localhost/'
return spec
@pytest.mark.parametrize('timeout_kv', [
('timeout', 1),
('connect_timeout', 2),
])
@mock.patch('bravado.client.marshal_param')
def test_with_timeouts(
mock_marshal_param, minimal_swagger_spec,
getPetById_spec, request_dict, timeout_kv,
):
request_dict['url'] = '/pet/{petId}'
op = CallableOperation(Operation.from_spec(
minimal_swagger_spec, '/pet/{petId}', 'get', getPetById_spec))
k, v = timeout_kv
request = construct_request(op, request_options={k: v}, petId=34, api_key='foo')
assert request[k] == v
assert mock_marshal_param.call_count == 2
@pytest.mark.parametrize(
'swagger_type, swagger_format, header_name, header_value', [
('boolean', None, 'boolean', True),
('integer', None, 'integer', 1),
('number', 'float', 'float', 2.0),
],
)
def test_with_not_string_headers(
minimal_swagger_dict, getPetById_spec, request_dict,
swagger_type, swagger_format, header_name, header_value,
):
url = '/pet/{petId}'
parameter = {
'name': header_name,
'in': 'header',
'required': False,
'type': swagger_type,
}
if swagger_format:
parameter['format'] = swagger_format
minimal_swagger_dict['paths'][url]['get']['parameters'].append(parameter)
minimal_swagger_spec = build_swagger_spec(minimal_swagger_dict)
request_dict['url'] = url
operation = Operation.from_spec(
swagger_spec=minimal_swagger_spec,
path_name='/pet/{petId}',
http_method='get',
op_spec=getPetById_spec,
)
petId = 34
api_key = 'foo'
request = construct_request(
operation=operation,
request_options={'headers': {header_name: header_value}},
petId=petId,
api_key=api_key,
)
# To unmarshall a request bravado-core needs the request to be wrapped
# by an object with a specific list of attributes
request_object = type('IncomingRequest', (IncomingRequest,), {
'path': {'petId': petId},
'query': {},
'form': {},
'headers': request['headers'],
'files': mock.Mock(),
})
expected_header_value = str(header_value)
# we need to handle a backwards-incompatible change in bravado-core 5.0.5
if swagger_type == 'boolean':
assert request['headers'][header_name] in (expected_header_value, expected_header_value.lower())
else:
assert request['headers'][header_name] == expected_header_value
unmarshalled_request = unmarshal_request(request_object, operation)
assert unmarshalled_request[header_name] == header_value
def test_use_msgpack(
minimal_swagger_spec,
getPetById_spec,
):
op = CallableOperation(
Operation.from_spec(
minimal_swagger_spec,
'/pet/{petId}',
'get',
getPetById_spec
)
)
request_options = {
'use_msgpack': True,
'headers': {'Some-Header': 'header-value'}
} # type: Dict[str, Any]
request = construct_request(
op,
request_options=request_options,
petId=1,
)
assert request['headers']['Accept'] == 'application/msgpack'
assert request['headers']['Some-Header'] == 'header-value', \
"Requested header should be present"
assert 'Accept' not in request_options['headers'], \
"Original request options should not be modified"
|
bin/bnf_to_cnf/bnf_to_cnf/parser.py | s-weigand/darglint | 405 | 12680537 |
from lark import (
Lark,
Tree,
)
from .node import (
Node,
NodeType,
)
class Parser(object):
grammar = r'''
start: grammar
grammar: imports? external_imports? name? start_expression? production+
production: annotations? symbol _OPER expression
_OPER: "::="
expression: sequence (_BAR sequence)*
_BAR: "|"
sequence: probability? annotations? (symbol | TERMINAL) (_WHITESPACE (symbol | TERMINAL))*
TERMINAL: "\"" (LETTER | ESCAPED | NUMBER | "_" | "-" | ":")+ "\""
| "ε"
ESCAPED: "\\" ("." | "," | "*" | "^" | "("
| ")" | "+" | "-" | "/" | "\""
| " " | "]" | "[" | "|")
probability: NUMBER+
start_expression: _START symbol
_START: "start:"
name: _GRAMMAR NAME
NAME: LETTER+
_GRAMMAR: "Grammar:"
external_imports: external_import+
external_import: _FROM FILENAME _IMPORT _LP items _RP
_FROM: "from"
_LP: "("
_RP: ")"
items: ITEM ","?
| ITEM "," items
ITEM: /\w+/
imports: import+
import: _IMPORT FILENAME
FILENAME: /(\w|\\|\.|-|_)+/
_IMPORT: "import"
annotations: annotation+
annotation: _AT IDENT
_AT: "@"
symbol: _LB IDENT _RB
_LB: "<"
_RB: ">"
IDENT: LETTER (LETTER | NUMBER | "_" | "-")*
%import common.LETTER
%import common.NUMBER
_COMMENT: /#[^\n]*/
%ignore _COMMENT
_WHITESPACE: (" " | "\n" | "\t")+
%ignore _WHITESPACE
''' # noqa: E501
def __init__(self):
self.delegate = Lark(self.grammar)
def parse(self, value: str) -> Node:
tree = self.delegate.parse(value)
return Node.from_lark_tree(tree)
def parse_production(self, value: str) -> Node:
"""Parse just an production.
Args:
value: The string to parse.
Throws:
Exception: If there is more than a single production in the
value.
Returns:
A node which is the head of the production (not the grammar.)
"""
if '\n' in value:
raise Exception(
'There should only be a single product, but '
'a newline is present.'
)
grammar = self.parse(value)
if grammar.children[0].node_type == NodeType.PRODUCTION:
production = grammar.children[0]
else:
production = grammar.children[1]
grammar.children = list()
return production
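# Illustrative usage (not part of the original module):
#
#   parser = Parser()
#   node = parser.parse_production('<sentence> ::= <noun> "runs"')
#   # Parser().parse(...) accepts a whole grammar (multiple productions, imports, etc.)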
|
extra/src/autogluon/extra/model_zoo/models/standford_dog_models.py | zhiqiangdon/autogluon | 4,462 | 12680545 | """Pretrained models for Standford Dogs dataset"""
import os
import mxnet as mx
import gluoncv as gcv
from ..model_store import get_model_file
__all__ = ['standford_dog_resnet152_v1', 'standford_dog_resnext101_64x4d']
def standford_dog_resnet152_v1(pretrained=False, root=os.path.join('~', '.autogluon', 'models'),
ctx=mx.cpu(0), **kwargs):
net = gcv.model_zoo.resnet152_v1(classes=120, **kwargs)
if pretrained:
net.load_parameters(get_model_file('standford_dog_resnet152_v1',
root=root), ctx=ctx)
return net
def standford_dog_resnext101_64x4d(pretrained=False, root=os.path.join('~', '.autogluon', 'models'),
ctx=mx.cpu(0), **kwargs):
net = gcv.model_zoo.resnext.resnext101_64x4d(classes=120, **kwargs)
if pretrained:
net.load_parameters(get_model_file('standford_dog_resnext101_64x4d',
root=root), ctx=ctx)
return net
|
tests/test_models/test_bottom_up_forward.py | nightfuryyy/mmpose | 1,775 | 12680586 |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmpose.models.detectors import AssociativeEmbedding
def test_ae_forward():
model_cfg = dict(
type='AssociativeEmbedding',
pretrained=None,
backbone=dict(type='ResNet', depth=18),
keypoint_head=dict(
type='AESimpleHead',
in_channels=512,
num_joints=17,
num_deconv_layers=0,
tag_per_joint=True,
with_ae_loss=[True],
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0])),
train_cfg=dict(),
test_cfg=dict(
num_joints=17,
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True],
with_ae=[True],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
soft_nms=False,
flip_test=True,
post_process=True,
shift_heatmap=True,
use_gt_bbox=True,
flip_pairs=[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16]],
))
detector = AssociativeEmbedding(model_cfg['backbone'],
model_cfg['keypoint_head'],
model_cfg['train_cfg'],
model_cfg['test_cfg'],
model_cfg['pretrained'])
detector.init_weights()
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
target = mm_inputs.pop('target')
mask = mm_inputs.pop('mask')
joints = mm_inputs.pop('joints')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
losses = detector.forward(
imgs, target, mask, joints, img_metas, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
_ = detector.forward(imgs, img_metas=img_metas, return_loss=False)
_ = detector.forward_dummy(imgs)
def _demo_mm_inputs(input_shape=(1, 3, 256, 256)):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
target = np.zeros([N, 17, H // 32, W // 32], dtype=np.float32)
mask = np.ones([N, H // 32, W // 32], dtype=np.float32)
joints = np.zeros([N, 30, 17, 2], dtype=np.float32)
img_metas = [{
'image_file':
'test.jpg',
'aug_data': [torch.zeros(1, 3, 256, 256)],
'test_scale_factor': [1],
'base_size': (256, 256),
'center':
np.array([128, 128]),
'scale':
np.array([1.28, 1.28]),
'flip_index':
[0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'target': [torch.FloatTensor(target)],
'mask': [torch.FloatTensor(mask)],
'joints': [torch.FloatTensor(joints)],
'img_metas': img_metas
}
return mm_inputs
|
bleak/backends/bluezdbus/scanner.py | arthurbiancarelli/bleak | 753 | 12680599 |
import logging
from typing import Any, Dict, List, Optional
from dbus_next.aio import MessageBus
from dbus_next.constants import BusType, MessageType
from dbus_next.message import Message
from dbus_next.signature import Variant
from bleak.backends.bluezdbus import defs
from bleak.backends.bluezdbus.signals import MatchRules, add_match, remove_match
from bleak.backends.bluezdbus.utils import (
assert_reply,
unpack_variants,
validate_address,
)
from bleak.backends.device import BLEDevice
from bleak.backends.scanner import BaseBleakScanner, AdvertisementData
logger = logging.getLogger(__name__)
# set of org.bluez.Device1 property names that come from advertising data
_ADVERTISING_DATA_PROPERTIES = {
"AdvertisingData",
"AdvertisingFlags",
"ManufacturerData",
"Name",
"ServiceData",
"UUIDs",
}
def _device_info(path, props):
try:
name = props.get("Alias", "Unknown")
address = props.get("Address", None)
if address is None:
try:
address = path[-17:].replace("_", ":")
if not validate_address(address):
address = None
except Exception:
address = None
rssi = props.get("RSSI", "?")
return name, address, rssi, path
except Exception:
return None, None, None, None
class BleakScannerBlueZDBus(BaseBleakScanner):
"""The native Linux Bleak BLE Scanner.
For possible values for `filters`, see the parameters to the
``SetDiscoveryFilter`` method in the `BlueZ docs
<https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/doc/adapter-api.txt?h=5.48&id=0d1e3b9c5754022c779da129025d493a198d49cf>`_
Keyword Args:
adapter (str): Bluetooth adapter to use for discovery.
filters (dict): A dict of filters to be applied on discovery.
"""
def __init__(self, **kwargs):
super(BleakScannerBlueZDBus, self).__init__(**kwargs)
# kwarg "device" is for backwards compatibility
self._adapter = kwargs.get("adapter", kwargs.get("device", "hci0"))
self._bus: Optional[MessageBus] = None
self._cached_devices: Dict[str, Variant] = {}
self._devices: Dict[str, Dict[str, Any]] = {}
self._rules: List[MatchRules] = []
self._adapter_path: str = f"/org/bluez/{self._adapter}"
# Discovery filters
self._filters: Dict[str, Variant] = {}
self.set_scanning_filter(**kwargs)
async def start(self):
self._bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
self._devices.clear()
self._cached_devices.clear()
# Add signal listeners
self._bus.add_message_handler(self._parse_msg)
rules = MatchRules(
interface=defs.OBJECT_MANAGER_INTERFACE,
member="InterfacesAdded",
arg0path=f"{self._adapter_path}/",
)
reply = await add_match(self._bus, rules)
assert_reply(reply)
self._rules.append(rules)
rules = MatchRules(
interface=defs.OBJECT_MANAGER_INTERFACE,
member="InterfacesRemoved",
arg0path=f"{self._adapter_path}/",
)
reply = await add_match(self._bus, rules)
assert_reply(reply)
self._rules.append(rules)
rules = MatchRules(
interface=defs.PROPERTIES_INTERFACE,
member="PropertiesChanged",
path_namespace=self._adapter_path,
)
reply = await add_match(self._bus, rules)
assert_reply(reply)
self._rules.append(rules)
# Find the HCI device to use for scanning and get cached device properties
reply = await self._bus.call(
Message(
destination=defs.BLUEZ_SERVICE,
path="/",
member="GetManagedObjects",
interface=defs.OBJECT_MANAGER_INTERFACE,
)
)
assert_reply(reply)
# get only the device interface
self._cached_devices = {
path: unpack_variants(interfaces[defs.DEVICE_INTERFACE])
for path, interfaces in reply.body[0].items()
if defs.DEVICE_INTERFACE in interfaces
}
logger.debug(f"cached devices: {self._cached_devices}")
# Apply the filters
reply = await self._bus.call(
Message(
destination=defs.BLUEZ_SERVICE,
path=self._adapter_path,
interface=defs.ADAPTER_INTERFACE,
member="SetDiscoveryFilter",
signature="a{sv}",
body=[self._filters],
)
)
assert_reply(reply)
# Start scanning
reply = await self._bus.call(
Message(
destination=defs.BLUEZ_SERVICE,
path=self._adapter_path,
interface=defs.ADAPTER_INTERFACE,
member="StartDiscovery",
)
)
assert_reply(reply)
async def stop(self):
reply = await self._bus.call(
Message(
destination=defs.BLUEZ_SERVICE,
path=self._adapter_path,
interface=defs.ADAPTER_INTERFACE,
member="StopDiscovery",
)
)
assert_reply(reply)
for rule in self._rules:
await remove_match(self._bus, rule)
self._rules.clear()
self._bus.remove_message_handler(self._parse_msg)
# Try to disconnect the System Bus.
try:
self._bus.disconnect()
except Exception as e:
logger.error("Attempt to disconnect system bus failed: {0}".format(e))
self._bus = None
def set_scanning_filter(self, **kwargs):
"""Sets OS level scanning filters for the BleakScanner.
For possible values for `filters`, see the parameters to the
``SetDiscoveryFilter`` method in the `BlueZ docs
<https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/doc/adapter-api.txt?h=5.48&id=0d1e3b9c5754022c779da129025d493a198d49cf>`_
See variant types here: <https://python-dbus-next.readthedocs.io/en/latest/type-system/>
Keyword Args:
filters (dict): A dict of filters to be applied on discovery.
"""
for k, v in kwargs.get("filters", {}).items():
if k == "UUIDs":
self._filters[k] = Variant("as", v)
elif k == "RSSI":
self._filters[k] = Variant("n", v)
elif k == "DuplicateData":
self._filters[k] = Variant("b", v)
elif k == "Pathloss":
self._filters[k] = Variant("n", v)
elif k == "Transport":
self._filters[k] = Variant("s", v)
else:
logger.warning("Filter '%s' is not currently supported." % k)
if "Transport" not in self._filters:
self._filters["Transport"] = Variant("s", "le")
@property
def discovered_devices(self) -> List[BLEDevice]:
# Reduce output.
discovered_devices = []
for path, props in self._devices.items():
if not props:
logger.debug(
"Disregarding %s since no properties could be obtained." % path
)
continue
name, address, _, path = _device_info(path, props)
if address is None:
continue
uuids = props.get("UUIDs", [])
manufacturer_data = props.get("ManufacturerData", {})
discovered_devices.append(
BLEDevice(
address,
name,
{"path": path, "props": props},
props.get("RSSI", 0),
uuids=uuids,
manufacturer_data=manufacturer_data,
)
)
return discovered_devices
# Helper methods
def _invoke_callback(self, path: str, message: Message) -> None:
"""Invokes the advertising data callback.
Args:
message: The D-Bus message that triggered the callback.
"""
if self._callback is None:
return
props = self._devices[path]
# Get all the information wanted to pack in the advertisement data
_local_name = props.get("Name")
_manufacturer_data = {
k: bytes(v) for k, v in props.get("ManufacturerData", {}).items()
}
_service_data = {k: bytes(v) for k, v in props.get("ServiceData", {}).items()}
_service_uuids = props.get("UUIDs", [])
# Pack the advertisement data
advertisement_data = AdvertisementData(
local_name=_local_name,
manufacturer_data=_manufacturer_data,
service_data=_service_data,
service_uuids=_service_uuids,
platform_data=(props, message),
)
device = BLEDevice(
props["Address"],
props["Alias"],
{"path": path, "props": props},
props.get("RSSI", 0),
)
self._callback(device, advertisement_data)
def _parse_msg(self, message: Message):
if message.message_type != MessageType.SIGNAL:
return
logger.debug(
"received D-Bus signal: {0}.{1} ({2}): {3}".format(
message.interface, message.member, message.path, message.body
)
)
if message.member == "InterfacesAdded":
# if a new device is discovered while we are scanning, add it to
# the discovered devices list
obj_path: str
interfaces_and_props: Dict[str, Dict[str, Variant]]
obj_path, interfaces_and_props = message.body
device_props = unpack_variants(
interfaces_and_props.get(defs.DEVICE_INTERFACE, {})
)
if device_props:
self._devices[obj_path] = device_props
self._invoke_callback(obj_path, message)
elif message.member == "InterfacesRemoved":
# if a device disappears while we are scanning, remove it from the
# discovered devices list
obj_path: str
interfaces: List[str]
obj_path, interfaces = message.body
if defs.DEVICE_INTERFACE in interfaces:
# Using pop to avoid KeyError if obj_path does not exist
self._devices.pop(obj_path, None)
elif message.member == "PropertiesChanged":
# Property change events basically mean that new advertising data
# was received or the RSSI changed. Either way, it lets us know
# that the device is active and we can add it to the discovered
# devices list.
interface: str
changed: Dict[str, Variant]
invalidated: List[str]
interface, changed, invalidated = message.body
if interface != defs.DEVICE_INTERFACE:
return
first_time_seen = False
if message.path not in self._devices:
if message.path not in self._cached_devices:
# This can happen when we start scanning. The "PropertyChanged"
# handler is attached before "GetManagedObjects" is called
# and so self._cached_devices is not assigned yet.
# This is not a problem. We just discard the property value
# since "GetManagedObjects" will return a newer value.
return
first_time_seen = True
self._devices[message.path] = self._cached_devices[message.path]
changed = unpack_variants(changed)
self._devices[message.path].update(changed)
# Only do advertising data callback if this is the first time the
# device has been seen or if an advertising data property changed.
# Otherwise we get a flood of callbacks from RSSI changing.
if first_time_seen or not _ADVERTISING_DATA_PROPERTIES.isdisjoint(
changed.keys()
):
self._invoke_callback(message.path, message)
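# Illustrative usage sketch (not part of the original module). Assumes a
# Linux host with BlueZ available; only names defined in this file are used.
if __name__ == "__main__":
    import asyncio

    async def _example_scan():
        scanner = BleakScannerBlueZDBus()  # defaults to adapter "hci0"
        await scanner.start()
        await asyncio.sleep(5.0)
        await scanner.stop()
        for device in scanner.discovered_devices:
            print(device.address, device.name)

    asyncio.run(_example_scan())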
|
algorithms/dp/knapsack.py | vansh-tiwari/algorithms | 22,426 | 12680635 |
"""
Given the capacity of the knapsack and items specified by weights and values,
return the maximum summarized value of the items that can be fit in the
knapsack.
Example:
capacity = 5, items(value, weight) = [(60, 5), (50, 3), (70, 4), (30, 2)]
result = 80 (items valued 50 and 30 can both be fit in the knapsack)
The time complexity is O(n * m) and the space complexity is O(m), where n is
the total number of items and m is the knapsack's capacity.
"""
class Item:
def __init__(self, value, weight):
self.value = value
self.weight = weight
def get_maximum_value(items, capacity):
dp = [0] * (capacity + 1)
for item in items:
for cur_weight in reversed(range(item.weight, capacity+1)):
dp[cur_weight] = max(dp[cur_weight], item.value + dp[cur_weight - item.weight])
return dp[capacity]
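# Quick usage check against the example in the module docstring
# (illustrative addition, not part of the original file).
if __name__ == "__main__":
    items = [Item(60, 5), Item(50, 3), Item(70, 4), Item(30, 2)]
    print(get_maximum_value(items, 5))  # expected: 80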
|
examples/23.ray_visibility.py | Neburski/NVISII | 149 | 12680638 |
import nvisii
import math
import PySide2
import colorsys
from PySide2.QtCore import *
from PySide2.QtWidgets import *
nvisii.initialize()
nvisii.resize_window(1000,1000)
nvisii.enable_denoiser()
# nvisii.configure_denoiser(False, False, True)
nvisii.set_max_bounce_depth(diffuse_depth=2, glossy_depth = 8, transparency_depth = 8, transmission_depth = 12, volume_depth = 2)
# Set the sky
nvisii.disable_dome_light_sampling()
nvisii.set_dome_light_color((0,0,0))
# Set camera
camera = nvisii.entity.create(
name = "camera",
transform = nvisii.transform.create(name = "camera_transform"),
camera = nvisii.camera.create(
name = "camera_camera",
aspect = 1.0
)
)
camera.get_transform().look_at(
at = (0, 0, 0.5), # at position
up = (0, 0, 1), # up vector
eye = (0, 5, 2) # eye position
)
nvisii.set_camera_entity(camera)
# Floor
floor = nvisii.entity.create(
name = "floor",
mesh = nvisii.mesh.create_plane("mesh_floor"),
transform = nvisii.transform.create("transform_floor"),
material = nvisii.material.create("material_floor")
)
floor.get_material().set_base_color((0.19,0.16,0.19))
floor.get_material().set_metallic(0)
floor.get_material().set_roughness(1)
floor.get_transform().set_scale((5,5,1))
# Mirror 1
mirror1 = nvisii.entity.create(
name = "mirror1",
mesh = nvisii.mesh.create_box("mesh_mirror1"),
transform = nvisii.transform.create("transform_mirror1"),
material = nvisii.material.create("material_mirror1")
)
mirror1.get_transform().look_at(eye = (-1.5, -1.5, .5), at = (0,0,.7), up = (0,0,1))
mirror1.get_material().set_base_color((1.,1.,1.))
mirror1.get_material().set_metallic(1)
mirror1.get_material().set_roughness(0)
mirror1.get_transform().set_scale((.7,.7,.1))
# Glass 1
glass1 = nvisii.entity.create(
name = "glass1",
mesh = nvisii.mesh.create_box("mesh_glass1"),
transform = nvisii.transform.create("transform_glass1"),
material = nvisii.material.create("material_glass1")
)
glass1.get_transform().look_at(eye = (1.5, 1.5, .5), at = (0,0,.7), up = (0,0,1))
glass1.get_material().set_base_color((1.,1.,1.))
glass1.get_material().set_transmission(1)
glass1.get_material().set_roughness(0)
glass1.get_transform().set_scale((.7,.7,.1))
# Mirror 2
mirror2 = nvisii.entity.create(
name = "mirror2",
mesh = nvisii.mesh.create_box("mesh_mirror2"),
transform = nvisii.transform.create("transform_mirror2"),
material = nvisii.material.create("material_mirror2")
)
mirror2.get_transform().look_at(eye = (1.5, -1.5, .5), at = (0,0,.7), up = (0,0,1))
mirror2.get_material().set_base_color((1.,1.,1.))
mirror2.get_material().set_metallic(1)
mirror2.get_material().set_roughness(0)
mirror2.get_transform().set_scale((.7,.7,.1))
# Glass 2
glass2 = nvisii.entity.create(
name = "glass2",
mesh = nvisii.mesh.create_box("mesh_glass2"),
transform = nvisii.transform.create("transform_glass2"),
material = nvisii.material.create("material_glass2")
)
glass2.get_transform().look_at(eye = (-1.5, 1.5, .5), at = (0,0,.7), up = (0,0,1))
glass2.get_material().set_base_color((1.,1.,1.))
glass2.get_material().set_transmission(1)
glass2.get_material().set_roughness(0)
glass2.get_transform().set_scale((.7,.7,.1))
# Fog
fog = nvisii.entity.create(
name = "fog",
volume = nvisii.volume.create_box("mesh_fog"),
transform = nvisii.transform.create("transform_fog"),
material = nvisii.material.create("material_fog")
)
fog.get_material().set_base_color((1.,1.,1.))
fog.get_material().set_transmission(1)
fog.get_material().set_roughness(0)
fog.get_volume().set_scale(100)
# Light
light = nvisii.entity.create(
name = "light",
light = nvisii.light.create("light"),
transform = nvisii.transform.create("light"),
mesh = nvisii.mesh.create_sphere("light")
)
light.get_transform().set_position((0,0,5))
light.get_transform().set_scale((.1,.1,.1))
light.get_light().set_exposure(7)
# Light blocker
blocker = nvisii.entity.create(
name = "blocker",
mesh = nvisii.mesh.create_capped_tube("blocker", innerRadius = .04),
transform = nvisii.transform.create("blocker"),
material = nvisii.material.create("blocker")
)
blocker.get_transform().set_scale((10,10,.01))
blocker.get_transform().set_position((0,0,3.0))
# Teapot
teapotahedron = nvisii.entity.create(
name="teapotahedron",
mesh = nvisii.mesh.create_teapotahedron("teapotahedron", segments = 32),
transform = nvisii.transform.create("teapotahedron"),
material = nvisii.material.create("teapotahedron")
)
teapotahedron.get_transform().set_rotation(nvisii.angleAxis(nvisii.pi() / 4.0, (0,0,1)))
teapotahedron.get_transform().set_position((0,0,0))
teapotahedron.get_transform().set_scale((0.4, 0.4, 0.4))
teapotahedron.get_material().set_base_color((255.0 / 255.0, 100.0 / 255.0, 2.0 / 256.0))
teapotahedron.get_material().set_roughness(0.0)
teapotahedron.get_material().set_specular(1.0)
teapotahedron.get_material().set_metallic(1.0)
# Make a Qt window with controls for the camera, the teapot's ray visibility flags, fog strength, and light height
app = QApplication([]) # Start an application.
window = QWidget() # Create a window.
layout = QVBoxLayout() # Create a layout.
def rotateCamera(value):
value = value / 100.0
cam_pos = camera.get_transform().get_position()
camera.get_transform().look_at(
at = (0, 0, 0.5), # at position
up = (0, 0, 1), # up vector
eye = (5 * math.cos(value * 2 * nvisii.pi()), 5 * math.sin(value * 2 * nvisii.pi()), cam_pos[2]) # eye position
)
rotateCamera(0)
dial = QDial()
dial.setWrapping(True)
dial.valueChanged[int].connect(rotateCamera)
layout.addWidget(QLabel('Camera rotation'))
layout.addWidget(dial)
def rotateCameraElevation(value):
# print(value)
value = value / 100
cam_pos = camera.get_transform().get_position()
camera.get_transform().look_at(
at = (0, 0, 0.5), # at position
up = (0, 0, 1), # up vector
eye = (cam_pos[0], cam_pos[1], 0.1 + 2.5*value) # eye position
)
# print(value, 2 * math.cos(value * 2 * nvisii.pi()))
slider = QSlider(Qt.Horizontal)
slider.valueChanged[int].connect(rotateCameraElevation)
slider.setValue(40)
layout.addWidget(QLabel('Camera Elevation'))
layout.addWidget(slider)
# Add some toggles to demonstrate how the set_visibility function works
camera_visibility = True
diffuse_visibility = True
glossy_visibility = True
transmission_visibility = True
scatter_visibility = True
shadow_visibility = True
def updateVisibility():
global camera_visibility
global diffuse_visibility
global glossy_visibility
global transmission_visibility
global scatter_visibility
global shadow_visibility
teapotahedron.set_visibility(
camera = camera_visibility,
diffuse = diffuse_visibility,
glossy = glossy_visibility,
transmission = transmission_visibility,
volume_scatter = scatter_visibility,
shadow = shadow_visibility)
def toggleCamera():
global camera_visibility
camera_visibility = not camera_visibility
updateVisibility()
button = QPushButton("toggleCamera")
button.clicked.connect(toggleCamera)
layout.addWidget(button)
def toggleDiffuse():
global diffuse_visibility
diffuse_visibility = not diffuse_visibility
updateVisibility()
button = QPushButton("toggleDiffuse")
button.clicked.connect(toggleDiffuse)
layout.addWidget(button)
def toggleGlossy():
global glossy_visibility
glossy_visibility = not glossy_visibility
updateVisibility()
button = QPushButton("toggleGlossy")
button.clicked.connect(toggleGlossy)
layout.addWidget(button)
def toggleTransmission():
global transmission_visibility
transmission_visibility = not transmission_visibility
updateVisibility()
button = QPushButton("toggleTransmission")
button.clicked.connect(toggleTransmission)
layout.addWidget(button)
def toggleScattering():
global scatter_visibility
scatter_visibility = not scatter_visibility
updateVisibility()
button = QPushButton("toggleScattering")
button.clicked.connect(toggleScattering)
layout.addWidget(button)
def toggleShadows():
global shadow_visibility
shadow_visibility = not shadow_visibility
updateVisibility()
button = QPushButton("toggleShadows")
button.clicked.connect(toggleShadows)
layout.addWidget(button)
def setFogStrength(value):
value = (100 - value) * 2 + 10
fog.get_volume().set_scale(value)
setFogStrength(100)
slider = QSlider(Qt.Horizontal)
slider.valueChanged[int].connect(setFogStrength)
slider.setValue(100)
layout.addWidget(QLabel('Fog Strength'))
layout.addWidget(slider)
def setLightHeight(value):
value = value / 100.0
light.get_transform().set_position((0,0,3 + value * 2))
setLightHeight(50)
slider = QSlider(Qt.Horizontal)
slider.valueChanged[int].connect(setLightHeight)
slider.setValue(50)
layout.addWidget(QLabel('Light Height'))
layout.addWidget(slider)
window.setLayout(layout)
window.show()
app.exec_()
nvisii.deinitialize() |
baselines/NGRAM/ngram_rerank.py | sordonia/HierarchicalEncoderDecoder | 116 | 12680662 | import os
import argparse
import cPickle
import operator
import itertools
from Common.psteff import *
def rerank(model_file, ctx_file, rnk_file):
pstree = PSTInfer()
pstree.load(model_file)
output_file = open(rnk_file + "_NGRAM.gen", "w")
coverage = 0
for num_line, (ctx_line, rnk_line) in \
enumerate(itertools.izip(open(ctx_file), open(rnk_file))):
suffix = ctx_line.strip().split('\t')
candidates = rnk_line.strip().split('\t')
found, not_found = pstree.rerank(suffix, candidates, exact_match=True)
coverage += len(found) != 0
print >> output_file, '\t'.join(found + not_found)
output_file.close()
print 'Coverage {}/{}'.format(coverage, num_line+1)
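# Illustrative invocation (the file names below are made up for the example):
#   rerank('session.model.pkl', 'test.ctx', 'test.rnk')
# which writes the reranked candidate lists to 'test.rnk_NGRAM.gen'.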
|
qtpy/QtPrintSupport.py | spyder-ide/qtpy | 632 | 12680663 |
#
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtPrintSupport classes and functions.
"""
from . import PYQT5, PYQT6, PYSIDE6, PYSIDE2, PythonQtError
if PYQT5:
from PyQt5.QtPrintSupport import *
elif PYQT6:
from PyQt6.QtPrintSupport import *
QPageSetupDialog.exec_ = QPageSetupDialog.exec
QPrintDialog.exec_ = QPrintDialog.exec
QPrintPreviewWidget.print_ = QPrintPreviewWidget.print
elif PYSIDE6:
from PySide6.QtPrintSupport import *
elif PYSIDE2:
from PySide2.QtPrintSupport import *
else:
raise PythonQtError('No Qt bindings could be found')
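# Illustrative usage sketch (not part of the original module); assumes one of
# the supported bindings is installed and a QApplication is already running:
#
#     from qtpy.QtPrintSupport import QPrinter, QPrintDialog
#     printer = QPrinter()
#     dialog = QPrintDialog(printer)
#     dialog.exec_()  # exec_ is aliased to exec above for PyQt6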
|
eventsourcing/tests/test_processapplication.py | johnbywater/eventsourcing | 972 | 12680668 |
from typing import List
from unittest.case import TestCase
from eventsourcing.dispatch import singledispatchmethod
from eventsourcing.domain import AggregateEvent
from eventsourcing.persistence import IntegrityError, Notification, Transcoder
from eventsourcing.system import (
Follower,
Leader,
ProcessApplication,
ProcessEvent,
Promptable,
)
from eventsourcing.tests.test_aggregate import BankAccount
from eventsourcing.tests.test_application_with_popo import (
BankAccounts,
EmailAddressAsStr,
)
from eventsourcing.tests.test_processingpolicy import EmailNotification
class TestProcessApplication(TestCase):
def test_pull_and_process(self):
leader_cls = type(
BankAccounts.__name__,
(BankAccounts, Leader),
{},
)
accounts = leader_cls()
email_process = EmailProcess()
email_process.follow(
accounts.name,
accounts.log,
)
section = email_process.log["1,5"]
self.assertEqual(len(section.items), 0)
accounts.open_account("Alice", "<EMAIL>")
email_process.pull_and_process(BankAccounts.name)
section = email_process.log["1,5"]
self.assertEqual(len(section.items), 1)
# Check we have processed the first event.
self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name), 1)
# Check trying to reprocess the first event causes an IntegrityError.
with self.assertRaises(IntegrityError):
email_process.pull_and_process(BankAccounts.name, start=1)
# Check we can continue from the next position.
email_process.pull_and_process(BankAccounts.name, start=2)
# Check we haven't actually processed anything further.
self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name), 1)
section = email_process.log["1,5"]
self.assertEqual(len(section.items), 1)
# Subscribe for notifications.
accounts.lead(PromptForwarder(email_process))
# Create another notification.
accounts.open_account("Bob", "<EMAIL>")
# Check we have processed the next notification.
section = email_process.log["1,5"]
self.assertEqual(len(section.items), 2)
# Check we have actually processed the second event.
self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name), 2)
class EmailProcess(ProcessApplication):
def register_transcodings(self, transcoder: Transcoder) -> None:
super(EmailProcess, self).register_transcodings(transcoder)
transcoder.register(EmailAddressAsStr())
@singledispatchmethod
def policy(
self,
domain_event: AggregateEvent,
process_event: ProcessEvent,
):
"""Default policy"""
@policy.register(BankAccount.Opened)
def _(
self,
domain_event: AggregateEvent,
process_event: ProcessEvent,
):
assert isinstance(domain_event, BankAccount.Opened)
notification = EmailNotification.create(
to=domain_event.email_address,
subject="Your New Account",
message="Dear {}, ...".format(domain_event.full_name),
)
process_event.collect_events(notification)
class PromptForwarder(Promptable):
def __init__(self, application: Follower):
self.application = application
def receive_notifications(
self, leader_name: str, notifications: List[Notification]
) -> None:
self.application.pull_and_process(leader_name, start=notifications[0].id)
|
specs/matchers/built_in/end_with_spec.py | danibaena/expects | 189 | 12680680 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from expects import *
from expects.testing import failure
IRRELEVANT_ARGS = (1, 2)
with describe('end_with'):
with before.each:
self.str = 'My foo string'
self.lst = [1, 2, 3]
self.dct = {'bar': 0, 'baz': 1}
self.ordered_dct = OrderedDict([('bar', 0), ('baz', 1)])
with it('should pass if string ends with string'):
expect(self.str).to(end_with(self.str[5:]))
with it('should pass if list ends with arg'):
expect(self.lst).to(end_with(self.lst[-1]))
with it('should pass if list ends with args'):
expect(self.lst).to(end_with(*self.lst[-2:]))
with it('should pass if ordered dict ends with keys'):
expected_args = list(self.ordered_dct)[:2]
expect(self.ordered_dct).to(end_with(*expected_args))
with it('should fail if string does not end with string'):
str_ = 'My foo string'
with failure('but: ends with {0!r}'.format(str_[-5:])):
expect(self.str).to(end_with(str_[:5]))
with it('should fail if list ends with first arg but not second'):
with failure('but: ends with {0!r}'.format(self.lst[-2:])):
expect(self.lst).to(end_with(self.lst[-1], self.lst[-1]))
with it('should fail if subject is a dict'):
with failure('but: does not have ordered keys'):
expect(self.dct).to(end_with(*IRRELEVANT_ARGS))
with context('when negated'):
with it('should pass if string does not end with string'):
expect(self.str).not_to(end_with(self.str[:5]))
with it('should pass if list does not end with args'):
expect(self.lst).not_to(end_with(*self.lst[:2]))
with it('should pass if list ends with first arg but not second'):
expected_args = self.lst[-1], self.lst[-1]
expect(self.lst).not_to(end_with(*expected_args))
with it('should fail if subject is a dict'):
with failure('but: does not have ordered keys'):
expect(self.dct).not_to(end_with(*IRRELEVANT_ARGS))
|
setup.py | hiroki-sawano/puput | 554 | 12680696 |
import os
import re
import codecs
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def get_metadata(package, field):
"""
Return package data as listed in `__{field}__` in `init.py`.
"""
init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
return re.search("^__{}__ = ['\"]([^'\"]+)['\"]".format(field), init_py, re.MULTILINE).group(1)
setup(
name='puput',
version=get_metadata('puput', 'version'),
packages=find_packages(exclude=("example*", "tests*")),
include_package_data=True,
keywords="django wagtail puput blog cms app",
description='A Django blog app implemented in Wagtail.',
long_description=codecs.open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8').read(),
install_requires=[
'Django>=2.0',
'wagtail>=2.7,<3.0',
'django-el-pagination>=3.2.4',
'django-social-share>=1.3.0',
'django-colorful>=1.3'
],
url='http://github.com/APSL/puput',
author=get_metadata('puput', 'author'),
author_email=get_metadata('puput', 'email'),
long_description_content_type='text/x-rst',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: OS Independent',
'Topic :: Software Development'
]
)
|
python/bench/bench_cross_entropy.py | shauheen/triton | 3,352 | 12680705 | import torch
import triton
confs = [
triton.testing.Benchmark(
x_names = ['N'],
x_vals = [128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192],
line_arg = 'provider',
line_vals = ['triton', 'torch'],
line_names = ['Triton', 'Torch'],
ylabel = 'GBPS',
plot_name = f'{mode}-2048',
args = {'M': 2048, 'dtype': torch.float16, 'mode': mode}
)\
for mode in ['forward', 'backward']
]
@triton.testing.perf_report(confs)
def bench_op(M, N, dtype, mode, provider):
# create inputs
x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
num_gb = (2 * x.numel() * x.element_size() * 1e-9)
gbps = lambda ms: num_gb / ms * 1e3
# forward pass
op = {'torch': torch.nn.CrossEntropyLoss(reduction='none'), \
'triton': triton.ops.cross_entropy}[provider]
if mode == 'forward':
mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(x, idx))
if mode == 'backward':
y = op(x, idx)
dy = torch.randn_like(y)
fn = lambda: y.backward(dy, retain_graph=True)
mean_ms, min_ms, max_ms = triton.testing.do_bench(fn, grad_to_none=[x])
return gbps(mean_ms), gbps(min_ms), gbps(max_ms)
if __name__ == '__main__':
bench_op.run(print_data=True) |
tests/integration/ERC20CRV/test_mintable_in_timeframe.py | AqualisDAO/curve-dao-contracts | 217 | 12680742 |
import pytest
from brownie.test import given, strategy
from tests.conftest import INITIAL_SUPPLY, YEAR, YEAR_1_SUPPLY, approx
@pytest.fixture(scope="module", autouse=True)
def initial_setup(chain, token):
chain.sleep(86401)
token.update_mining_parameters()
@given(time=strategy("decimal", min_value=1, max_value=7))
def test_mintable_in_timeframe(accounts, token, theoretical_supply, time, chain):
t0 = token.start_epoch_time()
chain.sleep(int(10 ** time))
chain.mine()
t1 = chain[-1].timestamp
if t1 - t0 >= YEAR:
token.update_mining_parameters({"from": accounts[0]})
t1 = chain[-1].timestamp
available_supply = token.available_supply()
mintable = token.mintable_in_timeframe(t0, t1)
assert (
available_supply - (INITIAL_SUPPLY * 10 ** 18)
) >= mintable # Should only round down, not up
if t1 == t0:
assert mintable == 0
else:
assert (available_supply - (INITIAL_SUPPLY * 10 ** 18)) / mintable - 1 < 1e-7
assert approx(theoretical_supply(), available_supply, 1e-16)
@given(time1=strategy("uint", max_value=YEAR), time2=strategy("uint", max_value=YEAR))
def test_random_range_year_one(token, chain, accounts, time1, time2):
creation_time = token.start_epoch_time()
start, end = sorted((creation_time + time1, creation_time + time2))
rate = YEAR_1_SUPPLY // YEAR
assert token.mintable_in_timeframe(start, end) == rate * (end - start)
@given(start=strategy("uint", max_value=YEAR * 6), duration=strategy("uint", max_value=YEAR))
def test_random_range_multiple_epochs(token, chain, accounts, start, duration):
creation_time = token.start_epoch_time()
start += creation_time
end = duration + start
start_epoch = (start - creation_time) // YEAR
end_epoch = (end - creation_time) // YEAR
rate = int(YEAR_1_SUPPLY // YEAR / (2 ** 0.25) ** start_epoch)
for i in range(end_epoch):
chain.sleep(YEAR)
chain.mine()
token.update_mining_parameters({"from": accounts[0]})
if start_epoch == end_epoch:
assert approx(token.mintable_in_timeframe(start, end), rate * (end - start), 2e-16)
else:
assert token.mintable_in_timeframe(start, end) < rate * end
@given(duration=strategy("uint", min_value=1, max_value=YEAR))
def test_available_supply(chain, web3, token, duration):
creation_time = token.start_epoch_time()
initial_supply = token.totalSupply()
rate = token.rate()
chain.sleep(duration)
chain.mine()
expected = initial_supply + (web3.eth.getBlock("latest")["timestamp"] - creation_time) * rate
assert token.available_supply() == expected
|
pcdet/models/backbones_3d/pfe/__init__.py | TillBeemelmanns/OpenPCDet | 184 | 12680753 | from .voxel_set_abstraction import VoxelSetAbstraction
__all__ = {
'VoxelSetAbstraction': VoxelSetAbstraction
}
|
algoexpert.io/python/Find_Three_Largest_Number.py | its-sushant/coding-interview-gym | 713 | 12680759 |
# My solution using a heap
import heapq
def findThreeLargestNumbers(array):
hp = []
for num in array:
if len(hp) < 3:
heapq.heappush(hp, num)
else:
if hp[0] < num:
heapq.heappop(hp)
heapq.heappush(hp, num)
return sorted(hp)
# Solution providd by Algoexpert
# O(n) time | O(1) space
def find_three_largest_number(array):
three_largest_number = [None, None, None]
for num in array:
update_largest(num, three_largest_number)
return three_largest_number
def update_largest(number, three_largest_number):
if three_largest_number[2] is None or number > three_largest_number[2]:
shift_and_update(three_largest_number, number, 2)
elif three_largest_number[1] is None or number > three_largest_number[1]:
shift_and_update(three_largest_number, number, 1)
elif three_largest_number[0] is None or number > three_largest_number[0]:
shift_and_update(three_largest_number, number, 0)
def shift_and_update(three_largest_number, number, index):
for i in range(index + 1):
if i == index:
three_largest_number[index] = number
else:
three_largest_number[i] = three_largest_number[i + 1]
given_numbers = [141, 1, 17, -7, -17, -27, 18, 541, 8, 7, 7]
largest_numbers = find_three_largest_number(given_numbers)
print("Largest numbers are: ", largest_numbers)
|
microraiden/utils/__init__.py | andrevmatos/microraiden | 417 | 12680778 |
from .crypto import (
generate_privkey,
pubkey_to_addr,
privkey_to_addr,
addr_from_sig,
pack,
keccak256,
keccak256_hex,
sign,
sign_transaction,
eth_message_hash,
eth_sign,
eth_verify,
eth_sign_typed_data_message,
eth_sign_typed_data,
eth_sign_typed_data_message_eip,
eth_sign_typed_data_eip,
get_balance_message,
sign_balance_proof,
verify_balance_proof,
sign_close,
verify_closing_sig
)
from .contract import (
create_signed_transaction,
create_transaction,
create_signed_contract_transaction,
create_contract_transaction,
create_transaction_data,
get_logs,
get_event_blocking,
wait_for_transaction
)
from .private_key import (
check_permission_safety,
get_private_key
)
from .misc import (
get_function_kwargs,
pop_function_kwargs
)
# Entries must be strings so that ``from microraiden.utils import *`` works.
__all__ = [
    'generate_privkey',
    'pubkey_to_addr',
    'privkey_to_addr',
    'addr_from_sig',
    'pack',
    'keccak256',
    'keccak256_hex',
    'sign',
    'sign_transaction',
    'eth_message_hash',
    'eth_sign',
    'eth_verify',
    'eth_sign_typed_data_message',
    'eth_sign_typed_data',
    'eth_sign_typed_data_message_eip',
    'eth_sign_typed_data_eip',
    'get_balance_message',
    'sign_balance_proof',
    'verify_balance_proof',
    'sign_close',
    'verify_closing_sig',
    'create_signed_transaction',
    'create_transaction',
    'create_signed_contract_transaction',
    'create_contract_transaction',
    'create_transaction_data',
    'get_logs',
    'get_event_blocking',
    'wait_for_transaction',
    'check_permission_safety',
    'get_private_key',
    'get_function_kwargs',
    'pop_function_kwargs',
]
|
DQM/SiStripMonitorHardware/python/siStripBuildTrackerMap_cfi.py | ckamtsikis/cmssw | 852 | 12680783 |
import FWCore.ParameterSet.Config as cms
siStripBuildTrackerMap = cms.EDAnalyzer(
"BuildTrackerMapPlugin",
#input root file containing histograms
InputFileName = cms.untracked.string('DQMStore.root'),
DoDifference = cms.untracked.bool(False),
InputFileNameForDiff = cms.untracked.string('DQMStore.root'),
#name of tkHistoMap to dump
TkHistoMapNameVec = cms.untracked.vstring('TkHMap_MeanCMAPV0','TkHMap_MeanCMAPV1','TkHMap_MeanCMAPV0minusAPV1','TkHMap_RmsCMAPV0','TkHMap_RmsCMAPV1','TkHMap_RmsCMAPV0minusAPV1'),
MinValueVec = cms.untracked.vdouble(120,120,-20,0,0,0),
MaxValueVec = cms.untracked.vdouble(140,140,20,10,10,10),
MechanicalView = cms.untracked.bool(True),
#Name of top folder (SiStrip/MechanicalView appended automatically)
HistogramFolderName = cms.untracked.string('DQMData/'),
#Whether to dump buffer info and raw data if any error is found:
#1=errors, 2=minimum info, 3=full debug with printing of the data buffer of each FED per event.
PrintDebugMessages = cms.untracked.uint32(1),
TkmapParameters = cms.PSet(
loadFedCabling = cms.untracked.bool(True),
# trackerdatPath = cms.untracked.string('CommonTools/TrackerMap/data/'),
# trackermaptxtPath = cms.untracked.string('CommonTools/TrackerMap/data/')
)
)
|
crabageprediction/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py | 13rianlucero/CrabAgePrediction | 28,899 | 12680806 | import numpy as np
import pytest
from pandas import (
Timedelta,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
from pandas.tseries.offsets import (
Day,
Second,
)
class TestTimedeltas:
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit="D")
result = timedelta_range("0 days", periods=5, freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit="D")
result = timedelta_range("0 days", "10 days", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day()
result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2)
result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit="T") * 30
result = timedelta_range("0 days", freq="30T", periods=50)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")]
)
def test_linspace_behavior(self, periods, freq):
# GH 20976
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = (
"Of the four parameters: start, end, periods, and freq, "
"exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(end="5 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(periods=2)
with pytest.raises(ValueError, match=msg):
timedelta_range()
# too many params
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days", end="5 days", periods=10, freq="H")
@pytest.mark.parametrize(
"start, end, freq, expected_periods",
[
("1D", "10D", "2D", (10 - 1) // 2 + 1),
("2D", "30D", "3D", (30 - 2) // 3 + 1),
("2s", "50s", "5s", (50 - 2) // 5 + 1),
# tests that worked before GH 33498:
("4D", "16D", "3D", (16 - 4) // 3 + 1),
("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1),
],
)
def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods):
# GH 33498 only the cases where `(end % freq) == 0` used to fail
res = timedelta_range(start=start, end=end, freq=freq)
assert Timedelta(start) == res[0]
assert Timedelta(end) >= res[-1]
assert len(res) == expected_periods
def test_timedelta_range_infer_freq(self):
# https://github.com/pandas-dev/pandas/issues/35897
result = timedelta_range("0s", "1s", periods=31)
assert result.freq is None
|
usaspending_api/idvs/v2/views/activity.py | g4brielvs/usaspending-api | 217 | 12680808 | from collections import OrderedDict
from copy import copy, deepcopy
from psycopg2.sql import Identifier, Literal, SQL
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.generic_helper import get_pagination_metadata
from usaspending_api.common.helpers.sql_helpers import execute_sql_to_ordered_dictionary
from usaspending_api.common.validator.award import get_internal_or_generated_award_id_model
from usaspending_api.common.validator.pagination import PAGINATION
from usaspending_api.common.validator.tinyshield import TinyShield
# In gather_award_ids, if any awards are found for IDVs in the second half of
# the union, by definition, they have to be grandchildren so even though the
# grandchild boolean appears to be applying to the IDV, it will actually
# trickle down to its children.
ACTIVITY_SQL = SQL(
"""
with gather_award_ids as (
select award_id,
false grandchild
from parent_award
where {award_id_column} = {award_id}
union all
select cpa.award_id,
true grandchild
from parent_award ppa
inner join parent_award cpa on cpa.parent_award_id = ppa.award_id
where ppa.{award_id_column} = {award_id}
)
select
ca.id award_id,
ta.name awarding_agency,
ca.awarding_agency_id awarding_agency_id,
ca.generated_unique_award_id,
tf.period_of_perf_potential_e period_of_performance_potential_end_date,
pa.id parent_award_id,
pa.generated_unique_award_id parent_generated_unique_award_id,
ca.parent_award_piid,
ca.total_obligation obligated_amount,
ca.base_and_all_options_value awarded_amount,
ca.period_of_performance_start_date,
ca.piid,
rl.legal_business_name recipient_name,
rp.recipient_hash || '-' || rp.recipient_level recipient_id,
gaids.grandchild
from
gather_award_ids gaids
inner join awards pa on pa.id = gaids.award_id
inner join awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id and
ca.type not like 'IDV%'
{hide_edges_awarded_amount}
left outer join transaction_fpds tf on tf.transaction_id = ca.latest_transaction_id
left outer join recipient_lookup rl on rl.duns = tf.awardee_or_recipient_uniqu
left outer join recipient_profile rp on
rp.recipient_hash = rl.recipient_hash and
rp.recipient_level = case when tf.ultimate_parent_unique_ide is null then 'R' else 'C' end
left outer join agency a on a.id = ca.awarding_agency_id
left outer join toptier_agency ta on ta.toptier_agency_id = a.toptier_agency_id
{hide_edges_end_date}
order by
ca.total_obligation desc, ca.id desc
limit {limit} offset {offset}
"""
)
# So, as it turns out, we already count all descendant contracts. Go us!
# There's always the chance these may not 100% match the actual count for a
# myriad of reasons, but they pretty much all involve failed operations
# processes or bad data so we're going to go ahead and give the benefit of
# the doubt and assume everything works as expected.
COUNT_ACTIVITY_SQL = SQL(
"""
select rollup_contract_count
from parent_award
where {award_id_column} = {award_id}
"""
)
COUNT_ACTIVITY_HIDDEN_SQL = SQL(
"""
with gather_award_ids as (
select award_id,
false grandchild
from parent_award
where {award_id_column} = {award_id}
union all
select cpa.award_id,
true grandchild
from parent_award ppa
inner join parent_award cpa on cpa.parent_award_id = ppa.award_id
where ppa.{award_id_column} = {award_id}
)
select
count(*) rollup_contract_count
from
gather_award_ids gaids
inner join awards pa on pa.id = gaids.award_id
inner join awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id and
ca.type not like 'IDV%'
{hide_edges_awarded_amount}
left outer join transaction_fpds tf on tf.transaction_id = ca.latest_transaction_id
{hide_edges_end_date}
"""
)
def _prepare_tiny_shield_models():
# This endpoint has a fixed sort. No need for "sort" or "order".
models = [copy(p) for p in PAGINATION if p["name"] in ("page", "limit")]
models.extend([get_internal_or_generated_award_id_model()])
models.extend(
[{"key": "hide_edge_cases", "name": "hide_edge_cases", "type": "boolean", "optional": True, "default": False}]
)
return models
TINY_SHIELD_MODELS = _prepare_tiny_shield_models()
class IDVActivityViewSet(APIView):
"""
Returns award funding info for children and grandchildren of an IDV. Used
to power the Activity visualization on IDV Summary page.
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/idvs/activity.md"
@staticmethod
def _parse_and_validate_request(request: dict) -> dict:
return TinyShield(deepcopy(TINY_SHIELD_MODELS)).block(request)
@staticmethod
def _business_logic(request_data: dict) -> tuple:
# By this point, our award_id has been validated and cleaned up by
# TinyShield. We will either have an internal award id that is an
# integer or a generated award id that is a string.
award_id = request_data["award_id"]
hide_edge_cases = request_data.get("hide_edge_cases")
hide_edges_awarded_amount = ""
hide_edges_end_date = ""
award_id_column = "award_id" if type(award_id) is int else "generated_unique_award_id"
if hide_edge_cases:
hide_edges_awarded_amount = "and ca.base_and_all_options_value > 0 and ca.total_obligation > 0"
hide_edges_end_date = "where tf.period_of_perf_potential_e is not null"
sql = COUNT_ACTIVITY_HIDDEN_SQL.format(
award_id_column=Identifier(award_id_column),
award_id=Literal(award_id),
hide_edges_awarded_amount=SQL(hide_edges_awarded_amount),
hide_edges_end_date=SQL(hide_edges_end_date),
)
else:
sql = COUNT_ACTIVITY_SQL.format(award_id_column=Identifier(award_id_column), award_id=Literal(award_id))
overall_count_results = execute_sql_to_ordered_dictionary(sql)
overall_count = overall_count_results[0]["rollup_contract_count"] if overall_count_results else 0
sql = ACTIVITY_SQL.format(
award_id_column=Identifier(award_id_column),
award_id=Literal(award_id),
limit=Literal(request_data["limit"] + 1),
offset=Literal((request_data["page"] - 1) * request_data["limit"]),
hide_edges_awarded_amount=SQL(hide_edges_awarded_amount),
hide_edges_end_date=SQL(hide_edges_end_date),
)
return execute_sql_to_ordered_dictionary(sql), overall_count
@cache_response()
def post(self, request: Request) -> Response:
request_data = self._parse_and_validate_request(request.data)
results, overall_count = self._business_logic(request_data)
page_metadata = get_pagination_metadata(overall_count, request_data["limit"], request_data["page"])
response = OrderedDict((("results", results[: request_data["limit"]]), ("page_metadata", page_metadata)))
return Response(response)
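# Example request body for this endpoint (illustrative only; the field names
# come from the TinyShield models above, and the award id value is made up):
#
# {
#     "award_id": "CONT_IDV_EXAMPLE_0001",
#     "limit": 10,
#     "page": 1,
#     "hide_edge_cases": false
# }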
|
reviewboard/hostingsvcs/tests/test_bitbucket.py | seekingalpha/reviewboard | 921 | 12680830 |
"""Unit tests for the Bitbucket hosting service."""
from __future__ import unicode_literals
import logging
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.utils.safestring import SafeText
from djblets.testing.decorators import add_fixtures
from reviewboard.hostingsvcs.bitbucket import BitbucketAuthForm
from reviewboard.hostingsvcs.errors import (AuthorizationError,
RepositoryError)
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
from reviewboard.reviews.models import ReviewRequest
from reviewboard.scmtools.core import Branch, Commit
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
class BitbucketTestCase(HostingServiceTestCase):
"""Base class for Bitbucket test suites."""
service_name = 'bitbucket'
fixtures = ['test_scmtools']
default_account_data = {
'password': encrypt_password(HostingServiceTestCase.default_password),
}
default_repository_extra_data = {
'bitbucket_repo_name': 'myrepo',
}
class BitbucketTests(BitbucketTestCase):
"""Unit tests for the Bitbucket hosting service."""
def test_service_support(self):
"""Testing Bitbucket service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertTrue(self.service_class.supports_repositories)
def test_get_repository_fields_with_git_and_personal_plan(self):
"""Testing Bitbucket.get_repository_fields for Git and plan=personal"""
self.assertEqual(
self.get_repository_fields(
'Git',
fields={
'bitbucket_repo_name': 'myrepo',
},
plan='personal'
),
{
'path': '[email protected]:myuser/myrepo.git',
'mirror_path': ('https://[email protected]/myuser/'
'myrepo.git'),
})
def test_get_repository_fields_with_git_and_team_plan(self):
"""Testing Bitbucket.get_repository_fields for Git and plan=team"""
self.assertEqual(
self.get_repository_fields(
'Git',
fields={
'bitbucket_team_name': 'myteam',
'bitbucket_team_repo_name': 'myrepo',
},
plan='team'
),
{
'path': 'git<EMAIL>:myteam/myrepo.git',
'mirror_path': ('https://[email protected]/myteam/'
'myrepo.git'),
})
def test_get_repository_fields_with_git_and_other_user_plan(self):
"""Testing Bitbucket.get_repository_fields for Git and plan=other-user
"""
self.assertEqual(
self.get_repository_fields(
'Git',
fields={
'bitbucket_other_user_username': 'someuser',
'bitbucket_other_user_repo_name': 'myrepo',
},
plan='other-user'
),
{
'path': 'git<EMAIL>:someuser/myrepo.git',
'mirror_path': ('https://[email protected]/someuser/'
'myrepo.git'),
})
def test_get_bug_tracker_field_with_personal_plan(self):
"""Testing Bitbucket.get_bug_tracker_field with plan=personal"""
self.assertTrue(self.service_class.get_bug_tracker_requires_username(
plan='personal'))
self.assertEqual(
self.service_class.get_bug_tracker_field(
'personal',
{
'bitbucket_repo_name': 'myrepo',
'hosting_account_username': 'myuser',
}),
'https://bitbucket.org/myuser/myrepo/issue/%s/')
def test_get_bug_tracker_field_with_team_plan(self):
"""Testing Bitbucket.get_bug_tracker_field with plan=team"""
self.assertFalse(self.service_class.get_bug_tracker_requires_username(
plan='team'))
self.assertEqual(
self.service_class.get_bug_tracker_field(
'team',
{
'bitbucket_team_name': 'myteam',
'bitbucket_team_repo_name': 'myrepo',
}),
'https://bitbucket.org/myteam/myrepo/issue/%s/')
def test_get_bug_tracker_field_with_other_user_plan(self):
"""Testing Bitbucket.get_bug_tracker_field with plan=other-user"""
self.assertFalse(self.service_class.get_bug_tracker_requires_username(
plan='other-user'))
self.assertEqual(
self.service_class.get_bug_tracker_field(
'other-user',
{
'bitbucket_other_user_username': 'someuser',
'bitbucket_other_user_repo_name': 'myrepo',
}),
'https://bitbucket.org/someuser/myrepo/issue/%s/')
def test_get_repository_hook_instructions(self):
"""Testing BitBucket.get_repository_hook_instructions"""
account = self.create_hosting_account()
repository = self.create_repository(hosting_account=account)
hooks_uuid = repository.get_or_create_hooks_uuid()
request = RequestFactory().get(path='/')
request.user = User.objects.create(username='test-user')
content = repository.hosting_service.get_repository_hook_instructions(
request=request,
repository=repository)
self.assertIsInstance(content, SafeText)
self.assertIn(
('https://bitbucket.org/myuser/myrepo/admin/addon/admin/'
'bitbucket-webhooks/bb-webhooks-repo-admin'),
content)
self.assertIn(
('http://example.com/repos/1/bitbucket/hooks/%s/close-submitted/'
% hooks_uuid),
content)
self.assertIn('Review Board supports closing', content)
self.assertIn('<code>Review Board</code>', content)
def test_check_repository_with_personal_plan(self):
"""Testing Bitbucket.check_repository with plan=personal"""
with self.setup_http_test(payload=b'{"scm": "git"}',
expected_http_calls=1) as ctx:
ctx.service.check_repository(bitbucket_repo_name='myrepo',
plan='personal',
tool_name='Git')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo'
'?fields=scm'))
def test_check_repository_with_team_plan(self):
"""Testing Bitbucket.check_repository with plan=team"""
with self.setup_http_test(payload=b'{"scm": "git"}',
expected_http_calls=1) as ctx:
ctx.service.check_repository(bitbucket_team_name='myteam',
bitbucket_team_repo_name='myrepo',
tool_name='Git',
plan='team')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myteam/myrepo'
'?fields=scm'))
def test_check_repository_with_other_user_plan(self):
"""Testing Bitbucket.check_repository with plan=other-user"""
with self.setup_http_test(payload=b'{"scm": "git"}',
expected_http_calls=1) as ctx:
ctx.service.check_repository(
bitbucket_other_user_username='someuser',
bitbucket_other_user_repo_name='myrepo',
plan='other-user',
tool_name='Git')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/someuser/myrepo'
'?fields=scm'))
def test_check_repository_with_slash(self):
"""Testing Bitbucket.check_repository with /"""
expected_message = \
'Please specify just the name of the repository, not a path.'
with self.setup_http_test(expected_http_calls=0) as ctx:
with self.assertRaisesMessage(RepositoryError, expected_message):
ctx.service.check_repository(
bitbucket_team_name='myteam',
bitbucket_team_repo_name='myteam/myrepo',
plan='team')
def test_check_repository_with_dot_git(self):
"""Testing Bitbucket.check_repository with .git"""
expected_message = \
'Please specify just the name of the repository without ".git".'
with self.setup_http_test(expected_http_calls=0) as ctx:
with self.assertRaisesMessage(RepositoryError, expected_message):
ctx.service.check_repository(
bitbucket_team_name='myteam',
bitbucket_team_repo_name='myrepo.git',
plan='team')
def test_check_repository_with_type_mismatch(self):
"""Testing Bitbucket.check_repository with type mismatch"""
error_message = (
'The Bitbucket repository being configured does not match the '
'type of repository you have selected.'
)
with self.setup_http_test(payload=b'{"scm": "git"}',
expected_http_calls=1) as ctx:
# Check Git repositories.
with self.assertRaisesMessage(RepositoryError, error_message):
ctx.service.check_repository(
bitbucket_team_name='myteam',
bitbucket_team_repo_name='myrepo',
plan='team',
tool_name='Mercurial')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myteam/myrepo'
'?fields=scm'))
def test_authorize(self):
"""Testing Bitbucket.authorize"""
hosting_account = self.create_hosting_account(data={})
with self.setup_http_test(payload=b'{}',
hosting_account=hosting_account,
expected_http_calls=1) as ctx:
self.assertFalse(ctx.service.is_authorized())
ctx.service.authorize(username='myuser',
password='<PASSWORD>')
self.assertIn('password', hosting_account.data)
self.assertNotEqual(hosting_account.data['password'], '<PASSWORD>')
self.assertEqual(decrypt_password(hosting_account.data['password']),
'<PASSWORD>')
self.assertTrue(ctx.service.is_authorized())
ctx.assertHTTPCall(
0,
url='https://bitbucket.org/api/2.0/user',
username='myuser',
password='<PASSWORD>')
def test_authorize_with_bad_credentials(self):
"""Testing Bitbucket.authorize with bad credentials"""
hosting_account = self.create_hosting_account(data={})
expected_message = (
'Invalid Bitbucket username or password. Make sure you are using '
'your Bitbucket username and not e-mail address, and are using an '
'app password if two-factor authentication is enabled.'
)
with self.setup_http_test(status_code=401,
hosting_account=hosting_account,
expected_http_calls=1) as ctx:
self.assertFalse(ctx.service.is_authorized())
with self.assertRaisesMessage(AuthorizationError,
expected_message):
ctx.service.authorize(username='myuser',
password='<PASSWORD>')
self.assertNotIn('password', hosting_account.data)
self.assertFalse(ctx.service.is_authorized())
ctx.assertHTTPCall(
0,
url='https://bitbucket.org/api/2.0/user',
username='myuser',
password='<PASSWORD>')
def test_get_file_with_git_and_base_commit_id(self):
"""Testing Bitbucket.get_file with Git and base commit ID"""
self._test_get_file(
tool_name='Git',
revision='123',
base_commit_id='456',
expected_revision='456')
def test_get_file_with_git_and_revision(self):
"""Testing Bitbucket.get_file with Git and revision"""
with self.assertRaises(FileNotFoundError):
self._test_get_file(tool_name='Git',
revision='123',
base_commit_id=None,
expected_revision='123')
def test_get_file_exists_with_git_and_base_commit_id(self):
"""Testing Bitbucket.get_file_exists with Git and base commit ID"""
self._test_get_file_exists(
tool_name='Git',
revision='123',
base_commit_id='456',
expected_revision='456',
expected_found=True)
def test_get_file_exists_with_git_and_revision(self):
"""Testing Bitbucket.get_file_exists with Git and revision"""
self._test_get_file_exists(
tool_name='Git',
revision='123',
base_commit_id=None,
expected_revision='123',
expected_found=False,
expected_http_called=False)
def test_get_file_exists_with_git_and_404(self):
"""Testing BitBucket.get_file_exists with Git and a 404 error"""
self._test_get_file_exists(
tool_name='Git',
revision='123',
base_commit_id='456',
expected_revision='456',
expected_found=False)
def test_get_branches(self):
"""Testing Bitbucket.get_branches"""
branches_api_response_1 = self.dump_json({
'next': ('https://bitbucket.org/api/2.0/repositories/myuser/'
'myrepo/refs/branches'
'?fields=values.name%2Cvalues.target.hash%2Cnext'
'&pagelen=100&page=2'),
'values': [
{
'name': 'branch1',
'target': {
'hash': '1c44b461cebe5874a857c51a4a13a849a4d1e52d',
},
},
{
'name': 'branch2',
'target': {
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
},
},
],
})
branches_api_response_2 = self.dump_json({
'values': [
{
'name': 'branch3',
'target': {
'hash': 'e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
},
},
{
'name': 'branch4',
'target': {
'hash': 'd286691517e6325fea5c7a21d5e44568f7d33647',
},
},
],
})
get_repository_api_response = self.dump_json({
'mainbranch': {
'name': 'branch3',
},
})
paths = {
'/api/2.0/repositories/myuser/myrepo': {
'payload': get_repository_api_response,
},
('/api/2.0/repositories/myuser/myrepo/refs/branches'
'?fields=values.name%2Cvalues.target.hash%2Cnext&pagelen=100'): {
'payload': branches_api_response_1,
},
('/api/2.0/repositories/myuser/myrepo/refs/branches'
'?fields=values.name%2Cvalues.target.hash%2Cnext&page=2'
'&pagelen=100'): {
'payload': branches_api_response_2,
},
}
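        # make_handler_for_paths() below simulates Bitbucket's paginated
        # refs/branches API: the first page's "next" URL drives a second
        # request, and a separate repository lookup supplies the mainbranch
        # used to flag the default branch.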
with self.setup_http_test(self.make_handler_for_paths(paths),
expected_http_calls=3) as ctx:
repository = self.create_repository(tool_name='Git')
branches = ctx.service.get_branches(repository)
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo'
'?fields=mainbranch.name'))
ctx.assertHTTPCall(
1,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'refs/branches'
'?fields=values.name%2Cvalues.target.hash%2Cnext'
'&pagelen=100'))
ctx.assertHTTPCall(
2,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'refs/branches'
'?fields=values.name%2Cvalues.target.hash%2Cnext'
'&page=2&pagelen=100'))
self.assertEqual(
branches,
[
Branch(id='branch1',
commit='1<PASSWORD>a<PASSWORD>a<PASSWORD>'),
Branch(id='branch2',
commit='44568f7d33647d286691517e6325fea5c7a21d5e'),
Branch(id='branch3',
commit='e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
default=True),
Branch(id='branch4',
commit='d286691517e6325fea5c7a21d5e44568f7d33647'),
])
def test_get_commits(self):
"""Testing Bitbucket.get_commits"""
payload = self.dump_json({
'size': 2,
'values': [
{
'hash': '1c44b461cebe5874a857c51a4a13a849a4d1e52d',
'author': {
'raw': 'Some User 1 <<EMAIL>>',
},
'date': '2017-01-24T13:11:22+00:00',
'message': 'This is commit 1.',
'parents': [
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
},
],
},
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
'author': {
'raw': 'Some User 2 <<EMAIL>>',
},
'date': '2017-01-23T08:09:10+00:00',
'message': 'This is commit 2.',
'parents': [
{
'hash': 'e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
},
],
},
],
})
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
repository = ctx.create_repository(tool_name='Git')
commits = ctx.service.get_commits(repository)
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'commits'
'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'
'%2Cvalues.message%2Cvalues.parents.hash'
'&pagelen=20'))
self.assertEqual(
commits,
[
Commit(author_name='Some User 1 <<EMAIL>>',
date='2017-01-24T13:11:22+00:00',
id='<PASSWORD>be<PASSWORD>',
message='This is commit 1.',
parent='44568f7d33647d286691517e6325fea5c7a21d5e'),
Commit(author_name='Some User 2 <<EMAIL>>',
date='2017-01-23T08:09:10+00:00',
id='<PASSWORD>',
message='This is commit 2.',
parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb'),
])
for commit in commits:
self.assertIsNone(commit.diff)
def test_get_commits_with_start(self):
"""Testing Bitbucket.get_commits with start="""
payload = self.dump_json({
'size': 2,
'values': [
{
'hash': '1c44b461cebe5874a857c51a4a13a849a4d1e52d',
'author': {
'raw': 'Some User 1 <<EMAIL>>',
},
'date': '2017-01-24T13:11:22+00:00',
'message': 'This is commit 1.',
'parents': [
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
},
],
},
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
'author': {
'raw': 'Some User 2 <<EMAIL>>',
},
'date': '2017-01-23T08:09:10+00:00',
'message': 'This is commit 2.',
'parents': [
{
'hash': 'e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
},
],
},
],
})
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
repository = ctx.create_repository(tool_name='Git')
commits = ctx.service.get_commits(
repository,
start='1c44b461cebe5874a857c51a4a13a849a4d1e5')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'commits/1c44b461cebe5874a857c51a4a13a849a4d1e5'
'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'
'%2Cvalues.message%2Cvalues.parents.hash'
'&pagelen=20'))
self.assertEqual(
commits,
[
Commit(author_name='Some User 1 <<EMAIL>>',
date='2017-01-24T13:11:22+00:00',
id='<PASSWORD>',
message='This is commit 1.',
parent='44568f7d33647d286691517e6325fea5c7a21d5e'),
Commit(author_name='Some User 2 <<EMAIL>>',
date='2017-01-23T08:09:10+00:00',
id='<PASSWORD>',
message='This is commit 2.',
parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb'),
])
for commit in commits:
self.assertIsNone(commit.diff)
def test_get_commits_with_branch(self):
"""Testing Bitbucket.get_commits with branch="""
payload = self.dump_json({
'size': 2,
'values': [
{
'hash': '1c44b461cebe5874a857c51a4a13a849a4d1e52d',
'author': {
'raw': 'Some User 1 <<EMAIL>>',
},
'date': '2017-01-24T13:11:22+00:00',
'message': 'This is commit 1.',
'parents': [
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
},
],
},
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
'author': {
'raw': 'Some User 2 <<EMAIL>>',
},
'date': '2017-01-23T08:09:10+00:00',
'message': 'This is commit 2.',
'parents': [
{
'hash': 'e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
},
],
},
],
})
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
repository = ctx.create_repository(tool_name='Git')
commits = ctx.service.get_commits(repository,
branch='master')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'commits/master'
'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'
'%2Cvalues.message%2Cvalues.parents.hash'
'&pagelen=20'))
self.assertEqual(
commits,
[
Commit(author_name='Some User 1 <<EMAIL>>',
date='2017-01-24T13:11:22+00:00',
id='<PASSWORD>1a4<PASSWORD>a8<PASSWORD>4<PASSWORD>',
message='This is commit 1.',
parent='44568f7d33647d286691517e6325fea5c7a21d5e'),
Commit(author_name='Some User 2 <<EMAIL>>',
date='2017-01-23T08:09:10+00:00',
id='<PASSWORD>',
message='This is commit 2.',
parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb'),
])
for commit in commits:
self.assertIsNone(commit.diff)
def test_get_commits_with_start_and_branch(self):
"""Testing Bitbucket.get_commits with start= and branch="""
payload = self.dump_json({
'size': 2,
'values': [
{
'hash': '1c44b461cebe5874a857c51a4a13a849a4d1e52d',
'author': {
'raw': 'Some User 1 <<EMAIL>>',
},
'date': '2017-01-24T13:11:22+00:00',
'message': 'This is commit 1.',
'parents': [
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
},
],
},
{
'hash': '44568f7d33647d286691517e6325fea5c7a21d5e',
'author': {
'raw': 'Some User 2 <<EMAIL>>',
},
'date': '2017-01-23T08:09:10+00:00',
'message': 'This is commit 2.',
'parents': [
{
'hash': 'e5874a857c51a4a13a849a4d1e52d1c44b461ceb',
},
],
},
],
})
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
repository = ctx.create_repository(tool_name='Git')
commits = ctx.service.get_commits(
repository,
start='1c44b461cebe5874a857c51a4a13a849a4d1e52d',
branch='master')
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'commits/1c44b461cebe5874a857c51a4a13a849a4d1e52d'
'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'
'%2Cvalues.message%2Cvalues.parents.hash'
'&pagelen=20'))
self.assertEqual(
commits,
[
Commit(author_name='Some User 1 <<EMAIL>>',
date='2017-01-24T13:11:22+00:00',
id='<PASSWORD>',
message='This is commit 1.',
parent='44568f7d33647d286691517e6325fea5c7a21d5e'),
Commit(author_name='Some User 2 <<EMAIL>>',
date='2017-01-23T08:09:10+00:00',
id='<PASSWORD>',
message='This is commit 2.',
parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb'),
])
for commit in commits:
self.assertIsNone(commit.diff)
def test_get_change(self):
"""Testing BitBucket.get_change"""
commit_sha = '1c44b461cebe5874a857c51a4a13a849a4d1e52d'
parent_sha = '44568f7d33647d286691517e6325fea5c7a21d5e'
paths = {
'/api/2.0/repositories/myuser/myrepo/commit/%s' % commit_sha: {
'payload': self.dump_json({
'hash': commit_sha,
'author': {
'raw': 'Some User <<EMAIL>>',
},
'date': '2017-01-24T13:11:22+00:00',
'message': 'This is a message.',
'parents': [{'hash': parent_sha}],
}),
},
'/api/2.0/repositories/myuser/myrepo/diff/%s' % commit_sha: {
'payload': b'This is a test \xc7.',
},
}
with self.setup_http_test(self.make_handler_for_paths(paths),
expected_http_calls=2) as ctx:
repository = ctx.create_repository(tool_name='Git')
commit = ctx.service.get_change(repository, commit_sha)
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'commit/1c44b461cebe5874a857c51a4a13a849a4d1e52d'
'?fields=author.raw%2Chash%2Cdate%2Cmessage%2Cparents.hash'))
ctx.assertHTTPCall(
1,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'diff/1c44b461cebe5874a857c51a4a13a849a4d1e52d'))
self.assertEqual(
commit,
Commit(author_name='Some User <<EMAIL>>',
date='2017-01-24T13:11:22+00:00',
id=commit_sha,
message='This is a message.',
parent=parent_sha))
self.assertEqual(commit.diff, b'This is a test \xc7.\n')
def _test_get_file(self, tool_name, revision, base_commit_id,
expected_revision):
"""Test file fetching.
Args:
tool_name (unicode):
The name of the SCM Tool to test with.
revision (unicode, optional):
The revision to check.
base_commit_id (unicode, optional):
The base commit to fetch against.
expected_revision (unicode, optional):
The revision expected in the payload.
"""
with self.setup_http_test(payload=b'My data',
expected_http_calls=1) as ctx:
repository = ctx.create_repository(tool_name=tool_name)
result = ctx.service.get_file(repository=repository,
path='path',
revision=revision,
base_commit_id=base_commit_id)
ctx.assertHTTPCall(
0,
url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'
'src/%s/path'
% expected_revision))
self.assertIsInstance(result, bytes)
self.assertEqual(result, b'My data')
def _test_get_file_exists(self, tool_name, revision, base_commit_id,
expected_revision, expected_found,
expected_http_called=True):
"""Test file existence checks.
Args:
tool_name (unicode):
The name of the SCM Tool to test with.
revision (unicode, optional):
The revision to check.
base_commit_id (unicode, optional):
The base commit to fetch against.
expected_revision (unicode, optional):
The revision expected in the payload.
expected_found (bool, optional):
Whether a truthy response should be expected.
expected_http_called (bool, optional):
Whether an HTTP request is expected to have been made.
"""
if expected_found:
payload = b'file...'
status_code = None
else:
payload = None
status_code = 404
if expected_http_called:
expected_calls = 1
else:
expected_calls = 0
with self.setup_http_test(payload=payload,
status_code=status_code,
expected_http_calls=expected_calls) as ctx:
repository = ctx.create_repository(tool_name=tool_name)
result = ctx.service.get_file_exists(repository=repository,
path='path',
revision=revision,
base_commit_id=base_commit_id)
if expected_http_called:
ctx.assertHTTPCall(
0,
method='HEAD',
url=('https://bitbucket.org/api/2.0/repositories/myuser/'
'myrepo/src/%s/path'
% expected_revision))
self.assertEqual(result, expected_found)
class BitbucketAuthFormTests(BitbucketTestCase):
"""Unit tests for BitbucketAuthForm."""
def test_clean_hosting_account_username_with_username(self):
"""Testing BitbucketAuthForm.clean_hosting_account_username with
username
"""
form = BitbucketAuthForm(
hosting_service_cls=self.service_class,
data={
'hosting_account_username': 'myuser',
'hosting_account_password': '<PASSWORD>',
})
self.assertTrue(form.is_valid())
def test_clean_hosting_account_username_with_email(self):
"""Testing BitbucketAuthForm.clean_hosting_account_username with
e-mail address
"""
form = BitbucketAuthForm(
hosting_service_cls=self.service_class,
data={
'hosting_account_username': '<EMAIL>',
'hosting_account_password': '<PASSWORD>',
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['hosting_account_username'],
['This must be your Bitbucket username (the same one '
'you would see in URLs for your own repositories), '
'not your Atlassian e-mail address.'])
class CloseSubmittedHookTests(BitbucketTestCase):
"""Unit tests for the Bitbucket close-submitted webhook."""
fixtures = ['test_users', 'test_scmtools']
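    # Commits link embedded in the simulated webhook payloads; when a payload
    # marks its commit list as truncated, the hook fetches the remaining
    # commits from this URL (paginated via the pagelen/page parameters).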
COMMITS_URL = ('/api/2.0/repositories/test/test/commits'
'?exclude=abc123&include=def123')
def test_close_submitted_hook(self):
"""Testing BitBucket close_submitted hook"""
self._test_post_commit_hook()
@add_fixtures(['test_site'])
def test_close_submitted_hook_with_local_site(self):
"""Testing BitBucket close_submitted hook with a Local Site"""
self._test_post_commit_hook(
LocalSite.objects.get(name=self.local_site_name))
def test_close_submitted_hook_with_truncated_commits(self):
"""Testing BitBucket close_submitted hook with truncated list of
commits
"""
account = self.create_hosting_account()
repository = self.create_repository(hosting_account=account)
# Create two review requests: One per referenced commit.
review_request1 = self.create_review_request(id=99,
repository=repository,
publish=True)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status,
review_request1.PENDING_REVIEW)
review_request2 = self.create_review_request(id=100,
repository=repository,
publish=True)
self.assertTrue(review_request2.public)
self.assertEqual(review_request2.status,
review_request2.PENDING_REVIEW)
page2_url = '%s&page=2&pagelen=100' % self.COMMITS_URL
paths = {
'%s&pagelen=100' % self.COMMITS_URL: {
'payload': self.dump_json({
'next': page2_url,
'values': [
{
'hash': '1c44b461cebe5874a857c51a4a13a84'
'9a4d1e52d',
'message': 'This is my fancy commit.\n'
'\n'
'Reviewed at http://example.com%s'
% review_request1.get_absolute_url(),
},
],
}),
},
page2_url: {
'payload': self.dump_json({
'values': [
{
'hash': '9fad89712ebe5874a857c5112a3c9d1'
'87ada0dbc',
'message': 'This is another commit\n'
'\n'
'Reviewed at http://example.com%s'
% review_request2.get_absolute_url(),
},
],
}),
}
}
# Simulate the webhook.
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
with self.setup_http_test(self.make_handler_for_paths(paths),
expected_http_calls=2):
self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request1.get_absolute_url(),
truncated=True)
# Check the first review request.
#
# The first review request has an entry in the truncated list and the
# fetched list. We'll make sure we've only processed it once.
review_request1 = ReviewRequest.objects.get(pk=review_request1.pk)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status, review_request1.SUBMITTED)
self.assertEqual(review_request1.changedescs.count(), 1)
changedesc = review_request1.changedescs.get()
self.assertEqual(changedesc.text, 'Pushed to master (1c44b46)')
        # Check the second review request.
review_request2 = ReviewRequest.objects.get(pk=review_request2.pk)
self.assertTrue(review_request2.public)
self.assertEqual(review_request2.status, review_request2.SUBMITTED)
self.assertEqual(review_request2.changedescs.count(), 1)
changedesc = review_request2.changedescs.get()
self.assertEqual(changedesc.text, 'Pushed to master (9fad897)')
def test_close_submitted_hook_with_truncated_commits_limits(self):
"""Testing BitBucket close_submitted hook with truncated list of
commits obeys limits
"""
paths = {
'%s&pagelen=100' % self.COMMITS_URL: {
'payload': self.dump_json({
'next': '%s&page=2' % self.COMMITS_URL,
'values': [],
}),
},
}
paths.update({
'%s&page=%s&pagelen=100' % (self.COMMITS_URL, i): {
'payload': self.dump_json({
'next': '%s&page=%s' % (self.COMMITS_URL, i + 1),
'values': [],
}),
}
for i in range(1, 10)
})
account = self.create_hosting_account()
repository = self.create_repository(hosting_account=account)
# Create two review requests: One per referenced commit.
review_request1 = self.create_review_request(id=99,
repository=repository,
publish=True)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status,
review_request1.PENDING_REVIEW)
review_request2 = self.create_review_request(id=100,
repository=repository,
publish=True)
self.assertTrue(review_request2.public)
self.assertEqual(review_request2.status,
review_request2.PENDING_REVIEW)
# Simulate the webhook.
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
# There should have been 5 API requests. We'll never hit the final
# page.
with self.setup_http_test(self.make_handler_for_paths(paths),
expected_http_calls=5):
self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request1.get_absolute_url(),
truncated=True)
# The review requests should not have been updated.
review_request1 = ReviewRequest.objects.get(pk=review_request1.pk)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status,
review_request1.PENDING_REVIEW)
self.assertEqual(review_request1.changedescs.count(), 0)
        # Check the second review request.
        review_request2 = ReviewRequest.objects.get(pk=review_request2.pk)
        self.assertTrue(review_request2.public)
        self.assertEqual(review_request2.status,
                         review_request2.PENDING_REVIEW)
self.assertEqual(review_request2.changedescs.count(), 0)
def test_close_submitted_hook_with_truncated_and_auth_error(self):
"""Testing BitBucket close_submitted hook with truncated list of
commits and authentication error talking to Bitbucket
"""
account = self.create_hosting_account()
repository = self.create_repository(hosting_account=account)
# Create two review requests: One per referenced commit.
review_request1 = self.create_review_request(id=99,
repository=repository,
publish=True)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status,
review_request1.PENDING_REVIEW)
review_request2 = self.create_review_request(id=100,
repository=repository,
publish=True)
self.assertTrue(review_request2.public)
self.assertEqual(review_request2.status,
review_request2.PENDING_REVIEW)
# Simulate the webhook.
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
with self.setup_http_test(status_code=401,
hosting_account=account,
expected_http_calls=1):
response = self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request1.get_absolute_url(),
truncated=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content,
b'Incorrect username or password configured for '
b'this repository on Review Board.')
# The review requests should not have been updated.
review_request1 = ReviewRequest.objects.get(pk=review_request1.pk)
self.assertTrue(review_request1.public)
self.assertEqual(review_request1.status,
review_request1.PENDING_REVIEW)
self.assertEqual(review_request1.changedescs.count(), 0)
        # Check the second review request.
        review_request2 = ReviewRequest.objects.get(pk=review_request2.pk)
        self.assertTrue(review_request2.public)
        self.assertEqual(review_request2.status,
                         review_request2.PENDING_REVIEW)
self.assertEqual(review_request2.changedescs.count(), 0)
def test_close_submitted_hook_with_invalid_repo(self):
"""Testing BitBucket close_submitted hook with invalid repository"""
repository = self.create_repository()
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
response = self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request.get_absolute_url())
self.assertEqual(response.status_code, 404)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
self.assertEqual(review_request.changedescs.count(), 0)
@add_fixtures(['test_site'])
def test_close_submitted_hook_with_invalid_site(self):
"""Testing BitBucket close_submitted hook with invalid Local Site"""
local_site = LocalSite.objects.get(name=self.local_site_name)
account = self.create_hosting_account(local_site=local_site)
account.save()
repository = self.create_repository(hosting_account=account,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
local_site_name='badsite',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
response = self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request.get_absolute_url())
self.assertEqual(response.status_code, 404)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
self.assertEqual(review_request.changedescs.count(), 0)
def test_close_submitted_hook_with_invalid_service_id(self):
"""Testing BitBucket close_submitted hook with invalid hosting
service ID
"""
# We'll test against GitHub for this test.
account = self.create_hosting_account()
account.service_name = 'github'
account.save()
repository = self.create_repository(hosting_account=account)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
response = self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request.get_absolute_url())
self.assertEqual(response.status_code, 404)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
self.assertEqual(review_request.changedescs.count(), 0)
def test_close_submitted_hook_with_invalid_review_request(self):
"""Testing BitBucket close_submitted hook with invalid review request
"""
self.spy_on(logging.error)
account = self.create_hosting_account()
repository = self.create_repository(hosting_account=account)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
response = self._post_commit_hook_payload(
post_url=url,
review_request_url='/r/9999/')
self.assertEqual(response.status_code, 200)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
self.assertEqual(review_request.changedescs.count(), 0)
self.assertTrue(logging.error.called_with(
'close_all_review_requests: Review request #%s does not exist.',
9999))
def _test_post_commit_hook(self, local_site=None):
"""Testing posting to a commit hook.
This will simulate pushing a commit and posting the resulting webhook
payload from Bitbucket to the handler for the hook.
Args:
local_site (reviewboard.site.models.LocalSite, optional):
The Local Site owning the review request.
"""
account = self.create_hosting_account(local_site=local_site)
repository = self.create_repository(hosting_account=account,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.PENDING_REVIEW)
url = local_site_reverse(
'bitbucket-hooks-close-submitted',
local_site=local_site,
kwargs={
'repository_id': repository.pk,
'hosting_service_id': 'bitbucket',
'hooks_uuid': repository.get_or_create_hooks_uuid(),
})
self._post_commit_hook_payload(
post_url=url,
review_request_url=review_request.get_absolute_url())
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, review_request.SUBMITTED)
self.assertEqual(review_request.changedescs.count(), 1)
changedesc = review_request.changedescs.get()
self.assertEqual(changedesc.text, 'Pushed to master (1c44b46)')
def _post_commit_hook_payload(self, post_url, review_request_url,
truncated=False):
"""Post a payload for a hook for testing.
Args:
post_url (unicode):
The URL to post to.
review_request_url (unicode):
The URL of the review request being represented in the
payload.
truncated (bool, optional):
Whether the commit list should be marked truncated.
        Returns:
            django.http.HttpResponse:
            The response from posting the webhook payload.
"""
return self.client.post(
post_url,
content_type='application/json',
data=self.dump_json({
# NOTE: This payload only contains the content we make
# use of in the hook.
'push': {
'changes': [
{
'new': {
'type': 'branch',
'name': 'master',
},
'truncated': truncated,
'commits': [
{
'hash': '1c44b461cebe5874a857c51a4a13a84'
'9a4d1e52d',
'message': 'This is my fancy commit\n'
'\n'
'Reviewed at http://example.com'
'%s'
% review_request_url,
},
],
'links': {
'commits': {
'href': self.COMMITS_URL,
},
},
},
# Some entries containing missing keys.
{
'new': {
'type': 'frobblegobble',
'name': 'master',
},
'truncated': truncated,
'commits': [
{
'hash': '1c44b461cebe5874a857c51a4a13a84'
'9a4d1e52d',
'message': 'This is my fancy commit\n'
'\n'
'Reviewed at http://example.com'
'%s'
% review_request_url,
},
],
'links': {
'commits': {
'href': self.COMMITS_URL,
},
},
},
{
'new': {
'type': 'branch',
'name': 'other',
},
'truncated': truncated,
'commits': [
{
'hash': 'f46a13a1cc43bebea857c558741a484'
'1e52d9a4d',
'message': 'Ignored commit.'
},
],
'links': {},
},
{
'new': {},
'commits': [],
},
{
'new': None,
'commits': None,
},
{
}
],
}
}, for_response=False))
|
aleph/tests/test_groups_api.py | Rosencrantz/aleph | 1,213 | 12680834 | <reponame>Rosencrantz/aleph
from aleph.core import db
from aleph.views.util import validate
from aleph.tests.util import TestCase
class GroupsApiTestCase(TestCase):
def setUp(self):
super(GroupsApiTestCase, self).setUp()
self.role = self.create_user(foreign_id="user_1")
self.create_group("group_1", self.role)
self.create_group("group_2", self.role)
self.other = self.create_user(foreign_id="other")
db.session.commit()
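    # user_1 was added to both groups above while "other" belongs to none, so
    # the group listing should require authentication and return only the
    # calling user's memberships.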
def test_index(self):
res = self.client.get("/api/2/groups")
assert res.status_code == 403, res
_, headers = self.login(foreign_id="user_1")
res = self.client.get("/api/2/groups", headers=headers)
assert res.status_code == 200, res
assert res.json["total"] == 2, res.json
validate(res.json["results"][0], "Role")
_, headers = self.login(foreign_id="other")
res = self.client.get("/api/2/groups", headers=headers)
assert res.status_code == 200, res
assert res.json["total"] == 0, res.json
|
samples/12-transitionparsing/tree_parser.py | tomshafer/nn4nlp | 1,037 | 12680835 | <gh_stars>1000+
from collections import defaultdict, Counter
import codecs
import time
import random
import dynet as dy
import numpy as np
from tree import Tree
def read_dataset(filename):
return [Tree.from_sexpr(line.strip()) for line in codecs.open(filename,"r")]
def get_vocabs(trees):
label_vocab = Counter()
word_vocab = Counter()
for tree in trees:
label_vocab.update([n.label for n in tree.nonterms()])
word_vocab.update([l.label for l in tree.leaves()])
labels = [x for x,c in label_vocab.iteritems() if c > 0]
words = ["_UNK_"] + [x for x,c in word_vocab.iteritems() if c > 0]
l2i = {l:i for i,l in enumerate(labels)}
w2i = {w:i for i,w in enumerate(words)}
return l2i, w2i, labels, words
train = read_dataset("../data/parsing/trees/train.txt")
dev = read_dataset("../data/parsing/trees/dev.txt")
l2i, w2i, i2l, i2w = get_vocabs(train)
ntags = len(l2i)
nwords = len(w2i)
# Socher-style Tree RNN
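# Leaves are represented by their word embeddings; an internal node with child
# representations (e1, e2) is composed bottom-up as h = tanh(W [e1; e2]), so the
# root summarizes the whole tree in a single fixed-size vector.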
class TreeRNNBuilder(object):
def __init__(self, model, word_vocab, hdim):
self.W = model.add_parameters((hdim, 2*hdim))
self.E = model.add_lookup_parameters((len(word_vocab),hdim))
self.w2i = word_vocab
def expr_for_tree(self, tree):
if tree.isleaf():
return self.E[self.w2i.get(tree.label,0)]
if len(tree.children) == 1:
assert(tree.children[0].isleaf())
expr = self.expr_for_tree(tree.children[0])
return expr
assert(len(tree.children) == 2),tree.children[0]
e1 = self.expr_for_tree(tree.children[0])
e2 = self.expr_for_tree(tree.children[1])
W = dy.parameter(self.W)
expr = dy.tanh(W*dy.concatenate([e1,e2]))
return expr
# Tai-style Tree LSTM
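# Binary Tree-LSTM in the style of Tai et al. (2015), as implemented below:
# a preterminal wraps its word embedding emb via
#   i = sig(W_i emb + b_i), o = sig(W_o emb + b_o), u = tanh(W_u emb + b_u),
#   c = i * u,  h = o * tanh(c)
# while an internal node with child states (h1, h2) and e = [h1; h2] uses
#   i = sig(U_i e + b_i), o = sig(U_o e + b_o), u = tanh(U_u e + b_u),
#   f_k = sig(U_fk h_k + b_f),
#   c = i * u + f_1 * h1 + f_2 * h2,  h = o * tanh(c)
# Note that only h is propagated upward; the memory cell c is not passed on.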
class TreeLSTMBuilder(object):
def __init__(self, model, word_vocab, wdim, hdim):
self.WS = [model.add_parameters((hdim, wdim)) for _ in "iou"]
self.US = [model.add_parameters((hdim, 2*hdim)) for _ in "iou"]
self.UFS =[model.add_parameters((hdim, hdim)) for _ in "ff"]
self.BS = [model.add_parameters(hdim) for _ in "iouf"]
self.E = model.add_lookup_parameters((len(word_vocab),wdim))
self.w2i = word_vocab
def expr_for_tree(self, tree):
if tree.isleaf():
return self.E[self.w2i.get(tree.label,0)]
if len(tree.children) == 1:
assert(tree.children[0].isleaf())
emb = self.expr_for_tree(tree.children[0])
Wi,Wo,Wu = [dy.parameter(w) for w in self.WS]
bi,bo,bu,_ = [dy.parameter(b) for b in self.BS]
i = dy.logistic(Wi*emb + bi)
o = dy.logistic(Wo*emb + bo)
u = dy.tanh( Wu*emb + bu)
c = dy.cmult(i,u)
expr = dy.cmult(o,dy.tanh(c))
return expr
assert(len(tree.children) == 2),tree.children[0]
e1 = self.expr_for_tree(tree.children[0])
e2 = self.expr_for_tree(tree.children[1])
Ui,Uo,Uu = [dy.parameter(u) for u in self.US]
Uf1,Uf2 = [dy.parameter(u) for u in self.UFS]
bi,bo,bu,bf = [dy.parameter(b) for b in self.BS]
e = dy.concatenate([e1,e2])
i = dy.logistic(Ui*e + bi)
o = dy.logistic(Uo*e + bo)
f1 = dy.logistic(Uf1*e1 + bf)
f2 = dy.logistic(Uf2*e2 + bf)
u = dy.tanh( Uu*e + bu)
c = dy.cmult(i,u) + dy.cmult(f1,e1) + dy.cmult(f2,e2)
h = dy.cmult(o,dy.tanh(c))
expr = h
return expr
# Start DyNet and define trainer
model = dy.Model()
trainer = dy.AdamTrainer(model)
# Define the model
EMB_SIZE = 64
HID_SIZE = 64
# builder = TreeRNNBuilder(model, w2i, HID_SIZE)
builder = TreeLSTMBuilder(model, w2i, HID_SIZE, EMB_SIZE)
W_sm = model.add_parameters((ntags, HID_SIZE)) # Softmax weights
b_sm = model.add_parameters((ntags)) # Softmax bias
# A function to calculate scores for one value
def calc_scores(tree):
dy.renew_cg()
emb = builder.expr_for_tree(tree)
W_sm_exp = dy.parameter(W_sm)
b_sm_exp = dy.parameter(b_sm)
return W_sm_exp * emb + b_sm_exp
for ITER in range(100):
# Perform training
random.shuffle(train)
train_loss = 0.0
start = time.time()
for tree in train:
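        # Multiclass hinge loss on the root scores: penalize any label whose
        # score comes within a margin of the gold label's score. The
        # commented-out line is the softmax cross-entropy alternative.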
my_loss = dy.hinge(calc_scores(tree), l2i[tree.label])
# my_loss = dy.pickneglogsoftmax(calc_scores(tree), l2i[tree.label])
train_loss += my_loss.value()
my_loss.backward()
trainer.update()
print("iter %r: train loss/sent=%.4f, time=%.2fs" % (ITER, train_loss/len(train), time.time()-start))
# Perform testing
test_correct = 0.0
for tree in dev:
scores = calc_scores(tree).npvalue()
predict = np.argmax(scores)
if predict == l2i[tree.label]:
test_correct += 1
print("iter %r: test acc=%.4f" % (ITER, test_correct/len(dev)))
|
tests/test_compound.py | regoawt/django-rest-framework-mongoengine | 594 | 12680857 | """ testing compound fields list and dict """
from __future__ import unicode_literals
from django.test import TestCase
from mongoengine import Document, fields
from rest_framework_mongoengine.serializers import DocumentSerializer
from .models import DumbEmbedded
from .utils import dedent
class BasicCompoundDoc(Document):
list_field = fields.ListField()
int_list_field = fields.ListField(fields.IntField())
dict_field = fields.DictField()
int_dict_field = fields.DictField(field=fields.IntField())
int_map_field = fields.MapField(fields.IntField())
class OptionsCompoundDoc(Document):
int_list_field = fields.ListField(fields.IntField(min_value=3, max_value=7))
class NestedCompoundDoc(Document):
dict_list_field = fields.ListField(fields.DictField())
list_dict_field = fields.MapField(fields.ListField())
list_dict_list_field = fields.ListField(fields.MapField(fields.ListField()))
class TestCompundFieldMappings(TestCase):
maxDiff = 10000
def test_basic(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = BasicCompoundDoc
fields = '__all__'
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
list_field = ListField(required=False)
int_list_field = ListField(child=IntegerField(required=False), required=False)
dict_field = DictField(required=False)
int_dict_field = DictField(child=IntegerField(required=False), required=False)
int_map_field = DictField(child=IntegerField(required=False), required=False)
""")
assert repr(TestSerializer()) == expected
def test_suboptions(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = OptionsCompoundDoc
fields = '__all__'
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
int_list_field = ListField(child=IntegerField(max_value=7, min_value=3, required=False), required=False)
""")
assert repr(TestSerializer()) == expected
def test_nested(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = NestedCompoundDoc
fields = '__all__'
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
dict_list_field = ListField(child=DictField(required=False), required=False)
list_dict_field = DictField(child=ListField(required=False), required=False)
list_dict_list_field = ListField(child=DictField(child=ListField(required=False), required=False), required=False)
""")
assert repr(TestSerializer()) == expected
class TestSerializer(DocumentSerializer):
class Meta:
model = BasicCompoundDoc
fields = '__all__'
class TestIntegration(TestCase):
def doCleanups(self):
BasicCompoundDoc.drop_collection()
def test_parsing(self):
input_data = {
'list_field': ["1", 2, 3.0],
'int_list_field': [1, 2, 3],
'dict_field': {'a': "1", 'b': 2, 'c': 3.0},
'int_dict_field': {'a': 1, 'b': 2, 'c': 3},
'int_map_field': {'a': 1, 'b': 2, 'c': 3}
}
serializer = TestSerializer(data=input_data)
assert serializer.is_valid(), serializer.errors
expected = {
'list_field': ["1", 2, 3.0],
'int_list_field': [1, 2, 3],
'dict_field': {'a': "1", 'b': 2, 'c': 3.0},
'int_dict_field': {'a': 1, 'b': 2, 'c': 3},
'int_map_field': {'a': 1, 'b': 2, 'c': 3}
}
assert serializer.validated_data == expected
def test_retrieval(self):
instance = BasicCompoundDoc.objects.create(
list_field=["1", 2, 3.0],
int_list_field=[1, 2, 3],
dict_field={'a': "1", 'b': 2, 'c': 3.0},
int_dict_field={'a': 1, 'b': 2, 'c': 3},
int_map_field={'a': 1, 'b': 2, 'c': 3}
)
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'list_field': ["1", 2, 3.0],
'int_list_field': [1, 2, 3],
'dict_field': {'a': "1", 'b': 2, 'c': 3.0},
'int_dict_field': {'a': 1, 'b': 2, 'c': 3},
'int_map_field': {'a': 1, 'b': 2, 'c': 3}
}
assert serializer.data == expected
def test_create(self):
data = {
'list_field': ["1", 2, 3.0],
'int_list_field': [1, 2, 3],
'dict_field': {'a': "1", 'b': 2, 'c': 3.0},
'int_dict_field': {'a': 1, 'b': 2, 'c': 3},
'int_map_field': {'a': 1, 'b': 2, 'c': 3}
}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.list_field == ["1", 2, 3.0]
assert instance.int_list_field == [1, 2, 3]
assert instance.dict_field == {'a': "1", 'b': 2, 'c': 3.0}
assert instance.int_dict_field == {'a': 1, 'b': 2, 'c': 3}
assert instance.int_map_field == {'a': 1, 'b': 2, 'c': 3}
expected = {
'id': str(instance.id),
'list_field': ["1", 2, 3.0],
'int_list_field': [1, 2, 3],
'dict_field': {'a': "1", 'b': 2, 'c': 3.0},
'int_dict_field': {'a': 1, 'b': 2, 'c': 3},
'int_map_field': {'a': 1, 'b': 2, 'c': 3}
}
assert serializer.data == expected
def test_update(self):
instance = BasicCompoundDoc.objects.create(
list_field=["1", 2, 3.0],
int_list_field=[1, 2, 3],
dict_field={'a': "1", 'b': 2, 'c': 3.0},
int_dict_field={'a': 1, 'b': 2, 'c': 3},
int_map_field={'a': 1, 'b': 2, 'c': 3}
)
data = {
'list_field': ["0", 1, 2.0],
'int_list_field': [9, 1, 2],
'dict_field': {'a': "0", 'b': 1, 'c': 2.0, 'd': 3},
'int_dict_field': {'a': 0, 'b': 1, 'c': 2, 'd': 3},
'int_map_field': {'a': 0, 'b': 1, 'c': 2, 'd': 3}
}
serializer = TestSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.list_field == ["0", 1, 2.0]
assert instance.int_list_field == [9, 1, 2]
assert instance.dict_field == {'a': "0", 'b': 1, 'c': 2.0, 'd': 3}
assert instance.int_dict_field == {'a': 0, 'b': 1, 'c': 2, 'd': 3}
assert instance.int_map_field == {'a': 0, 'b': 1, 'c': 2, 'd': 3}
expected = {
'id': str(instance.id),
'list_field': ["0", 1, 2.0],
'int_list_field': [9, 1, 2],
'dict_field': {'a': "0", 'b': 1, 'c': 2.0, 'd': 3},
'int_dict_field': {'a': 0, 'b': 1, 'c': 2, 'd': 3},
'int_map_field': {'a': 0, 'b': 1, 'c': 2, 'd': 3}
}
assert serializer.data == expected
class ValidatingSerializer(DocumentSerializer):
class Meta:
model = OptionsCompoundDoc
fields = '__all__'
class TestCompoundValidation(TestCase):
def test_validation_is_executed(self):
serializer = ValidatingSerializer(data={'int_list_field': [1, 2, 3]})
assert not serializer.is_valid()
assert 'int_list_field' in serializer.errors
def test_validation_passing(self):
serializer = ValidatingSerializer(data={'int_list_field': [3, 4, 5]})
assert serializer.is_valid(), serializer.errors
# MongoEngine's ListField attaches a specific meaning to the required argument,
# so we have to test that it stays compatible with DRF's ListField.
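# In MongoEngine, required=True on a ListField also rejects an empty list, so
# the generated DRF field is expected to refuse empty input (hence the
# "This list may not be empty." error asserted below).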
class RequiredListDocument(Document):
required_list = fields.ListField(fields.StringField(), required=True)
class RequiredListSerializer(DocumentSerializer):
class Meta:
model = RequiredListDocument
fields = '__all__'
class TestRequriedList(TestCase):
def doCleanups(self):
RequiredListDocument.drop_collection()
def test_parsing(self):
input_data = {
'required_list': []
}
serializer = RequiredListSerializer(data=input_data)
serializer.is_valid()
assert serializer.errors['required_list'] == [u'This list may not be empty.']
# Check that ListField is allowed to be empty, if required=False
class NonRequiredListDocument(Document):
non_required_list = fields.ListField(fields.StringField(), required=False)
class NonRequiredListSerializer(DocumentSerializer):
class Meta:
model = NonRequiredListDocument
fields = '__all__'
class TestNonRequiredList(TestCase):
def doCleanups(self):
NonRequiredListDocument.drop_collection()
def test_parsing(self):
input_data = {
'non_required_list': []
}
serializer = NonRequiredListSerializer(data=input_data)
assert serializer.is_valid()
# Check that compound fields work with DynamicField.
# So far this is only covered for ListField, since that was the failing case.
class CompoundsWithDynamicFieldDoc(Document):
list_field = fields.ListField(fields.DynamicField(null=True))
class CompoundsWithDynamicFieldSerializer(DocumentSerializer):
class Meta:
model = CompoundsWithDynamicFieldDoc
fields = '__all__'
class TestCompoundsWithDynamicField(TestCase):
def doCleanups(self):
CompoundsWithDynamicFieldDoc.drop_collection()
def test_parsing(self):
input_data = {
'list_field': [None, "1", 2, 3.0]
}
serializer = CompoundsWithDynamicFieldSerializer(data=input_data)
assert serializer.is_valid(), serializer.errors
expected = {
'list_field': [None, "1", 2, 3.0]
}
assert serializer.validated_data == expected
def test_retrieval(self):
instance = CompoundsWithDynamicFieldDoc.objects.create(
list_field=[None, "1", 2, 3.0]
)
serializer = CompoundsWithDynamicFieldSerializer(instance)
expected = {
'id': str(instance.id),
'list_field': [None, "1", 2, 3.0]
}
assert serializer.data == expected
def test_create(self):
data = {
'list_field': [None, "1", 2, 3.0]
}
serializer = CompoundsWithDynamicFieldSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.list_field == [None, "1", 2, 3.0]
expected = {
'id': str(instance.id),
'list_field': [None, "1", 2, 3.0],
}
assert serializer.data == expected
def test_update(self):
instance = BasicCompoundDoc.objects.create(
list_field=[None, "1", 2, 3.0]
)
data = {
'list_field': ["0", 1, 2.0, None]
}
serializer = CompoundsWithDynamicFieldSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.list_field == ["0", 1, 2.0, None]
expected = {
'id': str(instance.id),
'list_field': ["0", 1, 2.0, None]
}
assert serializer.data == expected
class MapEmbeddedDoc(Document):
embedded_map_field = fields.MapField(fields.EmbeddedDocumentField(DumbEmbedded))
class MapEmbeddedFieldSerializer(DocumentSerializer):
class Meta:
model = MapEmbeddedDoc
fields = '__all__'
class TestMapFieldWithEmbeddedDocument(TestCase):
def doCleanups(self):
MapEmbeddedDoc.drop_collection()
def test_parsing(self):
input_data = {
"embedded_map_field": {"a": {"name": "spam", "foo": 1}, "b": {"name": "ham", "foo": 2}},
}
serializer = MapEmbeddedFieldSerializer(data=input_data)
assert serializer.is_valid(), serializer.errors
expected = {
"embedded_map_field": {"a": {"name": "spam", "foo": 1}, "b": {"name": "ham", "foo": 2}},
}
assert serializer.validated_data == expected
def test_retrieval(self):
instance = MapEmbeddedDoc.objects.create(
embedded_map_field={"a": DumbEmbedded(name="spam", foo=1), "b": DumbEmbedded(name="ham", foo=2)},
)
serializer = MapEmbeddedFieldSerializer(instance)
expected = {
"id": str(instance.id),
"embedded_map_field": {"a": {"name": "spam", "foo": 1}, "b": {"name": "ham", "foo": 2}},
}
assert serializer.data == expected
def test_create(self):
data = {
"embedded_map_field": {"a": {"name": "spam", "foo": 1}, "b": {"name": "ham", "foo": 2}},
}
serializer = MapEmbeddedFieldSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
expected = {
"id": str(instance.id),
"embedded_map_field": {"a": {"name": "spam", "foo": 1}, "b": {"name": "ham", "foo": 2}},
}
assert serializer.data == expected
def test_update(self):
instance = MapEmbeddedDoc.objects.create(
embedded_map_field={"a": DumbEmbedded(name="spam", foo=1), "b": DumbEmbedded(name="ham", foo=2)},
)
data = {
"embedded_map_field": {"a": {"name": "spam", "foo": 3}, "b": {"name": "ham", "foo": 4}},
}
serializer = MapEmbeddedFieldSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
expected = {
"id": str(instance.id),
"embedded_map_field": {"a": {"name": "spam", "foo": 3}, "b": {"name": "ham", "foo": 4}},
}
assert serializer.data == expected
|
evkit/rl/algo/ppo_replay.py | joel99/midlevel-reps | 120 | 12680870 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
class PPOReplay(object):
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
on_policy_epoch,
off_policy_epoch,
lr=None,
eps=None,
max_grad_norm=None,
amsgrad=True,
weight_decay=0.0,
intrinsic_losses=None, # list of loss key words
intrinsic_loss_coef=0.0
):
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.on_policy_epoch = on_policy_epoch
self.off_policy_epoch = off_policy_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
        self.intrinsic_loss_coef = intrinsic_loss_coef  # TODO: make this a list
self.max_grad_norm = max_grad_norm
self.optimizer = optim.Adam(actor_critic.parameters(),
lr=lr,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad)
self.last_grad_norm = None
self.intrinsic_losses = intrinsic_losses if intrinsic_losses is not None else []
def update(self, rollouts):
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
max_importance_weight_epoch = 0
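        # Build a randomly shuffled schedule of epochs: 0 means minibatches are
        # drawn with on_policy=True (recent rollout data), 1 means
        # on_policy=False (replayed, off-policy data).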
on_policy = [0] * self.on_policy_epoch
off_policy = [1] * self.off_policy_epoch
epochs = on_policy + off_policy
random.shuffle(epochs)
info = {}
for e in epochs:
if e == 0:
data_generator = rollouts.feed_forward_generator(
None, self.num_mini_batch, on_policy=True)
else:
data_generator = rollouts.feed_forward_generator(
None, self.num_mini_batch, on_policy=False)
for sample in data_generator:
observations_batch, states_batch, actions_batch, \
return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# Reshape to do in a single forward pass for all steps
cache = {}
values, action_log_probs, dist_entropy, states = self.actor_critic.evaluate_actions(
observations_batch, states_batch,
masks_batch, actions_batch, cache
)
intrinsic_loss_dict = self.actor_critic.compute_intrinsic_losses(
self.intrinsic_losses,
observations_batch, states_batch,
masks_batch, actions_batch, cache
)
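                # PPO clipped surrogate objective: with importance ratio
                # r = exp(log_prob - old_log_prob) and advantage estimate A,
                # maximize E[min(r * A, clip(r, 1 - clip_param, 1 + clip_param) * A)],
                # implemented below by minimizing its negative.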
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
value_loss = F.mse_loss(values, return_batch)
self.optimizer.zero_grad()
total_loss = value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef
for loss_name, loss_val in intrinsic_loss_dict.items():
total_loss += loss_val * self.intrinsic_loss_coef
total_loss.backward()
self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
for loss in self.intrinsic_losses:
try:
info[loss] += intrinsic_loss_dict[loss].item()
except:
info[loss] = intrinsic_loss_dict[loss].item()
max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
num_updates = 2 * self.ppo_epoch * self.num_mini_batch # twice since on_policy and off_policy
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
for loss in self.intrinsic_losses:
info[loss] /= num_updates
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, info
|
cosrlib/dataproviders/ut1_blacklist.py | commonsearch/cosr-back | 141 | 12680874 | <reponame>commonsearch/cosr-back
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tempfile
from collections import defaultdict
import shutil
from cosrlib.config import config
from cosrlib.url import URL
from . import BaseDataProvider
class DataProvider(BaseDataProvider):
""" Return the UT1 categories in which the URL belongs.
https://dsi.ut-capitole.fr/blacklists/index_en.php
"""
dump_testdata = "tests/testdata/ut1_blacklists"
dump_url = "ftp://ftp.ut-capitole.fr/pub/reseau/cache/squidguard_contrib/blacklists.tar.gz"
dump_batch_size = None
def iter_rows(self):
if config["TESTDATA"] == "1":
extract_dir = self.dump_testdata
clean = False
else:
extract_dir = tempfile.mkdtemp(suffix="cosr-ut1-import")
clean = True
os.system("curl %s > %s/blacklists.tar.gz" % (self.dump_url, extract_dir))
os.system("cd %s && tar zxf blacklists.tar.gz" % extract_dir)
extract_dir += "/blacklists"
data = defaultdict(list)
for fp in os.listdir(extract_dir):
fullpath = os.path.join(extract_dir, fp)
if os.path.isdir(fullpath) and not os.path.islink(fullpath):
cnt = 0
with open(fullpath + "/domains", 'r') as f:
for line in f.readlines():
url = URL(line.strip()).normalized
if url:
data[url].append(fp)
cnt += 1
if os.path.isfile(fullpath + "/urls"):
with open(fullpath + "/urls", 'r') as f:
for line in f.readlines():
url = URL(line.strip()).normalized
if url:
data[url].append(fp)
cnt += 1
print("Done %s (%s entries)" % (fp, cnt))
if clean:
shutil.rmtree(os.path.dirname(extract_dir))
for key, value in data.iteritems():
yield key, {"ut1_blacklist": value}
|