max_stars_repo_path: string (length 4 to 245)
max_stars_repo_name: string (length 7 to 115)
max_stars_count: int64 (101 to 368k)
id: string (length 2 to 8)
content: string (length 6 to 1.03M)
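Each record below carries these five fields: the file path within its repository, the "owner/repo" name, the repository's star count, a record id, and the raw file contents. As a rough sketch only — the dataset's published name is not given here, so "example/code-dump" below is a hypothetical placeholder — records with this schema could be iterated with the Hugging Face datasets library:

    # Minimal sketch, assuming the dump is published as a Hugging Face dataset.
    # "example/code-dump" is a hypothetical id, not the real name of this dataset.
    from datasets import load_dataset

    ds = load_dataset("example/code-dump", split="train")

    for record in ds.select(range(3)):
        repo = record["max_stars_repo_name"]   # "owner/repo"
        path = record["max_stars_repo_path"]   # file path inside the repo
        stars = record["max_stars_count"]      # int64 star count
        source = record["content"]             # full file contents as one string
        print(f"{repo}/{path} ({stars} stars): {len(source)} characters")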
telemelody/evaluation/cal_acc.py
hongwen-sun/muzic
1,903
11116196
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # from tqdm import tqdm import numpy as np import traceback import sys PITCH_CLASS_NAMES = [ 'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B'] POS_RESOLUTION = 4 ROOT_pitch = { 'C': 0, 'C#': 1, 'D': 2, 'Eb': 3, 'E': 4, 'F': 5, 'F#': 6, 'G': 7, 'Ab': 8, 'A': 9, 'Bb': 10, 'B': 11 } _CHORD_KIND_PITCHES = { '': [0, 4, 7], 'm': [0, 3, 7], '+': [0, 4, 8], 'dim': [0, 3, 6], '7': [0, 4, 7, 10], 'maj7': [0, 4, 7, 11], 'm7': [0, 3, 7, 10], 'm7b5': [0, 3, 6, 10], } def get_tonality(e): def get_pitch_class_histogram(notes, use_duration=True, normalize=True): weights = np.ones(len(notes)) if use_duration: weights *= [note[3] for note in notes] # duration histogram, _ = np.histogram([note[2] % 12 for note in notes], bins=np.arange( 13), weights=weights, density=normalize) if normalize: histogram /= (histogram.sum() + (histogram.sum() == 0)) return histogram e = [i for i in e if i[2] < 128] histogram = get_pitch_class_histogram(e) major_count = histogram[PITCH_CLASS_NAMES.index('C')] minor_count = histogram[PITCH_CLASS_NAMES.index('A')] if major_count < minor_count: is_major = False elif major_count > minor_count: is_major = True else: is_major = None return is_major def fix(items): tmp = [] target_tokens = ['Bar', 'Pos', 'Pitch', 'Dur'] i = 0 for item in items: if item.split('_')[0] == target_tokens[i]: tmp.append(item) i = (i + 1) % len(target_tokens) return tmp def get_value(s): return s.split('_')[1] def get_pitch(chord): try: root, type = chord.split(':') except: return None cur_pitch = [] for i in _CHORD_KIND_PITCHES[type]: cur_pitch.append((ROOT_pitch[root] + i) % 12) return cur_pitch if __name__ == '__main__': all_num = 0 ok_num = 0 note_num = 0 beat_num = 0 chord_num = 0 struct_num = 0 struct_num_2 = 0 struct_num_3 = 0 pause1_num = 0 pause2_num = 0 pause3_num = 0 tonality_num = 0 tonality_sum = 0 chord_sum_2 = 0 chord_num_2 = 0 assert len(sys.argv) == 1 + 1 prefix = sys.argv[1] with open(f'{prefix}/test.hyp.txt', 'r') as h, open(f'{prefix}/test.src.txt', 'r') as s: for hyp_str, src_str in tqdm(list(zip(h, s))): try: all_num += 1 hyp = hyp_str.strip().split() hyp = fix(hyp) hyp = [[int(get_value(hyp[j])) for j in range(i, i+4)] for i in range(0, len(hyp) // 4 * 4, 4)] src = src_str.strip().split() is_major = get_tonality(hyp) if is_major is not None: tonality_sum += 1 if is_major == (src[0] == 'MAJ'): tonality_num += 1 src = src[1:] src = [[get_value(src[i]), src[i+1], int(get_value(src[i+2]))] for i in range(0, len(src), 3)] max_pos = 0 note_items = [] for idx in range(min(len(hyp), len(src))): hyp_item = hyp[idx] src_item = src[idx] note_num += 1 bar, pos, pitch, dur = hyp_item chord, struct, beat = src_item if pos // POS_RESOLUTION == beat: beat_num += 1 cur_pitch = get_pitch(chord) if cur_pitch is None or pitch % 12 in cur_pitch: chord_num += 1 if idx != len(hyp) - 1: if struct == 'HALF': pause1_num += 1 elif struct == 'AUT': pause2_num += 1 else: pause3_num += 1 next_item = hyp[idx + 1] cur_pos = 4 * POS_RESOLUTION * bar + pos next_pos = 4 * POS_RESOLUTION * \ next_item[0] + next_item[1] if next_pos - cur_pos >= POS_RESOLUTION * 1.5 and struct == 'HALF' and dur >= POS_RESOLUTION: struct_num += 1 if next_pos - cur_pos >= POS_RESOLUTION * 2.0 and struct == 'AUT' and dur >= POS_RESOLUTION: struct_num_2 += 1 if struct == 'NOT': if next_pos - cur_pos < POS_RESOLUTION * 2.0 or dur < POS_RESOLUTION: struct_num_3 += 1 ok_num += 1 except: continue print('TA:', round(tonality_num/tonality_sum, 
5)) print('CA:', round(chord_num/note_num, 5)) print('RA:', round(beat_num / note_num, 5)) print('AA:', round((struct_num+struct_num_2+struct_num_3) / (pause1_num + pause2_num+pause3_num), 5))
tests/test_basic.py
stevenlovegrove/sony_camera_api
191
11116219
import unittest

import pysony


class TestBasics(unittest.TestCase):

    def test_loadpysony(self):
        api = pysony.SonyAPI()
        self.assertEqual(api.QX_ADDR, 'http://10.0.0.1:10000')
Lib/test/bugs/jdkbugs.py
jeff5/jython-whinchat
577
11116244
import sys

print sys.platform

try:
    try:
        raise KeyError
    except KeyError:
        # no bug
        print 'Your JVM seems to be working'
except:
    print 'Your JVM seems broken'
airmozilla/uploads/tests/test_views.py
mozilla/airmozilla
115
11116246
<reponame>mozilla/airmozilla<gh_stars>100-1000 import datetime import hashlib import json import mock from nose.tools import eq_, ok_ from django.conf import settings from django.core.cache import cache from django.core.urlresolvers import reverse from airmozilla.base.tests.testbase import DjangoTestCase from airmozilla.uploads.models import Upload from airmozilla.main.models import Event class HeadResponse(object): def __init__(self, **headers): self.headers = headers class TestUploads(DjangoTestCase): def test_home(self): url = reverse('uploads:home') response = self.client.get(url) eq_(response.status_code, 302) ok_(reverse('main:login') in response['location']) self._login() response = self.client.get(url) eq_(response.status_code, 200) def test_upload(self): url = reverse('uploads:upload') response = self.client.get(url) eq_(response.status_code, 302) ok_(reverse('main:login') in response['location']) self._login() response = self.client.get(url) eq_(response.status_code, 200) def test_sign(self): url = reverse('uploads:sign') self._login() response = self.client.get(url) eq_(response.status_code, 400) response = self.client.get( url, {'s3_object_name': 'foo.flv', 's3_object_type': 'video/file'} ) eq_(response.status_code, 200) structure = json.loads(response.content) # expect the URL to contain todays date s3_url = structure['url'] now = datetime.datetime.utcnow() ok_(now.strftime('%Y/%m/%d') in s3_url) ok_('.flv' in s3_url) signed_request = structure['signed_request'] ok_(settings.AWS_ACCESS_KEY_ID in signed_request) ok_(settings.S3_UPLOAD_BUCKET in signed_request) # that should have set a cache key too cache_key = 'file_name_%s' % hashlib.md5(s3_url).hexdigest() eq_(cache.get(cache_key), 'foo.flv') def test_sign_unicode_name(self): url = reverse('uploads:sign') self._login() response = self.client.get( url, {'s3_object_name': u'st\xe9phanie.flv', 's3_object_type': 'video/file'} ) eq_(response.status_code, 200) @mock.patch('requests.head') def test_save(self, rhead): def mocked_head(url, **options): return HeadResponse(**{'content-length': 123456}) rhead.side_effect = mocked_head url = reverse('uploads:save') response = self.client.post(url) eq_(response.status_code, 302) ok_(reverse('main:login') in response['location']) user = self._login() response = self.client.post(url) eq_(response.status_code, 400) response = self.client.post(url, { 'url': 'https://aws.com/foo.flv' }) eq_(response.status_code, 200) structure = json.loads(response.content) new_id = structure['id'] upload = Upload.objects.get(pk=new_id) eq_(upload.size, 123456) eq_(upload.url, 'https://aws.com/foo.flv') eq_(upload.file_name, 'foo.flv') eq_(upload.user, user) @mock.patch('requests.head') def test_save_on_an_active_event_edit(self, rhead): def mocked_head(url, **options): return HeadResponse(**{'content-length': 123456}) rhead.side_effect = mocked_head user = self._login() user.is_superuser = True user.is_staff = True user.save() event = Event.objects.get(title='Test event') event_upload_url = reverse('manage:event_upload', args=(event.pk,)) response = self.client.get(event_upload_url) eq_(response.status_code, 200) url = reverse('uploads:save') response = self.client.post(url, { 'url': 'https://aws.com/foo.flv', 'event_id': event.id, }) eq_(response.status_code, 200) structure = json.loads(response.content) new_id = structure['id'] upload = Upload.objects.get(pk=new_id) eq_(upload.size, 123456) eq_(upload.url, 'https://aws.com/foo.flv') eq_(upload.file_name, 'foo.flv') eq_(upload.user, user) eq_(upload.event, 
event) event = Event.objects.get(pk=event.pk) eq_(event.upload, upload) @mock.patch('requests.head') def test_verify_size(self, rhead): def mocked_head(url, **options): return HeadResponse(**{'content-length': 123456}) rhead.side_effect = mocked_head url = reverse('uploads:verify_size') self._login() response = self.client.get(url, {'url': 'https://aws.com/foo.flv'}) eq_(response.status_code, 200) structure = json.loads(response.content) eq_(structure['size'], 123456) eq_(structure['size_human'], u'120.6\xa0KB')
gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py
Dmitry-Eremeev/gnocchi
299
11116247
# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Change uuid to string Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2016-01-20 11:57:45.954607 """ from alembic import op import sqlalchemy as sa import sqlalchemy_utils # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '<PASSWORD>' branch_labels = None depends_on = None resourcehelper = sa.Table( 'resource', sa.MetaData(), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=False), sa.Column('tmp_created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('created_by_user_id', sa.String(length=255), nullable=True), sa.Column('created_by_project_id', sa.String(length=255), nullable=True), sa.Column('user_id', sa.String(length=255), nullable=True), sa.Column('project_id', sa.String(length=255), nullable=True), ) resourcehistoryhelper = sa.Table( 'resource_history', sa.MetaData(), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=False), sa.Column('tmp_created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('created_by_user_id', sa.String(length=255), nullable=True), sa.Column('created_by_project_id', sa.String(length=255), nullable=True), sa.Column('user_id', sa.String(length=255), nullable=True), sa.Column('project_id', sa.String(length=255), nullable=True), ) metrichelper = sa.Table( 'metric', sa.MetaData(), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=False), sa.Column('tmp_created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('tmp_created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), nullable=True), sa.Column('created_by_user_id', sa.String(length=255), nullable=True), sa.Column('created_by_project_id', sa.String(length=255), nullable=True), ) def upgrade(): connection = op.get_bind() # Rename user/project fields to tmp_* op.alter_column('metric', 'created_by_project_id', new_column_name='tmp_created_by_project_id', existing_type=sa.BINARY(length=16)) op.alter_column('metric', 'created_by_user_id', new_column_name='tmp_created_by_user_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource', 'created_by_project_id', new_column_name='tmp_created_by_project_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource', 'created_by_user_id', 
new_column_name='tmp_created_by_user_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource', 'project_id', new_column_name='tmp_project_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource', 'user_id', new_column_name='tmp_user_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource_history', 'created_by_project_id', new_column_name='tmp_created_by_project_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource_history', 'created_by_user_id', new_column_name='tmp_created_by_user_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource_history', 'project_id', new_column_name='tmp_project_id', existing_type=sa.BINARY(length=16)) op.alter_column('resource_history', 'user_id', new_column_name='tmp_user_id', existing_type=sa.BINARY(length=16)) # Add new user/project fields as strings op.add_column('metric', sa.Column('created_by_project_id', sa.String(length=255), nullable=True)) op.add_column('metric', sa.Column('created_by_user_id', sa.String(length=255), nullable=True)) op.add_column('resource', sa.Column('created_by_project_id', sa.String(length=255), nullable=True)) op.add_column('resource', sa.Column('created_by_user_id', sa.String(length=255), nullable=True)) op.add_column('resource', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('resource', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('resource_history', sa.Column('created_by_project_id', sa.String(length=255), nullable=True)) op.add_column('resource_history', sa.Column('created_by_user_id', sa.String(length=255), nullable=True)) op.add_column('resource_history', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('resource_history', sa.Column('user_id', sa.String(length=255), nullable=True)) # Migrate data for tablehelper in [resourcehelper, resourcehistoryhelper]: for resource in connection.execute(tablehelper.select()): if resource.tmp_created_by_project_id: created_by_project_id = \ str(resource.tmp_created_by_project_id).replace('-', '') else: created_by_project_id = None if resource.tmp_created_by_user_id: created_by_user_id = \ str(resource.tmp_created_by_user_id).replace('-', '') else: created_by_user_id = None if resource.tmp_project_id: project_id = str(resource.tmp_project_id).replace('-', '') else: project_id = None if resource.tmp_user_id: user_id = str(resource.tmp_user_id).replace('-', '') else: user_id = None connection.execute( tablehelper.update().where( tablehelper.c.id == resource.id ).values( created_by_project_id=created_by_project_id, created_by_user_id=created_by_user_id, project_id=project_id, user_id=user_id, ) ) for metric in connection.execute(metrichelper.select()): if resource.tmp_created_by_project_id: created_by_project_id = \ str(resource.tmp_created_by_project_id).replace('-', '') else: created_by_project_id = None if resource.tmp_created_by_user_id: created_by_user_id = \ str(resource.tmp_created_by_user_id).replace('-', '') else: created_by_user_id = None connection.execute( metrichelper.update().where( metrichelper.c.id == metric.id ).values( created_by_project_id=created_by_project_id, created_by_user_id=created_by_user_id, ) ) # Delete temp fields op.drop_column('metric', 'tmp_created_by_project_id') op.drop_column('metric', 'tmp_created_by_user_id') op.drop_column('resource', 'tmp_created_by_project_id') op.drop_column('resource', 'tmp_created_by_user_id') op.drop_column('resource', 'tmp_project_id') op.drop_column('resource', 'tmp_user_id') 
op.drop_column('resource_history', 'tmp_created_by_project_id') op.drop_column('resource_history', 'tmp_created_by_user_id') op.drop_column('resource_history', 'tmp_project_id') op.drop_column('resource_history', 'tmp_user_id')
benchmarks/instruction_counts/definitions/setup.py
Hacky-DH/pytorch
60,067
11116271
<filename>benchmarks/instruction_counts/definitions/setup.py
"""Define some common setup blocks which benchmarks can reuse."""
import enum

from core.api import GroupedSetup
from core.utils import parse_stmts


_TRIVIAL_2D = GroupedSetup(
    r"x = torch.ones((4, 4))",
    r"auto x = torch::ones({4, 4});"
)

_TRIVIAL_3D = GroupedSetup(
    r"x = torch.ones((4, 4, 4))",
    r"auto x = torch::ones({4, 4, 4});"
)

_TRIVIAL_4D = GroupedSetup(
    r"x = torch.ones((4, 4, 4, 4))",
    r"auto x = torch::ones({4, 4, 4, 4});"
)

_TRAINING = GroupedSetup(*parse_stmts(
    r"""
    Python                                   | C++
    ---------------------------------------- | ----------------------------------------
    # Inputs                                 | // Inputs
    x = torch.ones((1,))                     | auto x = torch::ones({1});
    y = torch.ones((1,))                     | auto y = torch::ones({1});
                                             |
    # Weights                                | // Weights
    w0 = torch.ones(                         | auto w0 = torch::ones({1});
        (1,), requires_grad=True)            | w0.set_requires_grad(true);
    w1 = torch.ones(                         | auto w1 = torch::ones({1});
        (1,), requires_grad=True)            | w1.set_requires_grad(true);
    w2 = torch.ones(                         | auto w2 = torch::ones({2});
        (2,), requires_grad=True)            | w2.set_requires_grad(true);
    """
))


class Setup(enum.Enum):
    TRIVIAL_2D = _TRIVIAL_2D
    TRIVIAL_3D = _TRIVIAL_3D
    TRIVIAL_4D = _TRIVIAL_4D
    TRAINING = _TRAINING
pypower/qps_mosek.py
Bengt/PYPOWER
221
11116287
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. """Quadratic Program Solver based on MOSEK. """ import re from sys import stdout, stderr from numpy import array, Inf, zeros, shape, tril, any from numpy import flatnonzero as find from scipy.sparse import csr_matrix as sparse try: from pymosek import mosekopt except ImportError: # print 'MOSEK not available' pass from pypower.mosek_options import mosek_options def qps_mosek(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None, x0=None, opt=None): """Quadratic Program Solver based on MOSEK. A wrapper function providing a PYPOWER standardized interface for using MOSEKOPT to solve the following QP (quadratic programming) problem:: min 1/2 x'*H*x + c'*x x subject to:: l <= A*x <= u (linear constraints) xmin <= x <= xmax (variable bounds) Inputs (all optional except C{H}, C{C}, C{A} and C{L}): - C{H} : matrix (possibly sparse) of quadratic cost coefficients - C{C} : vector of linear cost coefficients - C{A, l, u} : define the optional linear constraints. Default values for the elements of L and U are -Inf and Inf, respectively. - xmin, xmax : optional lower and upper bounds on the C{x} variables, defaults are -Inf and Inf, respectively. - C{x0} : optional starting value of optimization vector C{x} - C{opt} : optional options structure with the following fields, all of which are also optional (default values shown in parentheses) - C{verbose} (0) - controls level of progress output displayed - 0 = no progress output - 1 = some progress output - 2 = verbose progress output - C{max_it} (0) - maximum number of iterations allowed - 0 = use algorithm default - C{mosek_opt} - options struct for MOSEK, values in C{verbose} and C{max_it} override these options - C{problem} : The inputs can alternatively be supplied in a single C{problem} struct with fields corresponding to the input arguments described above: C{H, c, A, l, u, xmin, xmax, x0, opt} Outputs: - C{x} : solution vector - C{f} : final objective function value - C{exitflag} : exit flag - 1 = success - 0 = terminated at maximum number of iterations - -1 = primal or dual infeasible < 0 = the negative of the MOSEK return code - C{output} : output dict with the following fields: - C{r} - MOSEK return code - C{res} - MOSEK result dict - C{lmbda} : dict containing the Langrange and Kuhn-Tucker multipliers on the constraints, with fields: - C{mu_l} - lower (left-hand) limit on linear constraints - C{mu_u} - upper (right-hand) limit on linear constraints - C{lower} - lower bound on optimization variables - C{upper} - upper bound on optimization variables @author: <NAME> (PSERC Cornell) """ ##----- input argument handling ----- ## gather inputs if isinstance(H, dict): ## problem struct p = H else: ## individual args p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u} if xmin is not None: p['xmin'] = xmin if xmax is not None: p['xmax'] = xmax if x0 is not None: p['x0'] = x0 if opt is not None: p['opt'] = opt ## define nx, set default values for H and c if 'H' not in p or len(p['H']) or not any(any(p['H'])): if ('A' not in p) | len(p['A']) == 0 & \ ('xmin' not in p) | len(p['xmin']) == 0 & \ ('xmax' not in p) | len(p['xmax']) == 0: stderr.write('qps_mosek: LP problem must include constraints or variable bounds\n') else: if 'A' in p & len(p['A']) > 0: nx = shape(p['A'])[1] elif 'xmin' in p & len(p['xmin']) > 0: nx = len(p['xmin']) else: # if isfield(p, 'xmax') && ~isempty(p.xmax) nx = len(p['xmax']) p['H'] 
= sparse((nx, nx)) qp = 0 else: nx = shape(p['H'])[0] qp = 1 if 'c' not in p | len(p['c']) == 0: p['c'] = zeros(nx) if 'x0' not in p | len(p['x0']) == 0: p['x0'] = zeros(nx) ## default options if 'opt' not in p: p['opt'] = [] if 'verbose' in p['opt']: verbose = p['opt']['verbose'] else: verbose = 0 if 'max_it' in p['opt']: max_it = p['opt']['max_it'] else: max_it = 0 if 'mosek_opt' in p['opt']: mosek_opt = mosek_options(p['opt']['mosek_opt']) else: mosek_opt = mosek_options() if max_it: mosek_opt['MSK_IPAR_INTPNT_MAX_ITERATIONS'] = max_it if qp: mosek_opt['MSK_IPAR_OPTIMIZER'] = 0 ## default solver only for QP ## set up problem struct for MOSEK prob = {} prob['c'] = p['c'] if qp: prob['qosubi'], prob['qosubj'], prob['qoval'] = find(tril(sparse(p['H']))) if 'A' in p & len(p['A']) > 0: prob['a'] = sparse(p['A']) if 'l' in p & len(p['A']) > 0: prob['blc'] = p['l'] if 'u' in p & len(p['A']) > 0: prob['buc'] = p['u'] if 'xmin' in p & len(p['xmin']) > 0: prob['blx'] = p['xmin'] if 'xmax' in p & len(p['xmax']) > 0: prob['bux'] = p['xmax'] ## A is not allowed to be empty if 'a' not in prob | len(prob['a']) == 0: unconstrained = True prob['a'] = sparse((1, (1, 1)), (1, nx)) prob.blc = -Inf prob.buc = Inf else: unconstrained = False ##----- run optimization ----- if verbose: methods = [ 'default', 'interior point', '<default>', '<default>', 'primal simplex', 'dual simplex', 'primal dual simplex', 'automatic simplex', '<default>', '<default>', 'concurrent' ] if len(H) == 0 or not any(any(H)): lpqp = 'LP' else: lpqp = 'QP' # (this code is also in mpver.m) # MOSEK Version 6.0.0.93 (Build date: 2010-10-26 13:03:27) # MOSEK Version 6.0.0.106 (Build date: 2011-3-17 10:46:54) # pat = 'Version (\.*\d)+.*Build date: (\d\d\d\d-\d\d-\d\d)'; pat = 'Version (\.*\d)+.*Build date: (\d+-\d+-\d+)' s, e, tE, m, t = re.compile(eval('mosekopt'), pat) if len(t) == 0: vn = '<unknown>' else: vn = t[0][0] print('MOSEK Version %s -- %s %s solver\n' % (vn, methods[mosek_opt['MSK_IPAR_OPTIMIZER'] + 1], lpqp)) cmd = 'minimize echo(%d)' % verbose r, res = mosekopt(cmd, prob, mosek_opt) ##----- repackage results ----- if 'sol' in res: if 'bas' in res['sol']: sol = res['sol.bas'] else: sol = res['sol.itr'] x = sol['xx'] else: sol = array([]) x = array([]) ##----- process return codes ----- if 'symbcon' in res: sc = res['symbcon'] else: r2, res2 = mosekopt('symbcon echo(0)') sc = res2['symbcon'] eflag = -r msg = '' if r == sc.MSK_RES_OK: if len(sol) > 0: # if sol['solsta'] == sc.MSK_SOL_STA_OPTIMAL: if sol['solsta'] == 'OPTIMAL': msg = 'The solution is optimal.' eflag = 1 else: eflag = -1 # if sol['prosta'] == sc['MSK_PRO_STA_PRIM_INFEAS']: if sol['prosta'] == 'PRIMAL_INFEASIBLE': msg = 'The problem is primal infeasible.' # elif sol['prosta'] == sc['MSK_PRO_STA_DUAL_INFEAS']: elif sol['prosta'] == 'DUAL_INFEASIBLE': msg = 'The problem is dual infeasible.' else: msg = sol['solsta'] elif r == sc['MSK_RES_TRM_MAX_ITERATIONS']: eflag = 0 msg = 'The optimizer terminated at the maximum number of iterations.' 
else: if 'rmsg' in res and 'rcodestr' in res: msg = '%s : %s' % (res['rcodestr'], res['rmsg']) else: msg = 'MOSEK return code = %d' % r ## always alert user if license is expired if (verbose or r == 1001) and len(msg) < 0: stdout.write('%s\n' % msg) ##----- repackage results ----- if r == 0: f = p['c'].T * x if len(p['H']) > 0: f = 0.5 * x.T * p['H'] * x + f else: f = array([]) output = {} output['r'] = r output['res'] = res if 'sol' in res: lmbda = {} lmbda['lower'] = sol['slx'] lmbda['upper'] = sol['sux'] lmbda['mu_l'] = sol['slc'] lmbda['mu_u'] = sol['suc'] if unconstrained: lmbda['mu_l'] = array([]) lmbda['mu_u'] = array([]) else: lmbda = array([]) return x, f, eflag, output, lmbda
tools/gcs-bucket-mover/gcs_bucket_mover/sts_job_status.py
ruchirjain86/professional-services
2,116
11116296
<gh_stars>1000+
# Copyright 2018 Google LLC. All rights reserved. Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Any software provided by Google hereunder is distributed "AS IS", WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, and is not intended for production use.

"""Status of the STS job"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from enum import Enum


class StsJobStatus(Enum):
    """Status of the STS job"""
    failed = 1
    in_progress = 2
    success = 3
tests/auto/test_embeddings.py
lukovkin/keras
250
11116325
<reponame>lukovkin/keras<gh_stars>100-1000
import unittest

import numpy as np

from keras.models import Sequential
from keras.layers.core import Merge, Dense, Activation, Flatten
from keras.layers.embeddings import Embedding
from theano import function
from keras.constraints import unitnorm


class TestEmbedding(unittest.TestCase):
    def setUp(self):
        self.X1 = np.array([[1], [2]], dtype='int32')
        self.W1 = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype='float32')

    def test_unitnorm_constraint(self):
        lookup = Sequential()
        lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
        lookup.add(Flatten())
        lookup.add(Dense(2, 1))
        lookup.add(Activation('sigmoid'))
        lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
        lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
        norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
        self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))


if __name__ == '__main__':
    unittest.main()
vyapp/plugins/__init__.py
iogf/vy
927
11116338
from functools import wraps

# ENV is a dict holding plugins objects, like functions, classes etc.
# Plugins should install their handles in ENV.
HANDLE = []
ENV = {}


def autoload(plugin, *args, **kwargs):
    HANDLE.append((plugin.install, args, kwargs))


def autocall(handle, *args, **kwargs):
    HANDLE.append((handle, args, kwargs))


def mapset(namespace, map):
    HANDLE.append((lambda area: area.update_map(namespace, map), (), {}))


class Command:
    area = None

    def __init__(self, name=None):
        self.name = name

    def __call__(self, handle):
        name = self.name if self.name else handle.__name__

        @wraps(handle)
        def wrapper(*args, **kwargs):
            return handle(Command.area, *args, **kwargs)
        ENV[name] = wrapper
        return wrapper

    @classmethod
    def set_target(cls, area):
        cls.area = area
hypernets/tabular/ensemble/__init__.py
wyq-1997/Hypernets
1,080
11116372
<reponame>wyq-1997/Hypernets
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""

"""

from .base_ensemble import BaseEnsemble
from .stacking import StackingEnsemble
from .voting import AveragingEnsemble, GreedyEnsemble
from .dask_ensemble import DaskGreedyEnsemble
energyusage/test.py
annlor/energy-usage
141
11116393
<gh_stars>100-1000 import unittest import requests import evaluate as evaluate import locate as locate YEAR = "2016" PROCESS_KWH = 0.1 printToScreen = False class Test(unittest.TestCase): def test_kwh_to_co2(self): # US locations breakdown = [5.868023799, 1.321624392, 66.17474207, 26.63395815] location = "Massachusetts" emission, state_emission = evaluate.emissions(PROCESS_KWH, breakdown, \ location, YEAR, printToScreen) self.assertAlmostEqual(emission, 0.037254766047499006) self.assertEqual(state_emission, 821.327) # Unknown and international location breakdown = [5.572323934, 36.95920652, 20.30010129, 37.16836826] location = "New Zealand" emission, state_emission = evaluate.emissions(PROCESS_KWH, breakdown, \ location, YEAR, printToScreen) self.assertAlmostEqual(emission, 0.05083272721075440) self.assertEqual(state_emission, 0) def test_ip_to_location(self): geo = requests.get("https://get.geojs.io/v1/ip/geo/172.16.31.10.json").json() self.assertEqual(locate.get(printToScreen, geo), "Pennsylvania") def test_get_local_energy_mix(self): output_pennsylvania_mix = [] output_unknown_mix = [] pennsylvania_mix = [25.4209872, 0.1686522923, 31.640982, 42.51657052] unknown_mix = [14.34624948, 39.45439942, 28.64046947, 17.55888163] # breadown from function pennsylvania_breakdown = evaluate.energy_mix("Pennsylvania", YEAR) unknown_breadown = evaluate.energy_mix("Unknown", YEAR) for i in range(0, 4): # US locations pennsylvania_mix[i] = round(pennsylvania_mix[i], 5) output_pennsylvania_mix.append(round(pennsylvania_breakdown[i], 5)) # Unknown (default to US) or international locations unknown_mix[i] = round(unknown_mix[i], 5) output_unknown_mix.append(round(unknown_breadown[i], 5)) self.assertListEqual(output_pennsylvania_mix, pennsylvania_mix) self.assertListEqual(output_unknown_mix, unknown_mix) def test_emissions_comparison(self): locations = ["New Zealand"] default_location = False comparison_values = evaluate.emissions_comparison(PROCESS_KWH, locations, \ YEAR, default_location, printToScreen) comparison_values_list = list(comparison_values[0]) comparison_value = comparison_values_list[1] self.assertAlmostEqual(comparison_value, 0.05083272721075440) def test_old_emissions_comparison(self): default_location = True rounded_default_emissions_list = [] default_emissions = evaluate.old_emissions_comparison(PROCESS_KWH, YEAR,\ default_location, printToScreen) for value in default_emissions: default_emissions_list = list(value) rounded_default_emissions = round(default_emissions_list[1], 11) rounded_default_emissions_list.append(rounded_default_emissions) self.assertListEqual(rounded_default_emissions_list, [0.09233947591, \ 0.07541226821, 0.01049881617, 0.09433027569, 0.06590723112, 0.01697252192, \ 0.09190960756, 0.04500865546, 0.00258048699]) def test_small_energy_consumption_exception(self): def small_function(n): n+1 with self.assertRaises(Exception) as e: evaluate.evaluate(small_function(), 10) self.assertTrue("Process executed too fast to gather energy consumption" in e.exception) if __name__ == '__main__': unittest.main()
mx_mg/data/utils.py
kevinid/molecule_generator
127
11116436
""" Containing utility functions for data processing """ import random from scipy import sparse from rdkit import Chem import networkx as nx import numpy as np from mx_mg.data import data_struct __all__ = ['get_graph_from_smiles_list', 'get_mol_from_graph', 'get_mol_from_graph_list', 'get_d'] def get_graph_from_smiles(smiles): mol = Chem.MolFromSmiles(smiles) # build graph atom_types, atom_ranks, bonds, bond_types = [], [], [], [] for a, r in zip(mol.GetAtoms(), Chem.CanonicalRankAtoms(mol)): atom_types.append(data_struct.get_mol_spec().get_atom_type(a)) atom_ranks.append(r) for b in mol.GetBonds(): idx_1, idx_2, bt = b.GetBeginAtomIdx(), b.GetEndAtomIdx(), data_struct.get_mol_spec().get_bond_type(b) bonds.append([idx_1, idx_2]) bond_types.append(bt) # build nx graph graph = nx.Graph() graph.add_nodes_from(range(len(atom_types))) graph.add_edges_from(bonds) return graph, atom_types, atom_ranks, bonds, bond_types def get_graph_from_smiles_list(smiles_list): graph_list = [] for smiles in smiles_list: mol = Chem.MolFromSmiles(smiles) # build graph atom_types, bonds, bond_types = [], [], [] for a in mol.GetAtoms(): atom_types.append(data_struct.get_mol_spec().get_atom_type(a)) for b in mol.GetBonds(): idx_1, idx_2, bt = b.GetBeginAtomIdx(), b.GetEndAtomIdx(), data_struct.get_mol_spec().get_bond_type(b) bonds.append([idx_1, idx_2]) bond_types.append(bt) X_0 = np.array(atom_types, dtype=np.int32) A_0 = np.concatenate([np.array(bonds, dtype=np.int32), np.array(bond_types, dtype=np.int32)[:, np.newaxis]], axis=1) graph_list.append([X_0, A_0]) return graph_list def traverse_graph(graph, atom_ranks, current_node=None, step_ids=None, p=0.9, log_p=0.0): if current_node is None: next_nodes = range(len(atom_ranks)) step_ids = [-1, ] * len(next_nodes) next_node_ranks = atom_ranks else: next_nodes = graph.neighbors(current_node) # get neighbor nodes next_nodes = [n for n in next_nodes if step_ids[n] < 0] # filter visited nodes next_node_ranks = [atom_ranks[n] for n in next_nodes] # get ranks for neighbors next_nodes = [n for n, r in sorted(zip(next_nodes, next_node_ranks), key=lambda _x:_x[1])] # sort by rank # iterate through neighbors while len(next_nodes) > 0: if len(next_nodes)==1: next_node = next_nodes[0] elif random.random() >= (1 - p): next_node = next_nodes[0] log_p += np.log(p) else: next_node = next_nodes[random.randint(1, len(next_nodes) - 1)] log_p += np.log((1.0 - p) / (len(next_nodes) - 1)) step_ids[next_node] = max(step_ids) + 1 _, log_p = traverse_graph(graph, atom_ranks, next_node, step_ids, p, log_p) next_nodes = [n for n in next_nodes if step_ids[n] < 0] # filter visited nodes return step_ids, log_p def single_reorder(X_0, A_0, step_ids): X_0, A_0 = np.copy(X_0), np.copy(A_0) step_ids = np.array(step_ids, dtype=np.int32) # sort by step_ids sorted_ids = np.argsort(step_ids) X_0 = X_0[sorted_ids] A_0[:, 0], A_0[:, 1] = step_ids[A_0[:, 0]], step_ids[A_0[:, 1]] max_b, min_b = np.amax(A_0[:, :2], axis=1), np.amin(A_0[:, :2], axis=1) A_0 = A_0[np.lexsort([-min_b, max_b]), :] # separate append and connect max_b, min_b = np.amax(A_0[:, :2], axis=1), np.amin(A_0[:, :2], axis=1) is_append = np.concatenate([np.array([True]), max_b[1:] > max_b[:-1]]) A_0 = np.concatenate([np.where(is_append[:, np.newaxis], np.stack([min_b, max_b], axis=1), np.stack([max_b, min_b], axis=1)), A_0[:, -1:]], axis=1) return X_0, A_0 def single_expand(X_0, A_0): X_0, A_0 = np.copy(X_0), np.copy(A_0) # expand X is_append_iter = np.less(A_0[:, 0], A_0[:, 1]).astype(np.int32) NX = np.cumsum(np.pad(is_append_iter, [[1, 0]], 
mode='constant', constant_values=1)) shift = np.cumsum(np.pad(NX, [[1, 0]], mode='constant')[:-1]) X_index = np.arange(NX.sum(), dtype=np.int32) - np.repeat(shift, NX) X = X_0[X_index] # expand A _, A_index = np.tril_indices(A_0.shape[0]) A = A_0[A_index, :] NA = np.arange(A_0.shape[0] + 1) # get action # action_type, atom_type, bond_type, append_pos, connect_pos action_type = 1 - is_append_iter atom_type = np.where(action_type == 0, X_0[A_0[:, 1]], 0) bond_type = A_0[:, 2] append_pos = np.where(action_type == 0, A_0[:, 0], 0) connect_pos = np.where(action_type == 1, A_0[:, 1], 0) actions = np.stack([action_type, atom_type, bond_type, append_pos, connect_pos], axis=1) last_action = [[2, 0, 0, 0, 0]] actions = np.append(actions, last_action, axis=0) action_0 = np.array([X_0[0]], dtype=np.int32) # }}} # {{{ Get mask last_atom_index = shift + NX - 1 last_atom_mask = np.zeros_like(X) last_atom_mask[last_atom_index] = np.where( np.pad(is_append_iter, [[1, 0]], mode='constant', constant_values=1) == 1, np.ones_like(last_atom_index), np.ones_like(last_atom_index) * 2) # }}} return action_0, X, NX, A, NA, actions, last_atom_mask def get_d(A, X): _to_sparse = lambda _A, _X: sparse.coo_matrix((np.ones([_A.shape[0] * 2], dtype=np.int32), (np.concatenate([_A[:, 0], _A[:, 1]], axis=0), np.concatenate([_A[:, 1], _A[:, 0]], axis=0))), shape=[_X.shape[0], ] * 2) A_sparse = _to_sparse(A, X) d2 = A_sparse * A_sparse d3 = d2 * A_sparse # get D_2 D_2 = np.stack(d2.nonzero(), axis=1) D_2 = D_2[D_2[:, 0] < D_2[:, 1], :] # get D_3 D_3 = np.stack(d3.nonzero(), axis=1) D_3 = D_3[D_3[:, 0] < D_3[:, 1], :] # remove D_1 elements from D_3 D_3_sparse = _to_sparse(D_3, X) D_3_sparse = D_3_sparse - D_3_sparse.multiply(A_sparse) D_3 = np.stack(D_3_sparse.nonzero(), axis=1) D_3 = D_3[D_3[:, 0] < D_3[:, 1], :] return D_2, D_3 def merge_single_0(X_0, A_0, NX_0, NA_0): # shift_ids cumsum = np.cumsum(np.pad(NX_0, [[1, 0]], mode='constant')[:-1]) A_0[:, :2] += np.stack([np.repeat(cumsum, NA_0), ] * 2, axis=1) # get D D_0_2, D_0_3 = get_d(A_0, X_0) # split A A_split = [] for i in range(data_struct.get_mol_spec().num_bond_types): A_i = A_0[A_0[:, 2] == i, :2] A_split.append(A_i) A_split.extend([D_0_2, D_0_3]) A_0 = A_split # NX_rep NX_rep_0 = np.repeat(np.arange(NX_0.shape[0]), NX_0) return X_0, A_0, NX_0, NX_rep_0 def merge_single(X, A, NX, NA, mol_ids, rep_ids, iw_ids, action_0, actions, last_append_mask, log_p): X, A, NX, NX_rep = merge_single_0(X, A, NX, NA) cumsum = np.cumsum(np.pad(NX, [[1, 0]], mode='constant')[:-1]) actions[:, -2] += cumsum * (actions[:, 0] == 0) actions[:, -1] += cumsum * (actions[:, 0] == 1) mol_ids_rep = np.repeat(mol_ids, NX) rep_ids_rep = np.repeat(rep_ids, NX) return X, A,\ mol_ids_rep, rep_ids_rep, iw_ids,\ last_append_mask,\ NX, NX_rep,\ action_0, actions, \ log_p def process_single(smiles, k, p): graph, atom_types, atom_ranks, bonds, bond_types = get_graph_from_smiles(smiles) # original X_0 = np.array(atom_types, dtype=np.int32) A_0 = np.concatenate([np.array(bonds, dtype=np.int32), np.array(bond_types, dtype=np.int32)[:, np.newaxis]], axis=1) X, A = [], [] NX, NA = [], [] mol_ids, rep_ids, iw_ids = [], [], [] action_0, actions = [], [] last_append_mask = [] log_p = [] # random sampling decoding route for i in range(k): step_ids_i, log_p_i = traverse_graph(graph, atom_ranks, p=p) X_i, A_i = single_reorder(X_0, A_0, step_ids_i) action_0_i, X_i, NX_i, A_i, NA_i, actions_i, last_atom_mask_i = single_expand(X_i, A_i) # appends X.append(X_i) A.append(A_i) NX.append(NX_i) NA.append(NA_i) 
action_0.append(action_0_i) actions.append(actions_i) last_append_mask.append(last_atom_mask_i) mol_ids.append(np.zeros_like(NX_i, dtype=np.int32)) rep_ids.append(np.ones_like(NX_i, dtype=np.int32) * i) iw_ids.append(np.ones_like(NX_i, dtype=np.int32) * i) log_p.append(log_p_i) # concatenate X = np.concatenate(X, axis=0) A = np.concatenate(A, axis = 0) NX = np.concatenate(NX, axis = 0) NA = np.concatenate(NA, axis = 0) action_0 = np.concatenate(action_0, axis = 0) actions = np.concatenate(actions, axis = 0) last_append_mask = np.concatenate(last_append_mask, axis = 0) mol_ids = np.concatenate(mol_ids, axis = 0) rep_ids = np.concatenate(rep_ids, axis = 0) iw_ids = np.concatenate(iw_ids, axis = 0) log_p = np.array(log_p, dtype=np.float32) return X, A, NX, NA, mol_ids, rep_ids, iw_ids, action_0, actions, last_append_mask, log_p # noinspection PyArgumentList def get_mol_from_graph(X, A, sanitize=True): try: mol = Chem.RWMol(Chem.Mol()) X, A = X.tolist(), A.tolist() for i, atom_type in enumerate(X): mol.AddAtom(data_struct.get_mol_spec().index_to_atom(atom_type)) for atom_id1, atom_id2, bond_type in A: data_struct.get_mol_spec().index_to_bond(mol, atom_id1, atom_id2, bond_type) except: return None if sanitize: try: mol = mol.GetMol() Chem.SanitizeMol(mol) return mol except: return None else: return mol def get_mol_from_graph_list(graph_list, sanitize=True): mol_list = [get_mol_from_graph(X, A, sanitize) for X, A in graph_list] return mol_list
tests/test_driver_policy.py
nmandery/rasterio
1,479
11116441
from rasterio.drivers import is_blacklisted


def test_netcdf_is_blacklisted():
    assert is_blacklisted('netCDF', 'w')
    assert is_blacklisted('netCDF', 'r+')


def test_gtiff_is_not_blacklisted():
    assert not is_blacklisted('GTiff', 'w')
    assert not is_blacklisted('GTiff', 'r+')
macgraph/encoder/__init__.py
Octavian-ai/mac-graph
116
11116464
from .encode import encode_input
api/tacticalrmm/automation/migrations/0008_auto_20210302_0415.py
infinite8co/tacticalrmm
903
11116480
<gh_stars>100-1000
# Generated by Django 3.1.7 on 2021-03-02 04:15

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('agents', '0030_agent_offline_time'),
        ('clients', '0009_auto_20210212_1408'),
        ('automation', '0007_policy_alert_template'),
    ]

    operations = [
        migrations.AddField(
            model_name='policy',
            name='excluded_agents',
            field=models.ManyToManyField(blank=True, related_name='policy_exclusions', to='agents.Agent'),
        ),
        migrations.AddField(
            model_name='policy',
            name='excluded_clients',
            field=models.ManyToManyField(blank=True, related_name='policy_exclusions', to='clients.Client'),
        ),
        migrations.AddField(
            model_name='policy',
            name='excluded_sites',
            field=models.ManyToManyField(blank=True, related_name='policy_exclusions', to='clients.Site'),
        ),
    ]
piccolo/apps/schema/piccolo_app.py
0scarB/piccolo
750
11116507
from piccolo.conf.apps import AppConfig, Command

from .commands.generate import generate
from .commands.graph import graph


APP_CONFIG = AppConfig(
    app_name="schema",
    migrations_folder_path="",
    commands=[
        Command(callable=generate, aliases=["gen", "create", "new", "mirror"]),
        Command(
            callable=graph,
            aliases=["map", "visualise", "vizualise", "viz", "vis"],
        ),
    ],
)
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepHypGenMatch_cff.py
ckamtsikis/cmssw
852
11116525
<filename>TopQuarkAnalysis/TopJetCombination/python/TtSemiLepHypGenMatch_cff.py
import FWCore.ParameterSet.Config as cms

#
# produce genmatch hypothesis with all necessary
# ingredients
#

## std sequence to produce ttSemiJetPartonMatch
from TopQuarkAnalysis.TopTools.TtSemiLepJetPartonMatch_cfi import *

## configure genMatch hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGenMatch_cfi import *

## make hypothesis
makeHypothesis_genMatchTask = cms.Task(
    ttSemiLepJetPartonMatch,
    ttSemiLepHypGenMatch
)
makeHypothesis_genMatch = cms.Sequence(makeHypothesis_genMatchTask)
app/sub_views/scrape_targets_view.py
fake-name/ReadableWebProxy
193
11116568
<gh_stars>100-1000 import traceback import pickle import time import datetime from calendar import timegm from flask import g from flask import render_template from flask import make_response from flask import jsonify from flask import request from sqlalchemy.sql import text from sqlalchemy.dialects import postgresql from sqlalchemy.sql.expression import func from tzlocal import get_localzone from app import app from app import auth import common.database as db import WebMirror.rules import WebMirror.OfflineFilters.NewNetlocTracker as nnt def url_state_update(sess, parameters): for row_updates in parameters: print(row_updates) assert 'id' in row_updates assert 'old-ignore' in row_updates assert 'new-ignore' in row_updates row = sess.query(db.NewNetlocTracker).filter(db.NewNetlocTracker.id == row_updates['id']).scalar() assert row assert row.ignore == row_updates['old-ignore'] row.ignore = row_updates['new-ignore'] sess.commit() return {"error" : False, 'message' : "Changes applied!"} ops = { 'update url states' : url_state_update, } @app.route('/url_api/', methods=['GET', 'POST']) @auth.login_required def url_api(): if not request.json: # print("Non-JSON request!") js = { "error" : True, "message" : "This endpoint only accepts JSON POST requests." } resp = jsonify(js) resp.status_code = 200 resp.mimetype="application/json" return resp print("API Request!") print("session:", g.session) print("Request method: ", request.method) print("Request json: ", request.json) try: if 'op' in request.json and 'data' in request.json and request.json['op'] in ops: data = ops[request.json['op']](g.session, request.json['data']) else: data = {"wat": "wat"} except Exception as e: print("Failure in processing url api call!") traceback.print_exc() js = { "error" : True, "message" : "Error: \n%s" % (traceback.format_exc(), ) } resp = jsonify(js) resp.status_code = 200 resp.mimetype="application/json" return resp response = jsonify(data) print("ResponseData: ", data) print("Response: ", response) response.status_code = 200 response.mimetype="application/json" g.session.commit() g.session.expire_all() return response @app.route('/urls/', methods=['GET']) @auth.login_required def url_view(): WebMirror.rules.load_rules(override=True) scope = request.args.get('scope', 'missing') ignored = request.args.get('ignore', 'exclude') nnt.filter_get_have_urls() # g.session.expire() query = g.session.query(db.NewNetlocTracker) if scope == 'missing': query = query.filter(db.NewNetlocTracker.have == False) if ignored == 'exclude': query = query.filter(db.NewNetlocTracker.ignore == False) items = query.all() g.session.commit() segs = max([item.netloc.count(".") for item in items]) print("Max segments:", segs) def keyf(item): if not item.extra: return (False, ) is_wp = 1 if item.extra.get("is-wp", False) else 2 nlr = item.netloc.split(".") if nlr[0] == "www": nlr = nlr[1:] nlr.reverse() flag = 9001 if "wordpress" in nlr: flag = 1 if "blogspot" in nlr: flag = 2 if "livejournal" in nlr: flag = 3 if "dreamwidth" in nlr: flag = 4 if "syosetu" in nlr: flag = 5 if "wixsite" in nlr: flag = 6 if "fandom" in nlr: flag = 7 if "deviantart" in nlr: flag = 8 ret = (is_wp, flag, 10 + segs - len(nlr), nlr, item.example_url) print(ret) return ret items.sort(key=keyf) return render_template('url_listings.html', netloc_items = items, # states = states, )
scripts/counts.py
aminya/despacer
110
11116675
for i in range(1 << 16):
    print(bin(i).count("1"), end=",")
tests/test_acc_on.py
rsketine/neon
4,415
11116693
<reponame>rsketine/neon # ****************************************************************************** # Copyright 2014-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** ''' Test of the acc_on functionality ''' from utils import tensors_allclose, allclose_with_out import numpy as np import pytest from neon import NervanaObject from neon.initializers.initializer import Uniform from neon.layers.layer import ( Convolution, Deconvolution, Layer, Linear, Bias, BatchNorm, ) from neon.layers.container import Sequential def pytest_generate_tests(metafunc): if 'test_args' in metafunc.fixturenames: batch_size = 64 indim = 16 nifm = 4 fshape = 2 nofm = 16 stride = 1 pad = 0 stride = 1 init_unif = Uniform(low=0.0, high=1.0) fargs = [[indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif]] metafunc.parametrize('test_args', fargs) def layer_setup(layer, in_shape, deltas_buffer): """ Generic layer setup """ layer.configure(in_shape) layer.prev_layer = True with pytest.raises(BufferError): layer.set_acc_on(True) layer.allocate(accumulate_updates=True) layer.allocate_deltas(deltas_buffer) deltas_buffer.allocate_buffers() layer.set_deltas(deltas_buffer) def container_setup(layer, in_shape, deltas_buffer): """ Generic layer setup """ layer.configure([in_shape, in_shape]) layer.prev_layer = True with pytest.raises(BufferError): layer.set_acc_on(True) layer.allocate(accumulate_updates=True) layer.allocate_deltas(deltas_buffer) deltas_buffer.allocate_buffers() layer.set_deltas(deltas_buffer) def random_fprop_layer(in_shape, in_size, layer): dtypeu = np.float32 inp_rng = [0.0, 1.0] inpa = np.random.random(in_size) inpa *= inp_rng[1] - inp_rng[0] inpa += inp_rng[0] inpa = inpa.astype(dtypeu) inp = layer.be.array(inpa) inp.lshape = in_shape return layer.fprop(inp).get().shape def errfn(layer, out_shape): """ Random errors (deltas from a previous layer) used for bprop testing """ dtypeu = np.float32 erra = np.random.random(out_shape) erra = erra.astype(dtypeu) err = layer.be.array(erra) return err def check_acc_on(layer, out_shape, buffers): # generate two random delta tensors err1 = errfn(layer, out_shape) err2 = errfn(layer, out_shape) assert (not (tensors_allclose(err1, err2, atol=0.0, rtol=0.0))) terminal_layer = layer.get_terminal() for b in buffers: # check that we have created the acc buffer assert (getattr(terminal_layer, b).get().shape == getattr(terminal_layer, "acc_" + b).get().shape) layer.bprop(err1) dW_s = getattr(terminal_layer, b).get() layer.bprop(err2) dW = getattr(terminal_layer, b).get() # Turning acc_on on should accumulate layer.set_acc_on(True) # test we created the buffer assert (hasattr(terminal_layer, "acc_" + b)) layer.bprop(err1) dW_p = getattr(terminal_layer, b).get() assert allclose_with_out(dW_p, (dW + dW_s)) # Turning acc_on off should reset on next bprop layer.set_acc_on(False) layer.bprop(err2) dW_dp = getattr(terminal_layer, b).get() assert allclose_with_out(dW_dp, dW) def 
test_unsupported_layer(): layer = Layer() with pytest.raises(TypeError): layer.allocate(accumulate_updates=True) with pytest.raises(BufferError): layer.set_acc_on(True) def test_unsupported_class(): """ Test that the decorator doesn't work if applied to a non-Layer subclass """ class Foo: @Layer.accumulates def bprop(self): pass layer = Foo() with pytest.raises(TypeError): layer.bprop() def test_api(backend_default, mocker): """ Basic test for API breakage, not working as intended with `self.p = MyTensor(np.ones(10))`, but this could be due to invalid use of something.""" class MyTensor(object): def __init__(self, arr): self.arr = arr self.shape = arr.shape def __add__(self, x): return MyTensor(self.get() + x.get()) def __setitem__(self, index, value): self.arr[index] = value.get() def __getitem__(self, index): return self.arr[index] def get(self): return self.arr def my_get(self): return self def my_allocate(self, accumulate_updates=False): self.accumulate_updates = accumulate_updates self.p = MyTensor(np.zeros(10)) self.acc_p = MyTensor(np.zeros(10)) self.acc_params = [(self.acc_p, self.p)] @Layer.accumulates def my_bprop(self, err): pass layer = Layer() mocker.patch('neon.layers.layer.Layer.allocate', my_allocate) mocker.patch('neon.layers.layer.Layer.bprop', my_bprop) layer.allocate(accumulate_updates=True) layer.set_acc_on(True) layer.bprop(np.zeros(10)) check_acc_on(layer, (10), ['p']) def test_conv_acc_on(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args in_shape = (nifm, indim, indim) layer = Convolution(fshape=(fshape, fshape, nofm), strides=stride, padding=pad, init=init_unif) testLayer = LayerTest(layer, in_shape, batch_size, deltas_buffer) testLayer.test(['dW']) def test_deconv_acc_on(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args in_shape = (nifm, indim, indim) layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=stride, padding=0, init=init_unif) testLayer = LayerTest(layer, in_shape, batch_size, deltas_buffer) testLayer.test(['dW']) def test_linear_acc_on(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args layer = Linear(nout=indim, init=init_unif) in_shape = indim testLayer = LayerTest(layer, in_shape, batch_size, deltas_buffer) testLayer.test(['dW']) def test_bias_acc_on(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args layer = Bias(init=init_unif) in_shape = (indim, batch_size) testLayer = LayerTest(layer, in_shape, batch_size, deltas_buffer) testLayer.test(['dW']) def test_batchnorm_acc_on(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args layer = BatchNorm() in_shape = (indim, indim) testLayer = LayerTest(layer, in_shape, batch_size, deltas_buffer) testLayer.test(['grad_beta', 'grad_gamma']) def test_layer_container(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args in_shape = indim containerTest = LayerTest(Sequential([Linear(nout=indim, init=init_unif), Linear(nout=indim, init=init_unif)]), in_shape, batch_size, deltas_buffer) containerTest.test(['dW']) def test_layer_container_unsupported_layer(backend_default, test_args, deltas_buffer): indim, nifm, fshape, nofm, batch_size, stride, pad, init_unif = test_args in_shape = indim def 
fail_on_accumulate_updates(f): def wrapper(*args, **kwargs): if 'accumulate_updates' in kwargs: raise AttributeError("Should not have gotten accumulate updates") out = f(*args, **kwargs) return out return wrapper unsupportedLayer = Linear(nout=indim, init=init_unif) unsupportedLayer.allocate = fail_on_accumulate_updates(unsupportedLayer.allocate) containerTest = LayerTest(Sequential([Linear(nout=indim, init=init_unif), unsupportedLayer, Linear(nout=indim, init=init_unif)]), in_shape, batch_size, deltas_buffer) containerTest.test(['dW']) class LayerTest(object): def __init__(self, layer, in_shape, batch_size, deltas_buffer): self.layer = layer self.in_shape = in_shape self.in_size = (np.prod(in_shape), batch_size) NervanaObject.be.bsz = batch_size layer_setup(self.layer, self.in_shape, deltas_buffer) def test(self, buffers): self.out_shape = random_fprop_layer(self.in_shape, self.in_size, self.layer) check_acc_on(self.layer, self.out_shape, buffers)
ffi/python/pairing.py
adambudziak/mcl
325
11116707
<gh_stars>100-1000
from ctypes import *
from ctypes.wintypes import LPWSTR, LPCSTR, LPVOID

g_lib = None

def BN256_init():
    global g_lib
    g_lib = cdll.LoadLibrary("../../bin/bn256.dll")
    ret = g_lib.BN256_init()
    if ret:
        print "ERR BN256_init"

class Fr(Structure):
    _fields_ = [("v", c_ulonglong * 4)]
    def setInt(self, v):
        g_lib.BN256_Fr_setInt(self.v, v)
    def setStr(self, s):
        ret = g_lib.BN256_Fr_setStr(self.v, c_char_p(s))
        if ret:
            print("ERR Fr:setStr")
    def __str__(self):
        svLen = 1024
        sv = create_string_buffer('\0' * svLen)
        ret = g_lib.BN256_Fr_getStr(sv, svLen, self.v)
        if ret:
            print("ERR Fr:getStr")
        return sv.value
    def isZero(self, rhs):
        return g_lib.BN256_Fr_isZero(self.v) != 0
    def isOne(self, rhs):
        return g_lib.BN256_Fr_isOne(self.v) != 0
    def __eq__(self, rhs):
        return g_lib.BN256_Fr_isEqual(self.v, rhs.v) != 0
    def __ne__(self, rhs):
        return not(P == Q)
    def __add__(self, rhs):
        ret = Fr()
        g_lib.BN256_Fr_add(ret.v, self.v, rhs.v)
        return ret
    def __sub__(self, rhs):
        ret = Fr()
        g_lib.BN256_Fr_sub(ret.v, self.v, rhs.v)
        return ret
    def __mul__(self, rhs):
        ret = Fr()
        g_lib.BN256_Fr_mul(ret.v, self.v, rhs.v)
        return ret
    def __div__(self, rhs):
        ret = Fr()
        g_lib.BN256_Fr_div(ret.v, self.v, rhs.v)
        return ret
    def __neg__(self):
        ret = Fr()
        g_lib.BN256_Fr_neg(ret.v, self.v)
        return ret

def Fr_add(z, x, y):
    g_lib.BN256_Fr_add(z.v, x.v, y.v)

def Fr_sub(z, x, y):
    g_lib.BN256_Fr_sub(z.v, x.v, y.v)

def Fr_mul(z, x, y):
    g_lib.BN256_Fr_mul(z.v, x.v, y.v)

def Fr_div(z, x, y):
    g_lib.BN256_Fr_div(z.v, x.v, y.v)

BN256_init()
P = Fr()
Q = Fr()
print P == Q
print P != Q
P.setInt(5)
Q.setStr("34982034824")
print Q
R = Fr()
Fr_add(R, P, Q)
print R
guppy/heapy/test/test_menuleak.py
odidev/guppy3
251
11116708
from tkinter import * import sys import gc class FixedMenu(Menu): # A fix for the .delete() method in Menu. # To delete commands defined in the menu items deleted. # Also changed the comment: INDEX2 is actually INCLUDED. def delete(self, index1, index2=None): """Delete menu items between INDEX1 and INDEX2 (included).""" print(self._tclCommands) if index2 is None: index2 = index1 # First find out what entries have defined commands. cmds = [] for i in range(self.index(index1), self.index(index2)+1): c = str(self.entrycget(i, 'command')) if c in self._tclCommands: # I don't want to delete the command already, since it # seems mystical to do that while the entry is not yet deleted. cmds.append(c) # Delete the menu entries. self.tk.call(self._w, 'delete', index1, index2) # Now that the menu entries have been deleted, we can delete their commands. for c in cmds: self.deletecommand(c) def test1(M): # Test with a single command gc.collect() root = Tk() button = Menubutton(root, text='Window') menu = M(button) button['menu'] = menu def command(): print('command button pressed') rc = sys.getrefcount(command) menu.add_command(command=command) # or add_radiobutton etc idx = menu.index(END) menu.delete(idx) gc.collect() rc1 = sys.getrefcount(command) print('leak test with class', M, end=' ') if rc1 != rc: print('failed: command is now hold by', rc1, 'references') else: print('succeeded: command is now hold by', rc1, 'references') root.destroy() def test2(M): # Test with 3 commands, especially to see that deleting a range works. gc.collect() root = Tk() button = Menubutton(root, text='Window') menu = M(button) button['menu'] = menu def command0(): print('command 0 button pressed') 'deleting 0 and 1' menu.delete(idx0, idx1) def command1(): print('command 1 button pressed') def command2(): print('command 2 button pressed') print('deleting at END') menu.delete(END) root.quit() rc = [sys.getrefcount(x) for x in (command0, command1, command0)] button.pack() # or add_radiobutton etc menu.add_command(command=command0, label='press first') idx0 = menu.index(END) menu.add_radiobutton(command=command1, label='command1') # to see that delete works even when no command supplied menu.add_command(label='no Command') idx1 = menu.index(END) menu.add_command(command=command2, label='press last') idx2 = menu.index(END) root.mainloop() gc.collect() rc1 = [sys.getrefcount(x) for x in (command0, command1, command0)] print('leak test with class', M, end=' ') if rc1 != rc: print('failed: command is now hold by', rc1, 'references, should be', rc) else: print('succeeded: command is now hold by', rc1, 'references') root.destroy() for M in (Menu, FixedMenu,): test1(M) test2(M)
tools/extract_typings_rule.bzl
John-Cassidy/angular
95,154
11116734
"""Starlark file that exposes a rule for extracting type definitions of dependencies.""" load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo") def _extract_typings_rule_impl(ctx): """Implementation of the `extract_typings` rule.""" transitive_depsets = [] for dep in ctx.attr.deps: # Based on whether declarations should be collected, extract direct # and transitive declaration files using the `DeclarationInfo` provider. if DeclarationInfo in dep: transitive_depsets.append(dep[DeclarationInfo].transitive_declarations) return [DefaultInfo(files = depset(transitive = transitive_depsets))] # TODO: Move into shared dev-infra package. extract_typings = rule( implementation = _extract_typings_rule_impl, doc = """Rule that extracts all transitive typings of dependencies""", attrs = { "deps": attr.label_list( allow_files = True, ), }, )
Python/control/upperbody_opspace.py
hpbader42/Klampt
238
11116761
<filename>Python/control/upperbody_opspace.py<gh_stars>100-1000 from OperationalSpaceController import * from controller import * from estimators import * import robotinfo class ArmController(OpSpaceController): def __init__(self,robot,arm): self.arm = arm self.dofs = robotinfo.arm(robot,self.arm) OpSpaceController.__init__(self,robot) def setupTasks(self,opSpaceController): robot = self.robot opSpaceController.activeDofs = self.dofs opSpaceController.regularizationFactor = 1e-2 # joint task self.jtask = opSpaceController.addTask(JointTask(robot,self.dofs), gains=(-0.01, 0, 0.0), weight = 0.001, priority=2) self.eeTask = opSpaceController.addTask(LinkTask(robot,self.dofs[-1],'position'), gains=(-10.0,-0.25,-20.0), weight = 1.0, priority=1) self.eeTask.eImax = 1.0 self.eeTask.vcmdmax = 1.0 def manageTasks(self,inputs,opController): return def makeTwoArm(robot): rarm_dofs = robotinfo.arm(robot,'r') larm_dofs = robotinfo.arm(robot,'l') other_dofs = [i for i in range(robot.numLinks()) if i not in rarm_dofs and i not in larm_dofs] master = MultiController() #need to get dq from q master.launch(VelocityEstimator(robot)) c = MultiController() #set the right arm controller to qcmd_r,dqcmd_r rc = ArmController(robot,'r') rc = c.launch(rc) c.map_output(rc,'qcmd','qcmd_r') c.map_output(rc,'dqcmd','dqcmd_r') lc = ArmController(robot,'l') lc = c.launch(lc) #set the left arm controller to qcmd_r,dqcmd_r c.map_output(lc,'qcmd','qcmd_l') c.map_output(lc,'dqcmd','dqcmd_l') #map the single arm controller outputs to qcmd and dqcmd c.launch(ComposeController({'qcmd_r':rarm_dofs,'qcmd_l':larm_dofs,'qcmd':other_dofs},'qcmd')) c.launch(ComposeController({'dqcmd_r':rarm_dofs,'dqcmd_l':larm_dofs,'dqcmd':other_dofs},'dqcmd')) #switch to the operational space controller after 0.1 second tc = TimedControllerSequence([BaseController(),c],[0.01,1e30]) master.launch(tc) #send qcmd and dqcmd to the controllers master.map_my_output('qcmd') master.map_my_output('dqcmd') return master def make(robot): return makeTwoArm(robot)
tiles/debug/black.py
wendymhelson/tilecloud
134
11116824
from tilecloud.store.debug import DebugTileStore

tilestore = DebugTileStore(color=(0, 0, 0))
cpmpy/bus_scheduling_csplib.py
tias/hakank
279
11116834
<filename>cpmpy/bus_scheduling_csplib.py """ Bus driver scheduling problem (prob022 in CSPLib) in cpmpy. http://www.cs.st-andrews.ac.uk/~ianm/CSPLib/prob/prob022/index.html From http://www.cs.st-andrews.ac.uk/~ianm/CSPLib/prob/prob022/spec.html ''' Specification Bus driver scheduling can be formulated as a set paritioning problem. We propose 12 set partitioning problems derived from small bus driver scheduling problems. These consist of a given set of tasks (pieces of work) to cover and a large set of possible shifts, where each shift covers a subset of the tasks and has an associated cost. We must select a subset of possible shifts that covers each piece of work once and only once: this is called a partition. Further, In the driver scheduling (unlike air crew scheduling) the main aim is to reduce the number of shifts used in the solution partition and the total cost of the partition is secondary. To simplify the problem we have made the cost of each shift the same. This means that the goal is to minimise the number of shifts. The problems come from four different bus companies: Reading (r1 to r5a), CentreWest Ealing area (c1, c1a, c2), the former London Transport (t1 and t2). The problems have differing regulations and features (e.g. urban and short distance rural bus schedules can have very different features). Note that r1 and r1a are the same problem, but have different numbers of generated shifts. Similarly with the problems: c1, c1a and r5, r5a. Problems are presented in the same format as the set partitioning examples in ORLIB. The first line gives the number of rows (pieces of work), columns (shifts) and the minimum number of columns need for a partition. Then each line after that corresponds to one column. It starts with the cost (which is always 1 in our case) then the number of rows it covers, followed by the rows it covers. 
''' The datafiles are in the directory http://hakank.org/cpmpy/bus_scheduling_csplib/problems/ Here's the walltimes (seconds) with a 60s timeout: t1 : 0.012489291000000001 r1 : 1.4382032850000002 r2 : 1.864917573, t2 : 2.6930278050000003, r4 : 3.383277532, r5 : 60, r1a: 3.1755979450000003, c1 : 5.129078195, c1a: 11.481672312, c2 : 60, r5a: 60, r3 : 25.730254748 The output for problem t1: num_work: 24 num_shifts: 77 min_num_shifts: 7 tot_shifts: 7 selected shifts: shift 1 : [11, 3, 4] shift 9 : [1, 2, 14, 15] shift 17 : [7, 18, 19, 20] shift 24 : [12, 13, 5, 6] shift 35 : [8, 9, 16, 17] shift 72 : [10, 22, 23] shift 76 : [21, 0] Num conflicts: 8 NumBranches: 103 WallTime: 0.012489291000000001 Model created by <NAME>, <EMAIL> See also my CPMpy page: http://www.hakank.org/cpmpy/ """ import os from cpmpy import * import numpy as np from cpmpy_hakank import * # # Read the data file # def read_problem(file): f = open(file, "r") num_rows,num_shifts,min_num_shifts = [int(t) for t in f.readline().replace('\n','').split(' ')] data = [] for i in range(num_shifts): t = [int(t) for t in f.readline().rstrip().split(' ')] data.append([t[j] for j in range(2,len(t))]) return [num_rows,num_shifts,min_num_shifts,data] def bus_scheduling(problem,timeout=60): # data num_work = problem[0] num_shifts = problem[1] min_num_shifts = problem[2] shifts = problem[3] print("num_work:", num_work, "num_shifts:", num_shifts, "min_num_shifts:", min_num_shifts) # variables x = boolvar(shape=num_shifts,name="x") # x[i] = 1 if this shift is selected tot_shifts = intvar(0, min_num_shifts,name="tot_shifts") # constraints model = Model(tot_shifts == sum(x)) for j in range(num_work): model += (1 == sum([x[i]*(sum([k == j for k in shifts[i]]) >= 1) for i in range(num_shifts)])) model += (tot_shifts >= min_num_shifts) model.minimize(tot_shifts) ss = CPM_ortools(model) # ss.ort_solver.parameters.log_search_progress = True # ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions # ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH # ss.ort_solver.parameters.cp_model_presolve = False # ss.ort_solver.parameters.linearization_level = 0 # ss.ort_solver.parameters.cp_model_probing_level = 0 if ss.solve(time_limit=timeout): print("tot_shifts:", tot_shifts.value()) selected_shifts = [i for i in range(num_shifts) if x[i].value() == 1] print("selected shifts:") shifts_s = [0 for i in range(num_work)] for s in selected_shifts: print("shift ", s, ":", shifts[s]) for i in shifts[s]: shifts_s[i] = s # for w in range(num_work): # print("work", w, "is covered in shift", ss[w]) print() print("Num conflicts:", ss.ort_solver.NumConflicts()) print("NumBranches:", ss.ort_solver.NumBranches()) walltime = ss.ort_solver.WallTime() print("WallTime:", walltime) print() return walltime else: print(f"Timeout: {timeout}s") return timeout # the problems in bus_scheduling_csplib/problems/ problems = [ #name size (bytes) order by size "t1", # 1127 "r1", # 46974 "r2", # 53383 "t2", # 69564 "r4", # 71752 "r5", # 77707 "r1a", # 80572 "c1", # 113966 "c1a", # 228132 "c2", # 444719 "r5a", # 496153 "r3", # 780452 ] def benchmark(timeout=60): wall_times = {} print("Timeout:", timeout) for p in problems: print(f"problem {p}") path = "bus_scheduling_csplib/problems/%s"%p problem = read_problem(path) t = bus_scheduling(problem,timeout) wall_times[p] = t print(flush=True) print("Wall times:", wall_times) # p = "t1" # if len(sys.argv) > 1: # p= sys.argv[1] # print("problem", p) # path = 
"bus_scheduling_csplib/problems/%s"%p # if not os.path.isfile(path): # print("The file %s does not exists!" % path) # else: # problem = read_problem(path) # bus_scheduling(problem) timeout = 60 # seconds benchmark(timeout)
examples/recaptcha3_request.py
smeinecke/python-anticaptcha
199
11116866
import re

import requests
from os import environ
from six.moves.urllib_parse import urljoin

from python_anticaptcha import AnticaptchaClient, RecaptchaV3TaskProxyless

api_key = environ["KEY"]
# Raw strings so the backslashes reach the regex engine literally.
site_key_pattern = r"grecaptcha.execute\('(.+?)'"
action_name_pattern = r"\{action: '(.+?)'\}"
url = "https://recaptcha-demo.appspot.com/recaptcha-v3-request-scores.php"
client = AnticaptchaClient(api_key)
session = requests.Session()


def get_form_html():
    return session.get(url).text


def get_token(form_html):
    website_key = re.search(site_key_pattern, form_html).group(1)
    page_action = re.search(action_name_pattern, form_html).group(1)
    task = RecaptchaV3TaskProxyless(
        website_url=url, website_key=website_key, page_action=page_action, min_score=0.7
    )
    job = client.createTask(task)
    job.join(maximum_time=9 * 60)
    return [page_action, job.get_solution_response()]


def form_submit(page_action, token):
    return requests.post(
        url=urljoin(url, "recaptcha-v3-verify.php"),
        params={"action": page_action, "token": token},
    ).json()


def process():
    html = get_form_html()
    [page_action, token] = get_token(html)
    return form_submit(page_action, token)


if __name__ == "__main__":
    result = process()
    assert result["success"] is True
    print("Processed successfully")
doc/python_api/examples/bpy.types.Operator.py
rbabari/blender
365
11116898
""" Basic Operator Example ++++++++++++++++++++++ This script shows simple operator which prints a message. Since the operator only has an :class:`Operator.execute` function it takes no user input. .. note:: Operator subclasses must be registered before accessing them from blender. """ import bpy class HelloWorldOperator(bpy.types.Operator): bl_idname = "wm.hello_world" bl_label = "Minimal Operator" def execute(self, context): print("Hello World") return {'FINISHED'} bpy.utils.register_class(HelloWorldOperator) # test call to the newly defined operator bpy.ops.wm.hello_world()
tests/scripts/ci.py
shreejitverma/tvm
2,084
11116919
<reponame>shreejitverma/tvm #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import multiprocessing import os import getpass import inspect import argparse import grp import subprocess from pathlib import Path from typing import List, Dict, Any, Optional REPO_ROOT = Path(__file__).resolve().parent.parent.parent NPROC = multiprocessing.cpu_count() class col: BLUE = "\033[94m" CYAN = "\033[96m" GREEN = "\033[92m" YELLOW = "\033[93m" RED = "\033[91m" RESET = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" def print_color(color: str, msg: str, **kwargs: Any) -> None: if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): print(col.BOLD + color + msg + col.RESET, **kwargs) else: print(msg, **kwargs) def clean_exit(msg: str) -> None: print_color(col.RED, msg, file=sys.stderr) exit(1) def cmd(commands: List[Any], **kwargs: Any): commands = [str(s) for s in commands] command_str = " ".join(commands) print_color(col.BLUE, command_str) proc = subprocess.run(commands, **kwargs) if proc.returncode != 0: raise RuntimeError(f"Command failed: '{command_str}'") def docker(name: str, image: str, scripts: List[str], env: Dict[str, str]): """ Invoke a set of bash scripts through docker/bash.sh """ if sys.platform == "linux": # Check that the user is in the docker group before running try: group = grp.getgrnam("docker") if getpass.getuser() not in group.gr_mem: print_color( col.YELLOW, f"Note: User '{getpass.getuser()}' is not in the 'docker' group" ) except KeyError: print_color(col.YELLOW, f"Note: 'docker' group does not exist") docker_bash = REPO_ROOT / "docker" / "bash.sh" command = [docker_bash, "--name", name] for key, value in env.items(): command.append("--env") command.append(f"{key}={value}") command += [image, "bash", "-c", " && ".join(scripts)] try: cmd(command) except RuntimeError as e: clean_exit(f"Error invoking Docker: {e}") except KeyboardInterrupt: cmd(["docker", "stop", "--time", "1", name]) def docs( tutorial_pattern: Optional[str] = None, full: bool = False, precheck: bool = False, cpu: bool = False, ) -> None: """ Build the documentation from gallery/ and docs/. By default this builds only the Python docs. If you are on a CPU machine, you can skip the tutorials and build the docs with the '--precheck --cpu' options. arguments: full -- Build all language docs, not just Python precheck -- Run Sphinx precheck script tutorial-pattern -- Regex for which tutorials to execute when building docs (can also be set via TVM_TUTORIAL_EXEC_PATTERN) cpu -- Use CMake defaults for building TVM (useful for building docs on a CPU machine.) 
""" config = "./tests/scripts/task_config_build_gpu.sh" if cpu and full: clean_exit("--full cannot be used with --cpu") if cpu: # The docs import tvm.micro, so it has to be enabled in the build config = "cd build && cp ../cmake/config.cmake . && echo set\(USE_MICRO ON\) >> config.cmake && cd .." scripts = [ config, f"./tests/scripts/task_build.sh build -j{NPROC}", "./tests/scripts/task_ci_setup.sh", "./tests/scripts/task_sphinx_precheck.sh" if precheck else "./tests/scripts/task_python_docs.sh", ] if tutorial_pattern is None: tutorial_pattern = os.getenv("TVM_TUTORIAL_EXEC_PATTERN", ".py" if full else "none") env = { "TVM_TUTORIAL_EXEC_PATTERN": tutorial_pattern, "PYTHON_DOCS_ONLY": "0" if full else "1", "IS_LOCAL": "1", } docker(name="ci-docs", image="ci_gpu", scripts=scripts, env=env) def serve_docs(directory: str = "_docs") -> None: """ Serve the docs using Python's http server arguments: directory -- Directory to serve from """ directory = Path(directory) if not directory.exists(): clean_exit("Docs have not been build, run 'ci.py docs' first") cmd([sys.executable, "-m", "http.server"], cwd=directory) def lint() -> None: """ Run CI's Sanity Check step """ docker( name="ci-lint", image="ci_lint", scripts=["./tests/scripts/task_lint.sh"], env={}, ) def cli_name(s: str) -> str: return s.replace("_", "-") def add_subparser(func, subparsers) -> Any: """ Utility function to make it so subparser commands can be defined locally as a function rather than directly via argparse and manually dispatched out. """ # Each function is intended follow the example for arguments in PEP257, so # split apart the function documentation from the arguments split = [s.strip() for s in func.__doc__.split("arguments:\n")] if len(split) == 1: args_help = None command_help = split[0] else: command_help, args_help = split # Parse out the help text for each argument if present arg_help_texts = {} if args_help is not None: for line in args_help.split("\n"): line = line.strip() name, help_text = [t.strip() for t in line.split(" -- ")] arg_help_texts[name] = help_text subparser = subparsers.add_parser(cli_name(func.__name__), help=command_help) # Add each parameter to the subparser signature = inspect.signature(func) for name, value in signature.parameters.items(): kwargs = {"help": arg_help_texts[cli_name(name)]} # Grab the default value if present if value.default is not value.empty: kwargs["default"] = value.default # Check if it should be a flag if value.annotation is bool: kwargs["action"] = "store_true" subparser.add_argument(f"--{cli_name(name)}", **kwargs) return subparser def main(): parser = argparse.ArgumentParser(description="Run CI scripts locally via Docker") subparsers = parser.add_subparsers(dest="command") subparser_functions = {cli_name(func.__name__): func for func in [docs, serve_docs, lint]} for func in subparser_functions.values(): add_subparser(func, subparsers) args = parser.parse_args() func = subparser_functions[args.command] # Extract out the parsed args and invoke the relevant function kwargs = {k: getattr(args, k) for k in dir(args) if not k.startswith("_") and k != "command"} func(**kwargs) if __name__ == "__main__": main()
setup.py
tutengfei/flaskblog
204
11116943
#!/usr/bin/env python from setuptools import setup import app try: long_description = open('README.md').read() except: long_description = app.__description__ REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()] setup( name='flaskblog', version=app.__version__, url='https://github.com/defshine/flaskblog', author=app.__author__, author_email=app.__email__, description=app.__description__, long_description=long_description, license=app.__license__, packages=['flaskblog'], include_package_data=True, zip_safe=False, platforms='any', install_requires=REQUIREMENTS )
excel4lib/config/__init__.py
aaaddress1/boobsnail
169
11116956
<gh_stars>100-1000
from .excel4_obfuscator_config import *
from .excel4_translator_config import *
from .excel4_config import *
kur/engine/jinja_engine.py
greedyuser/kur
867
11116983
""" Copyright 2016 Deepgram Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import ast import json import logging import yaml import jinja2 from .engine import Engine from ..utils import CudaContext, CudaError logger = logging.getLogger(__name__) ############################################################################### def combine(value, new=None): """ Jinja2 filter which merges dictionaries. """ new = new or {} value = dict(value) value.update(new) return value ############################################################################### def as_dict(value, key): """ Jinja2 filter which constructs a dictionary from a key/value pair. """ return {key : value} ############################################################################### def ternary(value, result_true, result_false): """ Implements a ternary if/else conditional. """ return result_true if value else result_false ############################################################################### # pylint: disable=protected-access def gpu_count(): """ Returns the number of GPU devices available on the system. Notes ----- The result of this function is cached so that it will return immediately during future calls. """ if gpu_count._value is None: try: with CudaContext() as context: gpu_count._value = len(context) except CudaError: gpu_count._value = 0 return gpu_count._value gpu_count._value = None # pylint: enable=protected-access ############################################################################### def resolve_path(engine, filename): """ Resolves a path relative to the Kurfile. """ filename = os.path.expanduser(os.path.expandvars(filename)) kurfile = engine._scope.get('filename') if kurfile: filename = os.path.join( os.path.dirname(kurfile), filename ) return os.path.abspath(filename) ############################################################################### def create_load_json(engine): """ Creates the JSON loader. """ # pylint: disable=protected-access def load_json(filename, use_cache=True): """ Loads a JSON file from disk. """ path = resolve_path(engine, filename) logger.debug('Loading JSON file: %s (%s)', filename, path) if use_cache and path in load_json.cache: logger.trace('Using cached data.') else: with open(path) as fh: load_json.cache[path] = json.loads(fh.read()) return load_json.cache[path] load_json.cache = {} # pylint: enable=protected-access return load_json ############################################################################### def create_load_yaml(engine): """ Creates the YAML loader. """ # pylint: disable=protected-access def load_yaml(filename, use_cache=True): """ Loads a YAML file from disk. 
""" path = resolve_path(engine, filename) logger.debug('Loading YAML file: %s (%s)', filename, path) if use_cache and path in load_yaml.cache: logger.trace('Using cached data.') else: with open(path) as fh: load_yaml.cache[path] = yaml.load(fh.read()) return load_yaml.cache[path] load_yaml.cache = {} # pylint: enable=protected-access return load_yaml ############################################################################### class JinjaEngine(Engine): """ An evaluation engine which uses Jinja2 for templating. """ ########################################################################### def register_custom_filters(self, env): """ Adds our custom filters to the Jinja2 engine. Arguments --------- env: jinja2.Environment instance. The environment to add the custom filters to. """ env.filters['basename'] = os.path.basename env.filters['dirname'] = os.path.dirname env.filters['splitext'] = os.path.splitext env.filters['combine'] = combine env.filters['as_dict'] = as_dict env.filters['ternary'] = ternary env.globals['gpu_count'] = gpu_count env.globals['load_json'] = create_load_json(self) env.globals['load_yaml'] = create_load_yaml(self) ########################################################################### def __init__(self, *args, **kwargs): """ Creates a new Jinja2 templating engine. """ # Call the parent super().__init__(*args, **kwargs) # Create a Jinja2 environment. We could use jinja2.Template().render() # directly, but having an environment gives us more control over, e.g., # custom filters. self.env = jinja2.Environment() # Registering custom filters is described here: # http://jinja.pocoo.org/docs/dev/api/#custom-filters self.register_custom_filters(self.env) # Built-in Jinja2 filters are listed here: # http://jinja.pocoo.org/docs/dev/templates/#builtin-filters ########################################################################### def _evaluate(self, expression): """ Evaluates an expression in the current scope. # Arguments expression: str. The string to evaluate. # Return value The evaluated expression (some Python object/class). """ result = self.env.from_string(expression).render(**self._scope) # Jinja2's `render()` will return a string which is a valid Python # expression (e.g., passing it through `eval` will succeed). However, # if you reference, e.g., a list that Jinja renders, the list will get # printed as a string. So we use `ast.literal_eval()` to turn it back # into a Python object. This may have unintended consequences, such as # turning the literal string "None" into the `None` Python object. # But it's better than nothing. try: result = ast.literal_eval(result) except (ValueError, SyntaxError): pass return result ### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
Chapter10/listing10_2.py
hohsieh/osgeopy-code
160
11116987
# Script to use ground control points to add a geotransform to a raster. import glob import math import os from osgeo import gdal, osr # The get_extent function from the text is in ch10funcs.py. import ch10funcs # Don't forget to change the directory. os.chdir(r'D:\osgeopy-data\Massachusetts') # Get the list of tiffs that start with O. in_files = glob.glob('O*.tif') # Loop through all of the files and get the bounding coordinates for the # whole batch. This will be the output extent. min_x, max_y, max_x, min_y = ch10funcs.get_extent(in_files[0]) for fn in in_files[1:]: minx, maxy, maxx, miny = ch10funcs.get_extent(fn) min_x = min(min_x, minx) max_y = max(max_y, maxy) max_x = max(max_x, maxx) min_y = min(min_y, miny) # Calculate the dimensions for the output based on the output extent. in_ds = gdal.Open(in_files[0]) gt = in_ds.GetGeoTransform() rows = math.ceil((max_y - min_y) / -gt[5]) columns = math.ceil((max_x - min_x) / gt[1]) # Create the output dataset. driver = gdal.GetDriverByName('gtiff') out_ds = driver.Create('mosaic.tif', columns, rows) out_ds.SetProjection(in_ds.GetProjection()) out_band = out_ds.GetRasterBand(1) # Change the upper left coordinates in the geotransform and add it to the # output image. gt = list(in_ds.GetGeoTransform()) gt[0], gt[3] = min_x, max_y out_ds.SetGeoTransform(gt) # Loop through the input files. for fn in in_files: in_ds = gdal.Open(fn) # Create a transformer between this input image and the output mosaic # and then use it to calculate the offsets for this raster in the # mosaic. trans = gdal.Transformer(in_ds, out_ds, []) success, xyz = trans.TransformPoint(False, 0, 0) x, y, z = map(int, xyz) # Copy the data. data = in_ds.GetRasterBand(1).ReadAsArray() out_band.WriteArray(data, x, y) # From later in the text, get the real-world coordinates from out_ds at # column 1078 and row 648. trans = gdal.Transformer(out_ds, None, []) success, xyz = trans.TransformPoint(0, 1078, 648) print(xyz) del in_ds, out_band, out_ds
tools/gnvprof.py
mehrdad-shokri/fbcuda
296
11116995
<reponame>mehrdad-shokri/fbcuda #!/usr/local/bin/ifbpy2 --never-use-in-production from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import sys import json import pandas as pd import numpy as np def trace_event(row): return dict( name=row["Name"], tid=row["Context"], cat="cuda", ts=row["Start"], ph="X", pid=row["pid"], dur=row["Duration"], args=dict( stream=row["Stream"] ) ) def parse(filename): def parse_int(x): try: return int(x) except ValueError: return -1 def parse_pid(device): if isinstance(device, str): return device assert np.isnan(device) return "Host" df = pd.read_csv(filename)[1:] df.Duration = df.Duration.map(lambda x: float(x)) df.Start = df.Start.map(lambda x: float(x)) df.Stream = df.Stream.map(lambda x: parse_int(x)) df.Context = df.Context.map(lambda x: parse_int(x)) df["pid"] = df.Device.map(lambda x: parse_pid(x)) return df def main(): df = parse(sys.argv[1]) events = [trace_event(row) for row in df.to_dict("records")] print(json.dumps(events, indent=2)) if __name__ == "__main__": main()
ai/src/tests/perf_data.py
ScriptBox99/spiceai
713
11117032
<gh_stars>100-1000 from io import BytesIO, StringIO import time import pandas as pd from pyarrow import csv from proto.aiengine.v1 import aiengine_pb2 import main from tests import common class DataSource: def __init__(self, name, init_json_path, data_path): self.name = name self.init_req = common.get_init_from_json(init_data_path=init_json_path, pod_name=name) with open(data_path, "r", encoding="utf8") as data_file: self.csv_string = data_file.read() self.csv_bytes = self.csv_string.encode() class PerfData(): def __init__(self): print("Starting performance test for data\n") self.aiengine = main.AIEngine() self.data_sources = { "trader": DataSource( "trader", "../../test/assets/aiengine/api/trader_init.json", "../../test/assets/data/csv/trader.csv"), "coinbase": DataSource( "coinbase", "../../test/assets/aiengine/api/coinbase_init.json", "../../test/assets/data/csv/COINBASE_BTCUSD, 30.csv") } def init(self, init_req: aiengine_pb2.InitRequest, expected_error: bool = False, expected_result: str = "ok"): resp = self.aiengine.Init(init_req, None) assert resp.error == expected_error assert resp.result == expected_result def add_data(self, pod_name: str, csv_data: str): resp = self.aiengine.AddData(aiengine_pb2.AddDataRequest(pod=pod_name, csv_data=csv_data), None) assert resp.error is False def arrow_load(self): print("Arrow data loading time") for data_name, data_source in self.data_sources.items(): print(f"{data_name}:", end="", flush=True) start_time = time.time() for _ in range(1000): csv.read_csv(BytesIO(data_source.csv_bytes)) end_time = time.time() print(f" {end_time - start_time:.02f}ms") print() def pandas_load(self): print("Pandas data loading time") for data_name, data_source in self.data_sources.items(): print(f"{data_name}:", end="", flush=True) start_time = time.time() for _ in range(1000): pd.read_csv(StringIO(data_source.csv_string)) end_time = time.time() print(f" {end_time - start_time:.02f}ms") print() def aiengine_load(self): accumulation = 1000 print(f"AIEngine loading {accumulation}x data") for data_name, data_source in self.data_sources.items(): print(f"{data_name}:", end="", flush=True) self.init(data_source.init_req) start_time = time.time() for _ in range(accumulation): self.add_data(data_name, data_source.csv_bytes) end_time = time.time() print(f" {end_time - start_time:.02f}s") print() if __name__ == "__main__": suite = PerfData() suite.arrow_load() suite.pandas_load() suite.aiengine_load()
Anaconda-files/Program_21c.py
arvidl/dynamical-systems-with-applications-using-python
106
11117040
<filename>Anaconda-files/Program_21c.py<gh_stars>100-1000
# Program 21c: Josephson junction limit cycle.
# See Figure 21.9.
from matplotlib import pyplot as plt
import numpy as np
from scipy.integrate import odeint

fig = plt.figure()
bj = 1.2
tmax = 100
kappa = 1.4


def jj_ode(x, t):
    return [x[1], kappa - bj * x[1] - np.sin(x[0])]


time = np.arange(0, tmax, 0.1)
x0 = [0.1, 0.1]
xs = odeint(jj_ode, x0, time)

imgplot = plt.plot(np.sin(xs[:, 0]), xs[:, 1], 'r-')
plt.xlabel(r'$\sin(\phi)$', fontsize=15)
plt.ylabel(r'$\Omega$', fontsize=15)
plt.tick_params(labelsize=15)
plt.show()
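For reference, the odeint call above integrates the junction equation phi'' = kappa - bj*phi' - sin(phi) as a first-order system in (phi, Omega). A minimal alternative sketch with scipy's solve_ivp (note its (t, x) argument order), reusing the names defined above, could look like this:

from scipy.integrate import solve_ivp

# Same system as jj_ode above, but with solve_ivp's (t, x) argument order.
sol = solve_ivp(lambda t, x: [x[1], kappa - bj * x[1] - np.sin(x[0])],
                (0, tmax), x0, t_eval=time)
# sol.y[0] is phi(t), sol.y[1] is Omega(t) = dphi/dt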
notebook/numpy_slice.py
vhn0912/python-snippets
174
11117066
import numpy as np l = list(range(10)) print(l) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] print(l[4:8]) # [4, 5, 6, 7] print(l[-5:-2]) # [5, 6, 7] print(l[::-1]) # [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] a = np.arange(10) print(a) # [0 1 2 3 4 5 6 7 8 9] print(a[4:8]) # [4 5 6 7] print(a[-5:-2]) # [5 6 7] print(a[::-1]) # [9 8 7 6 5 4 3 2 1 0] a[3:6] = 100 print(a) # [ 0 1 2 100 100 100 6 7 8 9] a[3:6] = [100, 200, 300] print(a) # [ 0 1 2 100 200 300 6 7 8 9] # a[3:6] = [100, 200, 300, 400] # ValueError: cannot copy sequence with size 4 to array axis with dimension 3 a = np.arange(10) print(a) # [0 1 2 3 4 5 6 7 8 9] print(a[2:8:2]) # [2 4 6] a[2:8:2] = 100 print(a) # [ 0 1 100 3 100 5 100 7 8 9] a[2:8:2] = [100, 200, 300] print(a) # [ 0 1 100 3 200 5 300 7 8 9] a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a[1:, 1:3]) # [[ 5 6] # [ 9 10]] print(a[1:, :]) # [[ 4 5 6 7] # [ 8 9 10 11]] print(a[1:]) # [[ 4 5 6 7] # [ 8 9 10 11]] print(a[1]) # [4 5 6 7] print(a[1].shape) # (4,) print(a[1:2]) # [[4 5 6 7]] print(a[1:2].shape) # (1, 4) print(a[:, 1:3]) # [[ 1 2] # [ 5 6] # [ 9 10]] print(a[:, 1]) # [1 5 9] print(a[:, 1].shape) # (3,) print(a[:, 1:2]) # [[1] # [5] # [9]] print(a[:, 1:2].shape) # (3, 1) a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a[1:, 1:3]) # [[ 5 6] # [ 9 10]] a[1:, 1:3] = 100 print(a) # [[ 0 1 2 3] # [ 4 100 100 7] # [ 8 100 100 11]] a[1:, 1:3] = [100, 200] print(a) # [[ 0 1 2 3] # [ 4 100 200 7] # [ 8 100 200 11]] a[1:, 1:3] = [[100, 200], [300, 400]] print(a) # [[ 0 1 2 3] # [ 4 100 200 7] # [ 8 300 400 11]] a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a[1:, ::2]) # [[ 4 6] # [ 8 10]] a[1:, ::2] = 100 print(a) # [[ 0 1 2 3] # [100 5 100 7] # [100 9 100 11]] a[1:, ::2] = [100, 200] print(a) # [[ 0 1 2 3] # [100 5 200 7] # [100 9 200 11]] a[1:, ::2] = [[100, 200], [300, 400]] print(a) # [[ 0 1 2 3] # [100 5 200 7] # [300 9 400 11]] a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_slice = a[1:, 1:3] print(a_slice) # [[ 5 6] # [ 9 10]] a_slice[0, 0] = 100 print(a_slice) # [[100 6] # [ 9 10]] print(a) # [[ 0 1 2 3] # [ 4 100 6 7] # [ 8 9 10 11]] a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_slice_copy = a[1:, 1:3].copy() print(a_slice_copy) # [[ 5 6] # [ 9 10]] a_slice_copy[0, 0] = 100 print(a_slice_copy) # [[100 6] # [ 9 10]] print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a = np.arange(12).reshape((3, 4)) print(a) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a[[0, 2], 1:3]) # [[ 1 2] # [ 9 10]] a[[0, 2], 1:3] = 100 print(a) # [[ 0 100 100 3] # [ 4 5 6 7] # [ 8 100 100 11]] a[[0, 2], 1:3] = [100, 200] print(a) # [[ 0 100 200 3] # [ 4 5 6 7] # [ 8 100 200 11]] a[[0, 2], 1:3] = [[100, 200], [300, 400]] print(a) # [[ 0 100 200 3] # [ 4 5 6 7] # [ 8 300 400 11]] a_subset = a[[0, 2], 1:3] print(a_subset) # [[100 200] # [300 400]] a_subset[0, 0] = -1 print(a_subset) # [[ -1 200] # [300 400]] print(a) # [[ 0 100 200 3] # [ 4 5 6 7] # [ 8 300 400 11]]
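One way to make the view-versus-copy behaviour demonstrated above explicit is np.shares_memory; a short sketch:

a = np.arange(12).reshape((3, 4))

# Basic slicing returns a view that shares the original buffer ...
print(np.shares_memory(a, a[1:, 1:3]))         # True
# ... while copy() and fancy indexing give independent arrays.
print(np.shares_memory(a, a[1:, 1:3].copy()))  # False
print(np.shares_memory(a, a[[0, 2], 1:3]))     # False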
tests/behavioral_tests/test_memento.py
defianceblack/PyPattyrn
1,499
11117136
<gh_stars>1000+ from unittest import TestCase from pypattyrn.behavioral.memento import Memento, Originator class MementoTestCase(TestCase): """ Unit testing class for the Memento Class. """ def setUp(self): """ Initialize testing data. """ self.state = {'foo': 'bar'} def test_init(self): """ Test the __init__ method. @raise AssertionError: If the test fails. """ memento = Memento(self.state) self.assertEqual(memento.state, self.state) class OriginatorTestCase(TestCase): """ Unit testing class for the Originator class. """ def setUp(self): """ Initialize testing data. """ class Cat(Originator): def __init__(self, name): self.name = name self.cat_class = Cat def test_commit(self): """ Test the commit method. @raise AssertionError: If the test fails. """ cat = self.cat_class('Tom') cat_memento = cat.commit() self.assertDictEqual(cat.__dict__, cat_memento.state) def test_rollback(self): """ Test the rollback method. @raise AssertionError: If the test fails. """ cat = self.cat_class('Tom') cat_memento = cat.commit() cat.name = 'jerry' cat.rollback(cat_memento) self.assertEqual('Tom', cat.name)
examples/clusterman_metrics/clusterman_metrics/util/costs.py
akshaysharma096/clusterman
281
11117151
# Copyright 2019 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import staticconf from clusterman_metrics.util.constants import CONFIG_NAMESPACE config_reader = staticconf.NamespaceReaders(CONFIG_NAMESPACE) def estimate_cost_per_hour( cluster, pool, cpus=0, mem=0, ): cpu_cost = cpus * _get_resource_cost("cpus", cluster, pool) mem_cost = mem * _get_resource_cost("mem", cluster, pool) return max(cpu_cost, mem_cost) def _get_resource_cost(resource, cluster, pool): default_cost = config_reader.read_float("cost_per_hour.defaults.{}".format(resource), default=0,) return config_reader.read_float("cost_per_hour.{}.{}.{}".format(cluster, pool, resource), default=default_cost,) def should_warn(cost): threshold = config_reader.read_float("cost_warning_threshold", default=100,) return cost > threshold
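To make the max(cpu, mem) pricing rule in estimate_cost_per_hour concrete, here is a small worked example with invented per-resource rates (the real rates come from the cost_per_hour section of the clusterman_metrics config namespace):

# Hypothetical rates: cpus at $0.024 per vCPU-hour, mem at $0.00005 per MB-hour.
cpu_cost = 10 * 0.024        # -> 0.24
mem_cost = 4096 * 0.00005    # -> 0.2048
estimated = max(cpu_cost, mem_cost)
print(estimated)             # -> 0.24, i.e. the CPU request dominates the estimate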
tests/python/test_states.py
chakpongchung/tinyflow
2,035
11117159
<filename>tests/python/test_states.py<gh_stars>1000+ import tinyflow as tf import numpy as np def test_assign(): x = tf.Variable(tf.zeros(shape=[2,3])) sess = tf.Session() sess.run(tf.assign(x, tf.zeros(shape=[2,3]))) ax = sess.run(x) np.testing.assert_almost_equal(ax, np.zeros((2,3))) def test_group(): x1 = tf.Variable(tf.zeros(shape=[2,3])) x2 = tf.Variable(tf.zeros(shape=[2,3])) a1 = tf.assign(x1, tf.zeros(shape=[2,3])) a2 = tf.assign(x2, tf.ones(shape=[2,3])) sess = tf.Session() sess.run(tf.group(a1, a2)) ax1 = sess.run(x1) ax2 = sess.run(x2) np.testing.assert_almost_equal(ax1, np.zeros((2,3))) np.testing.assert_almost_equal(ax2, np.ones((2,3))) def test_init(): x1 = tf.Variable(tf.ones(shape=[2,3])) x2 = tf.Variable(tf.zeros(shape=[2,3])) sess = tf.Session() sess.run(tf.initialize_all_variables()) ax1 = sess.run(x1) ax2 = sess.run(x2) np.testing.assert_almost_equal(ax1, np.ones((2,3))) np.testing.assert_almost_equal(ax2, np.zeros((2,3))) if __name__ == "__main__": pass
tests/test_stats_manager.py
machinalis/featureforge
366
11117201
from datetime import timedelta import mock from unittest import TestCase import warnings from featureforge.experimentation.stats_manager import StatsManager DEPRECATION_MSG = ( 'Init arguments will change. ' 'Take a look to http://feature-forge.readthedocs.io/en/latest/experimentation.html' '#exploring-the-finished-experiments' ) DB_CONNECTION_PATH = 'featureforge.experimentation.stats_manager.StatsManager.setup_database_connection' # NOQA class TestStatsManager(TestCase): def setUp(self): self.db_name = 'a_db_name' self.booking_duration = 10 def test_init_with_db_name_as_first_parameter_and_booking_duration_as_second(self): with mock.patch(DB_CONNECTION_PATH): st = StatsManager(db_name=self.db_name, booking_duration=self.booking_duration) self.assertEqual(st._db_config['name'], self.db_name) self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration)) def test_if_init_with_db_name_as_second_argument_will_warning(self): with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always", DeprecationWarning) # Trigger a warning. with mock.patch(DB_CONNECTION_PATH): StatsManager(self.booking_duration, self.db_name) # Verify some things self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) self.assertEqual(str(w[-1].message), DEPRECATION_MSG) def test_if_use_db_name_as_second_argument_warnings_but_can_continue(self): with warnings.catch_warnings(record=True): # Cause all warnings to always be triggered. warnings.simplefilter("always", DeprecationWarning) # Trigger a warning. with mock.patch(DB_CONNECTION_PATH): st = StatsManager(self.booking_duration, self.db_name) self.assertEqual(st._db_config['name'], self.db_name) self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration))
Algo and DSA/LeetCode-Solutions-master/Python/escape-the-ghosts.py
Sourav692/FAANG-Interview-Preparation
3,269
11117218
<reponame>Sourav692/FAANG-Interview-Preparation<filename>Algo and DSA/LeetCode-Solutions-master/Python/escape-the-ghosts.py
# Time:  O(n)
# Space: O(1)


class Solution(object):
    def escapeGhosts(self, ghosts, target):
        """
        :type ghosts: List[List[int]]
        :type target: List[int]
        :rtype: bool
        """
        total = abs(target[0]) + abs(target[1])
        return all(total < abs(target[0] - i) + abs(target[1] - j)
                   for i, j in ghosts)
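The check above relies on the fact that, with simultaneous unit moves on a grid, a ghost can intercept you if and only if its Manhattan distance to the target is less than or equal to yours (it can simply head for the target and wait there). A quick usage sketch:

s = Solution()
print(s.escapeGhosts([[1, 0], [0, 3]], [0, 1]))  # True: both ghosts are farther from the target
print(s.escapeGhosts([[1, 0]], [2, 0]))          # False: the ghost reaches (2, 0) first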
Training Utility/somatictrainer/sandbox.py
ZackFreedman/Somatic
328
11117228
<filename>Training Utility/somatictrainer/sandbox.py<gh_stars>100-1000 import random import time import requests import tensorflow.keras as keras import tensorflow as tf import sklearn.model_selection import logging import numpy as np import hexdump from datetime import datetime from somatictrainer.gestures import GestureTrainingSet, Gesture logging.basicConfig(level=logging.DEBUG) class Callbacks(keras.callbacks.Callback): def on_train_begin(self, logs=None): logging.info('Starting training!') def on_train_end(self, logs=None): logging.info('Training over!') def on_train_batch_begin(self, batch, logs=None): logging.info('Starting batch {}. Logs: {}'.format(batch, logs)) def on_train_batch_end(self, batch, logs=None): logging.info('Ending batch {}. Logs: {}'.format(batch, logs)) def on_epoch_begin(self, epoch, logs=None): logging.info('Starting epoch {}'.format(epoch)) def on_epoch_end(self, epoch, logs=None): logging.info('Ending epoch {}. Logs: {}'.format( epoch, logs)) def make_model(): filename = 'training_set_2.db' corpus = GestureTrainingSet.load( 'E:\\Dropbox\\Projects\\Source-Controlled Projects\\Somatic\\Training Utility\\' + filename) data, labels = corpus.to_training_set() one_hot_labels = keras.utils.to_categorical(labels, num_classes=np.max(labels) + 1) x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split( data, one_hot_labels, test_size=0.1, shuffle=True, stratify=one_hot_labels) batch_size = 32 model = keras.Sequential() # model.add(keras.layers.LSTM(batch_size, activation='sigmoid', recurrent_activation='relu', # input_shape=data.shape[1:], return_sequences=True)) # model.add(keras.layers.Dropout(0.2)) # See https://stackoverflow.com/questions/48026129/how-to-build-a-keras-model-with-multidimensional-input-and-output # # model.add(keras.layers.LSTM(64)) # model.add(keras.layers.Dropout(0.2)) model.add(keras.layers.Flatten(input_shape=data.shape[1:])) model.add(keras.layers.Dense(batch_size, activation='relu')) for i in range(4): model.add(keras.layers.Dense(100, activation='relu')) model.add(keras.layers.Dense(np.max(labels) + 1, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() model.fit(x_train, y_train, epochs=500, batch_size=batch_size, shuffle=True, verbose=True, validation_data=(x_test, y_test)) model.evaluate(data, one_hot_labels, batch_size=batch_size) if '.' in filename: filename = filename[:filename.rindex('.')] model_name = 'E:\\Dropbox\\Projects\\Source-Controlled Projects\\Somatic\\Training Utility\\' + filename + '.h5' logging.info('Done! 
Model saved to ' + model_name) model.save(model_name) # model = keras.models.load_model('E:\\Dropbox\\Projects\\Source-Controlled Projects\\Somatic\\Training Utility\\' + filename + '.h5') converter = tf.lite.TFLiteConverter.from_keras_model(model) # converter.optimizations = [tf.lite.Optimize.DEFAULT] # converter._experimental_new_quantizer = True converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # converter.allow_custom_ops = True def rep_data_gen(): for val in x_test: yield [np.array(val, dtype=np.float32, ndmin=2)] converter.representative_dataset = rep_data_gen tflite_model = converter.convert() open(filename + '.tflite', 'wb').write(tflite_model) hex_lines = [] for chunk in hexdump.chunks(tflite_model, 16): hex_lines.append('0x' + hexdump.dump(chunk, sep=', 0x')) char_map = corpus.get_character_map() with open(filename + '_model.h', 'w') as f: f.write('const unsigned char modelBin[] = {\n') for index, line in enumerate(hex_lines): f.write(' ' + line) if index < len(hex_lines) - 1: f.write(',') f.write('\n') f.write('};\n\n') f.write('const unsigned int modelLen = {};\n\n'.format(len(tflite_model))) f.write('const byte charMap[] = {\n') highest_key = np.max(list(char_map.keys())) for i in range(highest_key + 1): if i % 10 == 0: f.write(' ') if i in char_map: f.write(hex(ord(char_map[i]))) else: f.write('0x00') if i < highest_key: f.write(', ') if i % 10 == 9: f.write('\n') f.write('\n};\n\n') f.write('#define charCount {}\n'.format(len(char_map))) def generate_training_sentence(): hipsum = requests.get('https://hipsum.co/api/?type=hipster-centric&sentences=3').json()[ 0] # Get Hipster Ipsum, strip off trailing punctuation tokens = hipsum.split(' ') for i in range(len(tokens)): if random.randint(0, 2) == 0: # Randomly capitalize some of the words to collect more caps samples tokens[i] = tokens[i].capitalize() elif random.randint(0, 5) == 0: tokens[i] = tokens[i].upper() if i < len(tokens) - 1: tokens[i] += ' ' number = '' for i in range(len(tokens)): if random.randint(0, 4) == 0: # Also add some numbers, we need more of those too for j in range(2, 6): number += chr(ord('0') + random.randint(0, 9)) tokens.insert(i, number + ' ') symbols = '!"#$\',-./?@' # No space - we have plenty 'o them for i in range(len(tokens)): if random.randint(0, 4) == 0: if i > 0: tokens[i - 1] = tokens[i - 1][:-1] # Get rid of that space # Add some punctuation symbols too tokens.insert(i, symbols[random.randint(0, len(symbols) - 1)] + ' ') return ''.join(tokens) if __name__ == "__main__": # while True: # print(generate_training_sentence()) # time.sleep(1) make_model()
tests/perf/test_long_cycles_nbrows_cycle_length_31000_380.py
shaido987/pyaf
377
11117285
import tests.perf.test_cycles_full_long_long as gen

gen.test_nbrows_cycle(31000, 380)
tests/syntax/unmatched_closing_bracket_1.py
matan-h/friendly
287
11117291
"""Should raise SyntaxError: invalid syntax""" x = (1, 2, 3]
Tools/PNGEmbedder.py
AntonioCS/MinecraftC
172
11117320
from PIL import Image
import os
import sys


def export(filePath):
    outputPath = __file__[:__file__.rfind('/Tools')] + '/Resources' + filePath[filePath.rfind('/Input'):][6:]
    outputPath = outputPath[:-4] + '.h'
    cName = filePath[filePath.rfind('/Input'):][7:-4].replace('/', '_')
    image = Image.open(filePath).convert('RGBA')
    pixels = image.load()
    width, height = image.size
    output = open(outputPath, 'w')
    output.write('static unsigned int Resource_' + cName + '_Width = ' + str(width) + ';\n\n')
    output.write('static unsigned int Resource_' + cName + '_Height = ' + str(height) + ';\n\n')
    output.write('static unsigned char Resource_' + cName + '_RGBA[] = \n')
    output.write('{')
    for y in range(height):
        for x in range(width):
            # Start a new line in the generated header every 4 pixels (16 bytes);
            # the running pixel index in row-major order is y * width + x.
            if (y * width + x) % 4 == 0:
                output.write('\n\t')
            r, g, b, a = pixels[x, y]
            output.write('0x%0.2X, ' % r)
            output.write('0x%0.2X, ' % g)
            output.write('0x%0.2X, ' % b)
            output.write('0x%0.2X, ' % a)
    output.write('\n};\n')
    output.close()
    image.close()


for subdir, dirs, files in os.walk(__file__[:__file__.rfind('/')] + '/Input'):
    for fileName in files:
        filePath = subdir + os.sep + fileName
        if filePath.endswith('.png'):
            export(filePath)
scripts/system/parse_log.py
jjzhang166/minerva
561
11117336
<gh_stars>100-1000 import sys import re import operator interval = 0.01 def getDeviceIdFromExecution(s): words = s.split() if len(words) < 4 or words[3] != 'execute': return None else: return int(words[2][1:]) def getDeviceIdFromCreation(s): words = s.split() if len(words) < 6 or words[0] != 'create' or words[1] != 'new' or words[2] != 'op': return None else: return int(words[7][1:]) def getDeviceIdFromDeletion(s): words = s.split() if len(words) < 6 or words[0] != 'dispatcher' or words[1] != 'ready': return None else: return 0 def parseSecond(s): hour, minute, second = s.split(':') return int(hour) * 3600 + int(minute) * 60 + float(second) def parseFile(filename, deviceIdParser): ret = {} with open(filename) as f: for line in f.readlines(): words = line.split(None, 4) time = parseSecond(words[1]) device = deviceIdParser(words[4]) if device == None: continue else: bucket = int(time / interval) ret.setdefault(bucket, dict()) ret[bucket].setdefault(device, 0) ret[bucket][device] += 1 return ret def outputBuckets(l): union = list(set.union(*map(set, map(dict.keys, l)))) timeAxis = range(min(union), max(union) + 1) deviceIds = set.union(*map(set, map(dict.keys, reduce(operator.add, map(dict.values, l))))) with open(sys.argv[1] + '.hist', 'w') as f: f.write(','.join(['time'] + reduce(operator.add, [[str(bucketId) + '_device' + str(deviceId) for deviceId in deviceIds] for bucketId in range(len(l))])) + '\n') for time in timeAxis: f.write(','.join(map(str, [time] + reduce(operator.add, [[l[bucketId].get(time, dict()).get(deviceId, 0) for deviceId in deviceIds] for bucketId in range(len(l))]))) + '\n') def main(): assert(1 < len(sys.argv)) filename = sys.argv[1] execution = parseFile(filename, getDeviceIdFromExecution) creation = parseFile(filename, getDeviceIdFromCreation) deletion = parseFile(filename, getDeviceIdFromDeletion) outputBuckets([execution, creation, deletion]) if __name__ == '__main__': main()
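For intuition, parseSecond above converts an HH:MM:SS timestamp into seconds, and each log event is then bucketed by int(seconds / interval); for example:

print(parseSecond('01:02:03.5'))                   # -> 3723.5 seconds
print(int(parseSecond('01:02:03.5') / interval))   # -> bucket 372350 with interval = 0.01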
Utils/AnnotationTools.py
Caius-Lu/Savior
108
11117338
import cv2 import os from PIL import Image, ImageDraw, ImageFont import numpy as np import colorsys from Utils.GeometryUtils import compute_two_points_angle, get_coordinates_of_rotated_box current_directory = os.path.dirname(__file__) candidate_font = '田氏颜体大字库2.0.ttf' annotate_font = ImageFont.truetype(os.path.join(current_directory, candidate_font), size=21) def generate_colors(_color_num): """ 生成一定数量的颜色 Args: _color_num: 颜色的数量 Returns: 所有生成的颜色 """ to_return_palette = [] # 最多50种颜色 for i in range(_color_num): hue_value = (i % 50) * 0.02 (r, g, b) = colorsys.hsv_to_rgb(hue_value, 1, 1) to_return_palette.append([int(b * 255), int(g * 255), int(r * 255)]) return to_return_palette def annotation_multi_horizon_line_on_image(_img, _y_list, _line_color, _line_thickness=4): to_return_img = _img.copy() for m_y in _y_list: cv2.line(to_return_img, (0, m_y), (_img.shape[0] - 1, m_y), _line_color, thickness=_line_thickness) return cv2.addWeighted(to_return_img, 0.5, _img, 0.5, 0) def annotation_horizon_line_on_image(_img, _y, _line_color, _line_thickness=4): return annotation_multi_horizon_line_on_image(_img, [_y], _line_color, _line_thickness) def annotate_bounding_box_on_image(_img, _boxes, _specific_color, _with_index=False, _thickness=4): to_return_img = _img.copy() if len(_boxes) > 0: for m_box_index, m_box in enumerate(_boxes): cv2.rectangle(to_return_img, (m_box[0], m_box[1]), (m_box[2], m_box[3]), _specific_color, thickness=_thickness) if _with_index: cv2.putText(to_return_img, f'{m_box_index}', (m_box[0] + 5, m_box[1] + 5), cv2.FONT_HERSHEY_SIMPLEX, 1, _specific_color ) return to_return_img def annotate_circle_on_image(_to_annotate_image, _points, _specific_color, _radius=8, _thickness=2): """ 在图中标注多个圆 Args: _to_annotate_image: 待标注图像 _points: 待标注圆的中心(同opencv配置) _specific_color: 圆的颜色(同opencv配置) _radius: 圆的半径(同opencv配置) _thickness: 圆的厚度(同opencv配置) """ h, w = _to_annotate_image.shape[:2] if len(_points) > 0: for m_point in _points: cv2.circle(_to_annotate_image, (int(m_point[0] * w), int(m_point[1] * h)), _radius, _specific_color, thickness=_thickness) def annotate_polygon_on_image(_img, _polygon, _specific_color, _is_transparent=True): """ 在图中标注多边形区域 :param _img: 待标注图像 :param _polygon: 多边形区域 :param _specific_color: 标注颜色 :param _is_transparent: 是否透明 :return: 标注完成的图像 """ to_return_img = _img.copy() h, w = _img.shape[:2] if isinstance(_polygon, list): _polygon = (np.array(_polygon) * (w, h)).astype(np.int32) cv2.fillPoly(to_return_img, [_polygon, ], _specific_color) if _is_transparent: to_return_img = cv2.addWeighted(to_return_img, 0.5, _img, 0.5, 0) return to_return_img def __annotation_text_on_image(_img, _text_start_position, _text_color, _text): img_pil = Image.fromarray(_img) to_draw_image = ImageDraw.Draw(img_pil) to_draw_image.multiline_text(_text_start_position, _text, fill=_text_color, font=annotate_font) to_return_img = np.asarray(img_pil) return to_return_img def annotation_angle_on_image(_img, _start_point, _middle_point, _end_point, _line_color, _text_color, _angle): """ 在图上画一个角 :param _img: 需要标注的图 :param _start_point: 起点(顺时针) :param _middle_point: 中点 :param _end_point: 终点(顺时针) :param _line_color: 线条颜色 :param _text_color: 文本颜色 :param _angle: 当前角度 :return: """ to_return_img = _img.copy() cv2.line(to_return_img, (_start_point[0], _start_point[1]), (_middle_point[0], _middle_point[1]), _line_color, 2) cv2.line(to_return_img, (_middle_point[0], _middle_point[1]), (_end_point[0], _end_point[1]), _line_color, 2) cv2.circle(to_return_img, (_middle_point[0], _middle_point[1]), 3, (0, 255, 0), 
3) angle_1 = compute_two_points_angle(_middle_point, _start_point) angle_2 = compute_two_points_angle(_middle_point, _end_point) start_angle = 0 if angle_2 < angle_1: angle_2 = angle_2 + 360 - angle_1 start_angle = angle_1 angle_1 = 0 cv2.ellipse(to_return_img, (_middle_point[0], _middle_point[1]), (15, 15), start_angle, angle_1, angle_2, _line_color, 2) to_return_img = __annotation_text_on_image(to_return_img, (_middle_point[0] + 5, _middle_point[1] + 5), _text_color, str(_angle)) return to_return_img def annotation_multi_horizon_width(_img, _y, _x_list, _line_color, _text_color, _text_list, _thickness=1, _with_arrow=True): """ 横向标注多个宽度 :param _img: 需要标注的图像 :param _y: 当前直线所在高度 :param _x_list: 所有x的列表 :param _line_color: 线条颜色(bgr) :param _text_color: 文本颜色(bgr) :param _text_list: 每个区间需要显示的文本 :param _thickness: 线条粗细 :param _with_arrow: 线条两端是否带箭头 :return: 标注后的图像 """ assert len(_x_list) - 1 == len(_text_list), '线段数与字符串数不匹配' to_return_img = _img.copy() # 需要绘制: # 1. 双向箭头线 # 2. 箭头到头的直线 # 3. 线条对应的文字 for m_index, (m_start_x, m_end_x, m_text) in enumerate(zip(_x_list[:-1], _x_list[1:], _text_list)): if _with_arrow: cv2.arrowedLine(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color, thickness=_thickness) cv2.arrowedLine(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color, thickness=_thickness) else: cv2.line(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color, thickness=_thickness) cv2.line(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color, thickness=_thickness) # 文本在最左侧 text_start_x = m_start_x text_start_y = _y + (10 if m_index % 2 == 0 else -annotate_font.size - 10) to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text) for m_x in _x_list: cv2.line(to_return_img, (m_x, _y - 12), (m_x, _y + 12), _line_color, thickness=_thickness) return to_return_img def annotation_horizon_width(_img, _y, _start_x, _end_x, _line_color, _text_color, _text): """ 横向标注宽度 :param _img: 需要标注的图像 :param _y: 当前直线所在高度 :param _start_x: 起始x :param _end_x: 结束x :param _line_color: 线条颜色(bgr) :param _text_color: 文本颜色(bgr) :param _text: 需要显示的文本 :return: 标注后的图像 """ return annotation_multi_horizon_width(_img, _y, [_start_x, _end_x], _line_color, _text_color, [_text]) def annotation_multi_vertical_height(_img, _x, _y_list, _line_color, _text_color, _text_list, _thickness=1, _with_arrow=True): """ 纵向标注多个高度 :param _img: 需要标注的图像 :param _x: 当前直线所在宽度 :param _y_list: 所有y的列表 :param _line_color: 线条颜色(bgr) :param _text_color: 文本颜色(bgr) :param _text_list: 所有需要显示的文本 :param _thickness: 线条粗细 :param _with_arrow: 线条两端是否带箭头 :return: 标注后的图像 """ assert len(_y_list) - 1 == len(_text_list), '线段数与字符串数不匹配' to_return_img = _img.copy() # 需要绘制: # 1. 双向箭头线 # 2. 箭头到头的直线 # 3. 
线条对应的文字 for m_start_y, m_end_y, m_text in zip(_y_list[:-1], _y_list[1:], _text_list): if _with_arrow: cv2.arrowedLine(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness) cv2.arrowedLine(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness) else: cv2.line(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness) cv2.line(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness) text_start_x = _x + 10 text_start_y = m_start_y + (m_end_y - m_start_y) // 2 to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text) for m_y in _y_list: cv2.line(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color, thickness=_thickness) return to_return_img def annotation_vertical_height(_img, _x, _start_y, _end_y, _line_color, _text_color, _text): return annotation_multi_vertical_height(_img, _x, [_start_y, _end_y], _line_color, _text_color, [_text, ]) def draw_rotated_bbox(_to_draw_image: np.ndarray, _rotated_box: dict, _color: tuple, _thickness: int): """ 在图中标注旋转矩形 Args: _to_draw_image: 待标注图像 _rotated_box: 待标注的旋转矩形(包含center_x,center_y,box_width,box_height,degree) _color: 标注颜色(同opencv配置) _thickness: 边框粗细(同opencv配置) """ rotated_points = get_coordinates_of_rotated_box(_to_draw_image, _rotated_box) cv2.polylines(_to_draw_image, [rotated_points, ], True, _color, _thickness) def annotate_segmentation( _to_draw_image, _segmentation_result, _background_index=0, ): """ 标注分割区域 Args: _to_draw_image: 需要标注的图像 _segmentation_result: 分割的结果 _background_index: 背景部分的下标 Returns: 标注完成的图 """ h, w = _to_draw_image.shape[:2] if _to_draw_image.shape[:2] != _segmentation_result.shape[:2]: _segmentation_result = cv2.resize(_segmentation_result, (w, h), cv2.INTER_NEAREST) distinct_index = np.sort(np.unique(_segmentation_result), axis=None) candidate_colors = generate_colors(len(distinct_index)) mask_result_image = _to_draw_image.copy() for m_index, m_candidate_color in zip(distinct_index.tolist(), candidate_colors): if m_index == _background_index: continue m_index_segment_result = _segmentation_result == m_index np.putmask(mask_result_image, np.repeat(m_index_segment_result[..., None], 3, axis=-1), m_candidate_color) add_weighted_result_image = cv2.addWeighted(_to_draw_image, 0.5, mask_result_image, 0.5, 0) return add_weighted_result_image def annotate_detect_rotated_bbox_and_text_result( _to_draw_image, _rotated_box_list, _text_list, _box_color, _box_thickness ): """ 标注rotated box和文本到图片上 Args: _to_draw_image: 待标注图像 _rotated_box_list: box列表 _text_list: 文本列表 _box_color: box的颜色 _box_thickness: box的边框粗细 Returns: 标注好的图像 """ to_return_image = _to_draw_image.copy() h, w = to_return_image.shape[:2] for m_box, m_text in zip(_rotated_box_list, _text_list): draw_rotated_bbox(to_return_image, m_box, _box_color, _box_thickness) m_box_center_x = int(m_box['center_x'] * w) m_box_center_y = int(m_box['center_y'] * h) to_return_image = __annotation_text_on_image(to_return_image, (m_box_center_x, m_box_center_y), (0, 255, 0), m_text['text']) return to_return_image
alipay/aop/api/response/MybankCreditLoanapplyPromotionDynamicurlGetResponse.py
snowxmas/alipay-sdk-python-all
213
11117364
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.response.AlipayResponse import AlipayResponse class MybankCreditLoanapplyPromotionDynamicurlGetResponse(AlipayResponse): def __init__(self): super(MybankCreditLoanapplyPromotionDynamicurlGetResponse, self).__init__() self._dynamic_url = None @property def dynamic_url(self): return self._dynamic_url @dynamic_url.setter def dynamic_url(self, value): self._dynamic_url = value def parse_response_content(self, response_content): response = super(MybankCreditLoanapplyPromotionDynamicurlGetResponse, self).parse_response_content(response_content) if 'dynamic_url' in response: self.dynamic_url = response['dynamic_url']
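A small illustrative sketch of how this response class is typically consumed, assuming the base AlipayResponse simply JSON-decodes the body it is given; the payload below is made up:

# Hypothetical: parse a raw JSON body and read the decoded field.
response = MybankCreditLoanapplyPromotionDynamicurlGetResponse()
response.parse_response_content('{"dynamic_url": "https://example.com/promo"}')
print(response.dynamic_url)  # -> https://example.com/promo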
src/pytorch_adapt/meta_validators/__init__.py
KevinMusgrave/pytorch-adapt
131
11117393
<reponame>KevinMusgrave/pytorch-adapt from .forward_only_validator import ForwardOnlyValidator from .reverse_validator import ReverseValidator
lingua_franca/lang/common_data_it.py
NeonDaniel/lingua-franca
191
11117448
import collections _SHORT_ORDINAL_STRING_IT = { 1: 'primo', 2: 'secondo', 3: 'terzo', 4: 'quarto', 5: 'quinto', 6: 'sesto', 7: 'settimo', 8: 'ottavo', 9: 'nono', 10: 'decimo', 11: 'undicesimo', 12: 'dodicesimo', 13: 'tredicesimo', 14: 'quattordicesimo', 15: 'quindicesimo', 16: 'sedicesimo', 17: 'diciassettesimo', 18: 'diciottesimo', 19: 'diciannovesimo', 20: 'ventesimo', 30: 'trentesimo', 40: 'quarantesimo', 50: 'cinquantesimo', 60: 'sessantesimo', 70: 'settantesimo', 80: 'ottantesimo', 90: 'novantesimo', 1e2: 'centesimo', 1e3: 'millesimo', 1e6: 'milionesimo', 1e9: 'miliardesimo', 1e12: 'trilionesimo', 1e15: 'quadrilionesimo', 1e18: 'quintilionesim', 1e21: 'sestilionesimo', 1e24: 'settilionesimo', 1e27: 'ottilionesimo', 1e30: 'nonilionesimo', 1e33: 'decilionesimo' # TODO > 1e-33 } # per i > 10e12 modificata solo la desinenza: da sistemare a fine debug _LONG_ORDINAL_STRING_IT = { 1: 'primo', 2: 'secondo', 3: 'terzo', 4: 'quarto', 5: 'quinto', 6: 'sesto', 7: 'settimo', 8: 'ottavo', 9: 'nono', 10: 'decimo', 11: 'undicesimo', 12: 'dodicesimo', 13: 'tredicesimo', 14: 'quattordicesimo', 15: 'quindicesimo', 16: 'sedicesimo', 17: 'diciassettesimo', 18: 'diciottesimo', 19: 'diciannovesimo', 20: 'ventesimo', 30: 'trentesimo', 40: 'quarantesimo', 50: 'cinquantesimo', 60: 'sessantesimo', 70: 'settantesimo', 80: 'ottantesimo', 90: 'novantesimo', 1e2: 'centesimo', 1e3: 'millesimo', 1e6: 'milionesimo', 1e12: 'bilionesimo', 1e18: 'trilionesimo', 1e24: 'quadrilionesimo', 1e30: 'quintilionesimo', 1e36: 'sestilionesimo', 1e42: 'settilionesimo', 1e48: 'ottilionesimo', 1e54: 'nonilionesimo', 1e60: 'decilionesimo' # TODO > 1e60 } # Undefined articles ['un', 'una', 'un\''] can not be supressed, # in Italian, 'un cavallo' means 'a horse' or 'one horse'. _ARTICLES_IT = ['il', 'lo', 'la', 'i', 'gli', 'le'] _STRING_NUM_IT = { 'zero': 0, 'un': 1, 'uno': 1, 'una': 1, 'un\'': 1, 'due': 2, 'tre': 3, 'quattro': 4, 'cinque': 5, 'sei': 6, 'sette': 7, 'otto': 8, 'nove': 9, 'dieci': 10, 'undici': 11, 'dodici': 12, 'tredici': 13, 'quattordici': 14, 'quindici': 15, 'sedici': 16, 'diciassette': 17, 'diciotto': 18, 'diciannove': 19, 'venti': 20, 'vent': 20, 'trenta': 30, 'trent': 30, 'quaranta': 40, 'quarant': 40, 'cinquanta': 50, 'cinquant': 50, 'sessanta': 60, 'sessant': 60, 'settanta': 70, 'settant': 70, 'ottanta': 80, 'ottant': 80, 'novanta': 90, 'novant': 90, 'cento': 100, 'duecento': 200, 'trecento': 300, 'quattrocento': 400, 'cinquecento': 500, 'seicento': 600, 'settecento': 700, 'ottocento': 800, 'novecento': 900, 'mille': 1000, 'mila': 1000, 'centomila': 100000, 'milione': 1000000, 'miliardo': 1000000000, 'primo': 1, 'secondo': 2, 'mezzo': 0.5, 'mezza': 0.5, 'paio': 2, 'decina': 10, 'decine': 10, 'dozzina': 12, 'dozzine': 12, 'centinaio': 100, 'centinaia': 100, 'migliaio': 1000, 'migliaia': 1000 } _NUM_STRING_IT = { 0: 'zero', 1: 'uno', 2: 'due', 3: 'tre', 4: 'quattro', 5: 'cinque', 6: 'sei', 7: 'sette', 8: 'otto', 9: 'nove', 10: 'dieci', 11: 'undici', 12: 'dodici', 13: 'tredici', 14: 'quattordici', 15: 'quindici', 16: 'sedici', 17: 'diciassette', 18: 'diciotto', 19: 'diciannove', 20: 'venti', 30: 'trenta', 40: 'quaranta', 50: 'cinquanta', 60: 'sessanta', 70: 'settanta', 80: 'ottanta', 90: 'novanta' } _FRACTION_STRING_IT = { 2: 'mezz', 3: 'terz', 4: 'quart', 5: 'quint', 6: 'sest', 7: 'settim', 8: 'ottav', 9: 'non', 10: 'decim', 11: 'undicesim', 12: 'dodicesim', 13: 'tredicesim', 14: 'quattordicesim', 15: 'quindicesim', 16: 'sedicesim', 17: 'diciassettesim', 18: 'diciottesim', 19: 'diciannovesim', 20: 'ventesim' } # 
fonte: http://tulengua.es/numeros-texto/default.aspx _LONG_SCALE_IT = collections.OrderedDict([ (100, 'cento'), (1000, 'mila'), (1000000, 'milioni'), (1e9, "miliardi"), (1e12, "bilioni"), (1e18, 'trilioni'), (1e24, "quadrilioni"), (1e30, "quintilioni"), (1e36, "sestilioni"), (1e42, "settilioni"), (1e48, "ottillioni"), (1e54, "nonillioni"), (1e60, "decemillioni"), (1e66, "undicilione"), (1e72, "dodicilione"), (1e78, "tredicilione"), (1e84, "quattordicilione"), (1e90, "quindicilione"), (1e96, "sedicilione"), (1e102, "diciasettilione"), (1e108, "diciottilione"), (1e114, "dicianovilione"), (1e120, "vintilione"), (1e306, "unquinquagintilione"), (1e312, "duoquinquagintilione"), (1e336, "sesquinquagintilione"), (1e366, "unsexagintilione") ]) _SHORT_SCALE_IT = collections.OrderedDict([ (100, 'cento'), (1000, 'mila'), (1000000, 'milioni'), (1e9, "miliardi"), (1e12, 'bilioni'), (1e15, "biliardi"), (1e18, "trilioni"), (1e21, "triliardi"), (1e24, "quadrilioni"), (1e27, "quadriliardi"), (1e30, "quintilioni"), (1e33, "quintiliardi"), (1e36, "sestilioni"), (1e39, "sestiliardi"), (1e42, "settilioni"), (1e45, "settiliardi"), (1e48, "ottilioni"), (1e51, "ottiliardi"), (1e54, "nonilioni"), (1e57, "noniliardi"), (1e60, "decilioni"), (1e63, "deciliardi"), (1e66, "undicilioni"), (1e69, "undiciliardi"), (1e72, "dodicilioni"), (1e75, "dodiciliardi"), (1e78, "tredicilioni"), (1e81, "trediciliardi"), (1e84, "quattordicilioni"), (1e87, "quattordiciliardi"), (1e90, "quindicilioni"), (1e93, "quindiciliardi"), (1e96, "sedicilioni"), (1e99, "sediciliardi"), (1e102, "diciassettilioni"), (1e105, "diciassettiliardi"), (1e108, "diciottilioni"), (1e111, "diciottiliardi"), (1e114, "dicianovilioni"), (1e117, "dicianoviliardi"), (1e120, "vintilioni"), (1e123, "vintiliardi"), (1e153, "quinquagintillion"), (1e183, "sexagintillion"), (1e213, "septuagintillion"), (1e243, "ottogintilioni"), (1e273, "nonigintillioni"), (1e303, "centilioni"), (1e306, "uncentilioni"), (1e309, "duocentilioni"), (1e312, "trecentilioni"), (1e333, "decicentilioni"), (1e336, "undicicentilioni"), (1e363, "viginticentilioni"), (1e366, "unviginticentilioni"), (1e393, "trigintacentilioni"), (1e423, "quadragintacentillion"), (1e453, "quinquagintacentillion"), (1e483, "sexagintacentillion"), (1e513, "septuagintacentillion"), (1e543, "ctogintacentillion"), (1e573, "nonagintacentillion"), (1e603, "ducentillion"), (1e903, "trecentillion"), (1e1203, "quadringentillion"), (1e1503, "quingentillion"), (1e1803, "sescentillion"), (1e2103, "septingentillion"), (1e2403, "octingentillion"), (1e2703, "nongentillion"), (1e3003, "millinillion") ])
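As a toy illustration (not the lingua-franca API) of how the fraction stems above are meant to be combined with an ending:

# Toy lookup against _FRACTION_STRING_IT: spell an Italian fraction denominator.
def spell_fraction_it(denominator, plural=False):
    stem = _FRACTION_STRING_IT[denominator]   # e.g. 4 -> 'quart'
    return stem + ('i' if plural else 'o')    # 'quarto' / 'quarti'

print(spell_fraction_it(4))        # quarto
print(spell_fraction_it(8, True))  # ottavi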
twnews/tests/soup/test_soup_ettoday.py
virus-warnning/twnews
188
11117449
<reponame>virus-warnning/twnews """ 東森新聞雲分解測試 """ # pylint: disable=duplicate-code import unittest import twnews.common from twnews.soup import NewsSoup class TestEttoday(unittest.TestCase): """ 東森新聞雲分解測試 """ def setUp(self): self.dtf = '%Y-%m-%d %H:%M:%S' def test_01_sample(self): """ 測試東森新聞雲樣本 """ pkgdir = twnews.common.get_package_dir() nsoup = NewsSoup(pkgdir + '/samples/ettoday.html.xz') self.assertEqual('ettoday', nsoup.channel) self.assertIn('客運司機車禍致人於死 心情鬱悶陽台以狗鍊上吊', nsoup.title()) self.assertEqual('2017-12-09 00:26:00', nsoup.date().strftime(self.dtf)) self.assertEqual('林悅', nsoup.author()) self.assertIn('台南市永康區永安路住處後陽台上吊', nsoup.contents()) def test_02_mobile(self): """ 測試東森新聞雲 """ url = 'https://www.ettoday.net/news/20181020/1285826.htm' nsoup = NewsSoup(url, refresh=True, proxy_first=True) self.assertEqual('ettoday', nsoup.channel) self.assertIn('快訊/整日沒出房門!三重無業男半夜住處2樓上吊 母開門才發現', nsoup.title()) self.assertEqual('2018-10-20 04:11:00', nsoup.date().strftime(self.dtf)) self.assertEqual('趙永博', nsoup.author()) self.assertIn('新北市三重區三和路三段101巷一處民宅2樓', nsoup.contents()) def test_03_layouts(self): """ 測試東森新聞雲其他排版 """ layouts = [ { 'url': 'https://fashion.ettoday.net/news/1316942', 'title': '漫長的冬日夜晚想閱讀 不妨參考誠品2018年度暢銷書單', 'date': '2018-11-28 10:10:00', 'author': '蔡惠如', 'contents': '我輩中人:寫給中年人的情書' }, { 'url': 'https://game.ettoday.net/article/1315167.htm', 'title': '網石與DC聯名合作 《蝙蝠俠》角色進駐《天堂2 革命》', 'date': '2018-11-24 23:59:00', 'author': None, 'contents': '天堂2:革命' }, { 'url': 'https://health.ettoday.net/news/1317507', 'title': '謝碧珠/沒有自費就醫資訊的健康存摺很空心', 'date': '2018-11-28 12:50:00', 'author': None, 'contents': '全民健康保險法' }, { 'url': 'https://house.ettoday.net/news/1317619', 'title': '台北、新北26.1%潛勢區 「市民生命安全」柯P、漢子準備好了嗎?', 'date': '2018-11-28 11:16:00', 'author': '陳韋帆', 'contents': '其中「山腳斷層」更跨越了金山、三芝、淡水、五股、泰山、新莊及樹林等各區' }, { 'url': 'https://pets.ettoday.net/news/1307563', 'title': '去睡覺囉! 傑克羅素㹴秒轉身「抱大狗娃娃」踏踏進房:晚安~', 'date': '2018-11-16 14:50:00', 'author': '陳靜', 'contents': '每天晚上都一定要帶著它睡覺,有時候突然想到也會馬上衝進房間' }, { 'url': 'https://speed.ettoday.net/news/1316854', 'title': '休旅車&轎車誰安全?數據顯示這種車「死亡率低一半」', 'date': '2018-11-27 20:03:00', 'author': None, 'contents': '發生各類車禍事故時,普通轎車乘客死亡率為39%,而SUV只有21%' }, { 'url': 'https://sports.ettoday.net/news/1317313', 'title': '日職/丸佳浩MVP二連霸 山川穗高獲獎喊目標50轟', 'date': '2018-11-27 21:10:00', 'author': '楊舒帆', 'contents': '今年繳出打擊率0.306、39轟、97分打點的成績' }, { 'url': 'https://star.ettoday.net/news/1308844', 'title': 'KID半夜突襲女神房間! 驚見「浴衣脫落門外」他狂喜衝進去', 'date': '2018-11-17 16:40:00', 'author': '劉宜庭', 'contents': 'KID親睹這一幕,更是興奮到整個人趴在地上' }, { 'url': 'https://travel.ettoday.net/article/1317664.htm', 'title': '普羅旺斯花海、《鬼怪》蕎麥花!「全台5大花海」超醉心', 'date': '2018-11-28 12:30:00', 'author': None, 'contents': '粉紅波斯菊、向日葵、百合、鼠尾草、貓尾花、麒麟菊、雞冠花、鳳仙花等陸續盛開' } ] for layout in layouts: nsoup = NewsSoup(layout['url'], refresh=True, proxy_first=True) self.assertEqual('ettoday', nsoup.channel) self.assertIn(layout['title'], nsoup.title()) self.assertEqual(layout['date'], nsoup.date().strftime(self.dtf)) self.assertEqual(layout['author'], nsoup.author()) self.assertIn(layout['contents'], nsoup.contents())
sportsipy/mlb/player.py
MArtinherz/sportsipy
221
11117472
<reponame>MArtinherz/sportsipy<filename>sportsipy/mlb/player.py import pandas as pd import re from functools import wraps from lxml.etree import ParserError, XMLSyntaxError from pyquery import PyQuery as pq from urllib.error import HTTPError from .. import utils from .constants import (BOXSCORE_SCHEME, NATIONALITY, PLAYER_ELEMENT_INDEX, PLAYER_SCHEME, PLAYER_URL, ROSTER_URL) def _cleanup(prop): try: prop = prop.replace('%', '') prop = prop.replace('$', '') prop = prop.replace(',', '') return prop.replace('+', '') # Occurs when a value is of Nonetype. When that happens, return a blank # string as whatever came in had an incomplete value. except AttributeError: return '' def _int_property_decorator(func): @property @wraps(func) def wrapper(*args): index = args[0]._index prop = func(*args) element_ind = 0 if func.__name__ in PLAYER_ELEMENT_INDEX.keys(): element_ind = PLAYER_ELEMENT_INDEX[func.__name__] try: value = _cleanup(prop[index][element_ind]) return int(value) except (ValueError, TypeError, IndexError): # If there is no value, default to None return None return wrapper def _float_property_decorator(func): @property @wraps(func) def wrapper(*args): index = args[0]._index prop = func(*args) element_ind = 0 if func.__name__ in PLAYER_ELEMENT_INDEX.keys(): element_ind = PLAYER_ELEMENT_INDEX[func.__name__] try: value = _cleanup(prop[index][element_ind]) return float(value) except (ValueError, TypeError, IndexError): # If there is no value, default to None return None return wrapper class AbstractPlayer: """ Get player information and stats for all seasons. Given a player ID, such as 'altuvjo01' for <NAME>, capture all relevant stats and information like name, nationality, height/weight, career home runs, last season's batting average, salary, contract amount, and much more. By default, the class instance will return the player's career stats, but single-season stats can be found by calling the instance with the requested season as denoted on baseball-reference.com. Parameters ---------- player_id : string A player's ID according to basketball-reference.com, such as 'altuvjo01' for <NAME>. The player ID can be found by navigating to the player's stats page and getting the string between the final slash and the '.html' in the URL. In general, the ID is in the format 'LLLLLFFNN' where 'LLLLL' are the first 5 letters in the player's last name, 'FF', are the first 2 letters in the player's first name, and 'NN' is a number starting at '01' for the first time that player ID has been used and increments by 1 for every successive player. """ def __init__(self, player_id, player_name, player_data): self._player_id = player_id self._name = player_name self._plate_appearances = None self._at_bats = None self._runs = None self._hits = None self._runs_batted_in = None self._bases_on_balls = None self._times_struck_out = None self._batting_average = None self._on_base_percentage = None self._slugging_percentage = None self._on_base_plus_slugging_percentage = None self._putouts = None self._assists = None # Stats specific to pitchers self._hits_allowed = None self._runs_allowed = None self._earned_runs_allowed = None self._home_runs_allowed = None self._bases_on_balls_given = None self._strikeouts = None self._batters_faced = None self._parse_player_data(player_data) def _parse_value(self, stats, field): """ Pull the specified value from the HTML contents. Given a field, find the corresponding HTML tag for that field and parse its value before returning the value as a string. 
Parameters ---------- stats : PyQuery object A PyQuery object containing all stats in HTML format for a particular player. field : string A string of the field to parse from the HTML. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None. """ scheme = PLAYER_SCHEME[field] items = [i.text() for i in stats(scheme).items()] # Stats can be added and removed on a yearly basis. If no stats are # found, return None and have that be the value. if len(items) == 0: return None return items def _parse_player_data(self, player_data): """ Parse all player information and set attributes. Iterate through each class attribute to parse the data from the HTML page and set the attribute value with the result. Parameters ---------- player_data : dictionary or string If this class is inherited from the ``Player`` class, player_data will be a dictionary where each key is a string representing the season and each value contains the HTML data as a string. If this class is inherited from the ``BoxscorePlayer`` class, player_data will be a string representing the player's game statistics in HTML format. """ for field in self.__dict__: short_field = str(field)[1:] if short_field == 'player_id' or \ short_field == 'index' or \ short_field == 'most_recent_season' or \ short_field == 'season' or \ short_field == 'name' or \ short_field == 'weight' or \ short_field == 'height' or \ short_field == 'nationality' or \ short_field == 'birth_date' or \ short_field == 'contract': continue field_stats = [] if type(player_data) == dict: for year, data in player_data.items(): stats = pq(data['data']) value = self._parse_value(stats, short_field) field_stats.append(value) else: stats = pq(player_data) value = self._parse_value(stats, short_field) field_stats.append(value) setattr(self, field, field_stats) @property def player_id(self): """ Returns a ``string`` of the player's ID on sports-reference, such as 'altuvjo01' for <NAME>. """ return self._player_id @property def name(self): """ Returns a ``string`` of the player's name, such as '<NAME>'. """ return self._name @_int_property_decorator def plate_appearances(self): """ Returns an ``int`` of the number of plate appearances the player had. """ return self._plate_appearances @_int_property_decorator def at_bats(self): """ Returns an ``int`` of the number of at bats the player had. """ return self._at_bats @_int_property_decorator def runs(self): """ Returns an ``int`` of the number of runs the player scored. """ return self._runs @_int_property_decorator def hits(self): """ Returns an ``int`` of the number of hits the player had. """ return self._hits @_int_property_decorator def runs_batted_in(self): """ Returns an ``int`` of the number of runs batted in the player registered. """ return self._runs_batted_in @_int_property_decorator def bases_on_balls(self): """ Returns an ``int`` of the number of bases the player registered as a result of balls. """ return self._bases_on_balls @_int_property_decorator def times_struck_out(self): """ Returns an ``int`` of the number of times the player was struck out. """ return self._times_struck_out @_float_property_decorator def batting_average(self): """ Returns a ``float`` of the batting average for the player. """ return self._batting_average @_float_property_decorator def on_base_percentage(self): """ Returns a ``float`` of the percentage of at bats that result in the batter getting on base. 
""" return self._on_base_percentage @_float_property_decorator def slugging_percentage(self): """ Returns a ``float`` of the slugging percentage for the player based on the number of bases gained per at-bat with bigger plays getting more weight. """ return self._slugging_percentage @_float_property_decorator def on_base_plus_slugging_percentage(self): """ Returns a ``float`` of the on base percentage plus the slugging percentage. Percentage ranges from 0-1. """ return self._on_base_plus_slugging_percentage @_int_property_decorator def putouts(self): """ Returns an ``int`` of the number of putouts the player had. """ return self._putouts @_int_property_decorator def assists(self): """ Returns an ``int`` of the number of assists the player had. """ return self._assists @_int_property_decorator def hits_allowed(self): """ Returns an ``int`` of the number of hits the player allowed as a pitcher. """ return self._hits_allowed @_int_property_decorator def runs_allowed(self): """ Returns an ``int`` of the number of runs the player allowed as a pitcher. """ return self._runs_allowed @_int_property_decorator def earned_runs_allowed(self): """ Returns an ``int`` of the number of earned runs the player allowed as a pitcher. """ return self._earned_runs_allowed @_int_property_decorator def bases_on_balls_given(self): """ Returns an ``int`` of the number of bases on balls the player has given as a pitcher. """ return self._bases_on_balls_given @_int_property_decorator def strikeouts(self): """ Returns an ``int`` of the number of strikeouts the player threw as a pitcher. """ return self._strikeouts @_int_property_decorator def batters_faced(self): """ Returns an ``int`` of the number of batters the pitcher has faced. """ return self._batters_faced
remote_notebook/code/notebook_server_client.py
sfahad1414/seahorse
111
11117475
# Copyright 2016 deepsense.ai (CodiLime, Inc) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import json import urllib2 from utils import Logging from seahorse_notebook_path import SeahorseNotebookPath class NotebookServerClient(Logging): # Passed seahorse_notebook_path enforces NotebookServerClient to use this path for finding/saving # notebook. When seahorse_notebook_path is not defined then NotebookServerClient will connect to session # endpoint to find its path def __init__(self, nb_host, nb_port, kernel_id, seahorse_notebook_path=None): super(NotebookServerClient, self).__init__() self._nb_host = nb_host self._nb_port = nb_port self._kernel_id = kernel_id self._notebook_server_location = "{}:{}".format(self._nb_host, self._nb_port) self._api_url = "http://{}/jupyter/api/sessions".format(self._notebook_server_location) self.seahorse_notebook_path = seahorse_notebook_path def _get_path(self): if self.seahorse_notebook_path: return self.seahorse_notebook_path else: session = self._get_my_session() return str(session['notebook']['path']) def extract_dataframe_source(self): notebook_path = SeahorseNotebookPath.deserialize(self._get_path()) return notebook_path.workflow_id, notebook_path.datasource_node_id, notebook_path.datasource_node_port def restart_kernel(self): # 'data' specified to make it a POST request urllib2.urlopen("http://{}/jupyter/api/kernels/{}/restart".format(self._notebook_server_location, self._kernel_id), "") def stop_kernel(self): # When seahorse_notebook_path is not defined then no session is on notebook server. # We don't have to delete the session. if self.seahorse_notebook_path is not None: return self.logger.debug("Getting session") session = self._get_my_session() self.logger.debug("Got session: {}".format(session)) url = "{}/{}".format(self._api_url, session['id']) self.logger.debug("Preparing DELETE request to {}".format(url)) request = urllib2.Request(url) request.get_method = lambda: 'DELETE' result = urllib2.urlopen(request) self.logger.debug("DELETE returned: {}".format(result)) def _get_my_session(self): sessions = self._get_sessions() for session in sessions: if session['kernel']['id'] == self._kernel_id: return session raise Exception('Session matching kernel ID ' + self._kernel_id + 'was not found.') def _get_sessions(self): response = urllib2.urlopen(self._api_url).read() return json.loads(response)
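A rough driver for the client above against a running Jupyter/Seahorse notebook server; host, port, and kernel id are placeholders:

# Hypothetical usage of NotebookServerClient; all values are placeholders.
client = NotebookServerClient("localhost", 8888, "aaaa-bbbb-cccc")
workflow_id, node_id, node_port = client.extract_dataframe_source()
client.restart_kernel()   # POST .../api/kernels/<kernel_id>/restart
client.stop_kernel()      # DELETE the matching session, if one exists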
nornir/lab-system/deployment.py
caputomarcos/network-programmability-stream
120
11117482
<filename>nornir/lab-system/deployment.py import itertools import logging from collections import deque, defaultdict, OrderedDict from pathlib import Path from typing import ( Dict, List, Iterable, Any, Set, NamedTuple, Optional, ValuesView, Iterator, Deque, DefaultDict, ) import ruamel.yaml from mypy_extensions import TypedDict from nornir.core.inventory import Inventory from nornir.core.inventory import Host from nornir.core.filter import F import constants from utils import roundup YAML_FILENAME_EXTENSIONS = [".yml", ".yaml"] YAML = ruamel.yaml.YAML(typ="safe") ConnectionEnd = TypedDict( "ConnectionEnd", {"hostname": str, "port": str, "tag": str, "service": str, "vm": str, "portgroup_num": int, "update_vlan": bool}, total=False, ) Connection = List[ConnectionEnd] VMDict = TypedDict("VMDict", {"name": str}) # TopologyDict = TypedDict("TopologyDict", {"connections": List[List[LinkDict]]}) logger = logging.getLogger("lab_system.deployment") class Interface(NamedTuple): device_name: str num: Optional[int] = None name: Optional[str] = None class Portgroup(NamedTuple): name: str vlan: Optional[int] = None def __eq__(self, other) -> bool: return self.name == other.name def __hash__(self) -> int: return hash(self.name) DeviceMatrixConnections = Dict[str, Interface] class VM: def __init__(self, name: str, pod: "Pod") -> None: self.name = name self.pod = pod self.portgroups: List[Portgroup] = [] self.show_portgroups = False self.turn_on = True def __repr__(self): return f"VM(name={self.name!r})" class Deployment: def __init__(self, topologies: Dict[str, Dict[str, Any]], inventory) -> None: self.topologies: Dict[str, Dict[str, Any]] = topologies self.inventory = inventory self.pod_id_to_pod: Dict[int, Pod] = {} self.unallocated_pod_gear = inventory.filter( F(pod="unallocated", has_parent_group="pod-gear") ).hosts self._load_topologies() self.free_matrix_connections = self._parse_matrix_switches() self.device_to_pod_mgmt_port = self._parse_pod_mgmt_ports() self.internet_vlans = self._parse_pair_routers() self.cur_dot1q_tunnel_vlan = constants.DOT1Q_TUNNEL_VLAN_START self._allocate_pod_gear() @property def pods(self) -> ValuesView["Pod"]: return self.pod_id_to_pod.values() @property def vms(self) -> Iterator[VM]: return itertools.chain.from_iterable(pod.vms for pod in self.pods) @property def matrix_switches(self) -> Iterable[Host]: return self.inventory.filter( F(groups__contains="matrix-switches") ).hosts.values() def get_device(self, device_name: str) -> Host: return self.inventory.hosts[device_name] def _load_topologies(self) -> None: topologies_dir = Path("topologies") for topology_name, topology_info in self.topologies.items(): topology_dir = topologies_dir / topology_name if not topology_dir.is_dir(): raise OSError(f'Directory "{topology_dir}" was not found') configs_dir = topology_dir / "configs" with open(topology_dir / "topology.yml") as f: logger.info('Loading topology "%s"', topology_name) details = YAML.load(f) for device_name, device in details.get('devices', {}).items(): startup_config_path = configs_dir / f'{device_name}.txt' if startup_config_path.is_file(): with open(startup_config_path) as f: device["startup_config"] = f.read() details["configs_dir"] = str(configs_dir) topology_info.update(details) def _parse_matrix_switches(self) -> DefaultDict[str, DeviceMatrixConnections]: result: DefaultDict[str, DeviceMatrixConnections] = defaultdict(OrderedDict) for matrix_switch in self.matrix_switches: for interface_num, interface in enumerate( matrix_switch.get("interfaces", []) ): 
connected_device_info = interface.get("connected_device") if connected_device_info: device_name = interface["connected_device"]["name"] port = interface["connected_device"].get("port") if port in result[device_name]: matrix_interface = result[device_name][port] raise ValueError( f"{device_name} {port} can't be mapped to " f"{matrix_switch.name} {interface['name']} because it was " f"already mapped to {matrix_interface.device_name} " f"{matrix_interface.name}" ) result[device_name][port] = Interface( matrix_switch.name, interface_num, interface["name"] ) elif interface.get("mode") == "dot1q-tunnel": raise ValueError( f"Interface {interface['name']} on the switch " f"{matrix_switch.name} is 'dot1q-tunnel'" f"but does not have connected_device variable" ) return result def _parse_pod_mgmt_ports(self) -> Dict[str, Interface]: result: Dict[str, Interface] = {} pod_mgmt_switches = self.inventory.filter( F(groups__contains="pod-mgmt") ).hosts.values() for pod_mgmt_switch in pod_mgmt_switches: for int_num, interface in enumerate(pod_mgmt_switch.get("interfaces")): interface_name = interface["name"] connected_device = interface.get("connected_device") if connected_device and not interface.get("management", False): device = connected_device["name"] if device in result: connected_interface = result[device] raise ValueError( f"Device {device} is already connected to " f"{connected_interface.device_name} " f"{connected_interface.name}" ) connected_interface = Interface( pod_mgmt_switch.name, int_num, interface_name ) result[device] = connected_interface return result def _parse_pair_routers(self) -> Deque[int]: result: Deque[int] = deque() presence_set: Set[int] = set() pair_routers = self.inventory.filter( F(groups__contains="pair-routers") ).hosts.values() for router in pair_routers: for interface in router.get("interfaces", []): if interface.get("service") == "internet": vlan = interface.get("vlan") if not isinstance(vlan, int): raise ValueError( f"{router.name} {interface.get('name')} has vlan value is " f"{vlan!r} when it should be of type int" ) if vlan in presence_set: raise ValueError( f"{router.name} {interface.get('name')} has vlan value " f"{vlan} which is already in use" ) result.append(vlan) presence_set.add(vlan) return result def _increase_dot1q_tunnel_vlan(self) -> None: self.cur_dot1q_tunnel_vlan = roundup(self.cur_dot1q_tunnel_vlan) def _allocate_pod_gear(self) -> None: topology_pod_id_start = 0 for topology_name, topology_info in self.topologies.items(): topology_pod_id_start += constants.POD_ID_STEP for i in range(topology_info["quantity"]): pod_id = topology_pod_id_start + i + 1 pod = Pod(pod_id, topology_name, deployment=self) self.pod_id_to_pod[pod_id] = pod logger.info(f'Allocated pod #{pod_id}, topology: "{topology_name}"') self._increase_dot1q_tunnel_vlan() class Pod: def __init__(self, pod_id: int, topology_name: str, deployment: Deployment) -> None: self.id = pod_id self.deployment = deployment self.topology_name = topology_name topology_info = self.deployment.topologies[topology_name] self.devices = topology_info.get("devices", {}) self.vm_name_to_vm: Dict[str, VM] = {} self.connections = topology_info.get("connections", []) self.hostname_to_device: Dict[str, Host] = {} self._update_vms(topology_info.get("vms", [])) self._allocate_gear() self._process_connections() self._flatten_template_data() @property def vms(self) -> ValuesView[VM]: return self.vm_name_to_vm.values() @property def sequence_num(self) -> int: return self.id % constants.POD_ID_STEP @property def 
inventory(self) -> Inventory: return self.deployment.inventory @property def free_matrix_connections( self ) -> DefaultDict[str, DeviceMatrixConnections]: return self.deployment.free_matrix_connections @property def unallocated_pod_gear(self) -> Dict[str, Host]: return self.deployment.unallocated_pod_gear def get_device( self, hostname: Optional[str] = None, device_name: Optional[str] = None ) -> Host: if hostname: return self.hostname_to_device[hostname] elif device_name: return self.deployment.get_device(device_name) else: raise ValueError(f"Neither hostname nor device_name were provided") def get_vm_name(self, vm_name: str) -> str: return f"{self.topology_name}__{vm_name}__{self.sequence_num:02d}" def get_vm_portgroup_name(self, vm_name: str, portgroup_num: Optional[int] = None) -> str: pod_vm_name = self.get_vm_name(vm_name) if portgroup_num is None: return pod_vm_name else: return f"{pod_vm_name}__{portgroup_num:02d}" def _update_vms(self, vms: List[VMDict]) -> None: for vm_info in vms: vm_name = vm_info["name"] full_vm_name = self.get_vm_name(vm_name) self.vm_name_to_vm[vm_name] = VM(full_vm_name, self) def _process_special_reset_device(self, device: Host) -> None: device.data["special_reset"] = True pod_mgmt_interface = self.deployment.device_to_pod_mgmt_port[device.name] device_name = pod_mgmt_interface.device_name pod_mgmt_interfaces = self.get_device(device_name=device_name).get("interfaces") pod_mgmt_interfaces[pod_mgmt_interface.num]["shutdown"] = True def _allocate_gear(self) -> None: for device, device_info in self.devices.items(): unallocated = True group = device_info.get("group") if group: group = f"{group}__{self.sequence_num:02d}" else: group = "dynamic" tags = device_info.get("tags", []) exclude_tags = device_info.get("exclude_tags", []) special_reset = device_info.get("special_reset", False) for free_device_name, free_device in list( self.unallocated_pod_gear.items() ): free_device_tags = free_device.get("tags", []) if ( free_device.has_parent_group(group) and all(tag in free_device_tags for tag in tags) and not any(tag in free_device_tags for tag in exclude_tags) ): self.unallocated_pod_gear.pop(free_device_name) free_device.data["pod"] = self.id free_device.data["lab_hostname"] = device startup_config = device_info.get("startup_config") if startup_config is not None: free_device.data["startup_config"] = startup_config self.hostname_to_device[device] = free_device if special_reset: self._process_special_reset_device(free_device) unallocated = False break if unallocated: raise ValueError( f'Cannot allocate pod #{self.id}, topology "{self.topology_name}" ' f"because an unallocated device with tags: {tags} and " f'the group "{group}" was not found' ) @staticmethod def skip_tunnel_creation(connection: Connection) -> bool: return any( "vm" in connection_end and not connection_end.get("update_vlan", True) for connection_end in connection ) @staticmethod def is_internet_service(connection: Connection) -> bool: return {"service": "internet"} in connection def _process_matrix_bypass(self, connection: Connection) -> None: if len(connection) != 2: raise ValueError( f"update_vlan is set to False but there are " f"{len(connection)} devices. Should be exactly 2." 
) device = None port_num = None vm = None for host in connection: if "hostname" in host: device = self.get_device(host["hostname"]) if "port" in host: port = host["port"] port_num_match = constants.INTERFACE_NAME_RE.search(port) if port_num_match is None: raise ValueError( f"Port {port} could not be parsed with regular expression" ) port_num = port_num_match.group("interface_num").replace("/", "-") if "vm" in host: vm = self.vm_name_to_vm[host["vm"]] if device and port_num and vm: portgroup = Portgroup(f"{device.name}__{port_num}") vm.portgroups.append(portgroup) vm.show_portgroups = True else: raise ValueError( f"Either 'device' or 'port' or 'vm' was not specified in: {connection}" ) def _process_connection_end(self, connection_end: ConnectionEnd, dot1q_vlan: int) -> None: if "hostname" in connection_end: hostname = connection_end["hostname"] device = self.get_device(hostname) device_free_matrix_ports = self.free_matrix_connections[device.name] if "port" in connection_end: port = connection_end["port"] matrix_int = device_free_matrix_ports.pop(port, None) if matrix_int is None: raise ValueError( f'Port {port!r} on the device {device.name!r} is not available ' f'for matrix connection' ) else: try: port, matrix_int = device_free_matrix_ports.popitem(last=False) # type: ignore except KeyError: raise ValueError( f'No more matrix free connections left for {device.name!r}' ) template_data = device.data.get("template_data") if template_data is None: template_data = {'interfaces': {}} device.data["template_data"] = template_data if 'tag' in connection_end: tag = connection_end['tag'] else: int_num = len(device.data["template_data"]["interfaces"]) + 1 tag = f'interface_{int_num}' template_data["interfaces"][tag] = port matrix_switch = self.get_device(device_name=matrix_int.device_name) matrix_int_num = matrix_int.num matrix_int_details = matrix_switch["interfaces"][matrix_int_num] matrix_int_details["access_vlan"] = dot1q_vlan matrix_int_details["shutdown"] = False matrix_int_details["description"] += ( f" | pod: {self.id} | hostname: {hostname}" ) elif "vm" in connection_end: vm_name = connection_end["vm"] full_vm_name = self.get_vm_name(vm_name) vm = VM(full_vm_name, self) self.vm_name_to_vm[vm_name] = vm portgroup_num = connection_end.get("portgroup_num") portgroup_name = self.get_vm_portgroup_name(vm_name, portgroup_num) vm.portgroups.append(Portgroup(portgroup_name, vlan=dot1q_vlan)) elif "service" in connection_end: pass def _process_matrix_connection( self, connection: Connection, dot1q_vlan: int ) -> None: for connection_end in connection: self._process_connection_end(connection_end, dot1q_vlan) def _process_connections(self) -> None: for connection in self.connections: if self.skip_tunnel_creation(connection): self._process_matrix_bypass(connection) else: if self.is_internet_service(connection): dot1q_vlan = self.deployment.internet_vlans.popleft() else: dot1q_vlan = self.deployment.cur_dot1q_tunnel_vlan self.deployment.cur_dot1q_tunnel_vlan += 1 self._process_matrix_connection(connection, dot1q_vlan) def _flatten_template_data(self) -> None: for device in self.hostname_to_device.values(): if 'template_data' in device.data: template_data = device.data["template_data"] template_data.update(template_data.pop('interfaces', {}))
utils/lit/tests/usage.py
clayne/DirectXShaderCompiler
1,192
11117497
# Basic sanity check that usage works. # # RUN: %{lit} --help > %t.out # RUN: FileCheck < %t.out %s # # CHECK: Usage: lit.py [options] {file-or-path}
studio/serve_main.py
ilblackdragon/studio
397
11117510
import argparse import sys import os import time import json import glob import traceback import importlib import pickle import re import threading from flask import Flask, request from studio import fs_tracker, logs from .model_util import ModelPipe app = Flask(__name__) model = None logger = None killtimer = None killtimer_duration = None def get_logger(): global logger if not logger: logger = logs.get_logger('studio-serve') logger.setLevel(logs.DEBUG) return logger def restart_killtimer(duration=None): global killtimer global killtimer_duration def killtriger(): get_logger().info('Shutting down due to inactivity') os._exit(0) if killtimer: killtimer.cancel() if duration: killtimer_duration = duration else: duration = killtimer_duration get_logger().info('Initializing kill timer for {} s' .format(duration)) killtimer = threading.Timer(duration, killtriger) killtimer.start() @app.route('/', methods=['POST']) def inference(): try: tic = time.time() input_dict = request.json output_dict = model(input_dict) return json.dumps(output_dict) except BaseException as e: return json.dumps({ 'error': traceback.format_exc(e) }) finally: get_logger().info('inference completed in {} s' .format(time.time() - tic)) restart_killtimer() def main(): argparser = argparse.ArgumentParser( description='Serve studio model' ) argparser.add_argument( '--wrapper', '-w', help='python script with function create_model ' + 'that takes modeldir ' '(that is, directory where experiment saves ' + 'the checkpoints etc)' + 'and returns dict -> dict function (model).' + 'By default, studio-serve will try to determine ' + 'this function automatically.', default=None ) argparser.add_argument('--port', help='port to run Flask server on', type=int, default=5000) argparser.add_argument('--host', help='host name.', default='0.0.0.0') argparser.add_argument( '--killafter', help='Shut down after this many seconds of inactivity', default=3600) options = argparser.parse_args(sys.argv[1:]) global model modeldir = fs_tracker.get_artifact('modeldata') if options.wrapper: module_name = re.sub('.py\Z', '', options.wrapper) wrapper_module = importlib.import_module(module_name) model = wrapper_module.create_model(modeldir) else: model = auto_generate_model(modeldir) restart_killtimer(int(options.killafter)) app.run(host=options.host, port=options.port) def auto_generate_model(modeldir): if modeldir is not None: hdf5_files = [ (p, os.path.getmtime(p)) for p in glob.glob(modeldir + '/*.hdf*') + glob.glob(modeldir + '/*.h5')] if any(hdf5_files): # experiment type - keras get_logger().info("Loading keras model " + "(using pickle serialization)") import keras last_checkpoint = max(hdf5_files, key=lambda t: t[1])[0] keras_model = keras.models.load_model(last_checkpoint) print(keras_model) keras_model.summary() return wrap_keras_model(keras_model) return lambda x: x def wrap_keras_model(keras_model): pipe = ModelPipe() pipe.add(lambda d: pickle.loads(d)) pipe.add(keras_model) pipe.add(lambda d: pickle.dumps(d)) return pipe if __name__ == '__main__': main()
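Once the Flask app above is serving, inference is a plain JSON POST to the root path; a minimal client sketch (port and payload are illustrative, and the requests library is assumed to be installed):

# Hypothetical client for the inference endpoint above.
import requests

payload = {"input": [1, 2, 3]}   # the expected shape depends on the served model
resp = requests.post("http://localhost:5000/", json=payload)
print(resp.json())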
custom_components/dahua/digest.py
lpdescamps/dahua
114
11117514
<reponame>lpdescamps/dahua<filename>custom_components/dahua/digest.py<gh_stars>100-1000 """Dahua Digest Auth Support""" import os import time import hashlib import aiohttp from aiohttp.client_reqrep import ClientResponse from aiohttp.client_exceptions import ClientError from yarl import URL # Seems that aiohttp doesn't support Diegest Auth, which Dahua cams require. So I had to bake it in here. # Copied and then modified from https://github.com/aio-libs/aiohttp/pull/2213 # I really wish this was baked into aiohttp :-( class DigestAuth: """HTTP digest authentication helper. The work here is based off of https://github.com/requests/requests/blob/v2.18.4/requests/auth.py. """ def __init__(self, username: str, password: str, session: aiohttp.ClientSession, previous=None): if previous is None: previous = {} self.username = username self.password = password self.last_nonce = previous.get("last_nonce", "") self.nonce_count = previous.get("nonce_count", 0) self.challenge = previous.get("challenge") self.args = {} self.session = session async def request(self, method, url, *, headers=None, **kwargs): """Makes a request""" if headers is None: headers = {} # Save the args so we can re-run the request self.args = {"method": method, "url": url, "headers": headers, "kwargs": kwargs} if self.challenge: authorization = self._build_digest_header(method.upper(), url) headers["AUTHORIZATION"] = authorization response = await self.session.request(method, url, headers=headers, **kwargs) # Only try performing digest authentication if the response status is from 401 if response.status == 401: return await self._handle_401(response) return response def _build_digest_header(self, method, url): """ :rtype: str """ realm = self.challenge["realm"] nonce = self.challenge["nonce"] qop = self.challenge.get("qop") algorithm = self.challenge.get("algorithm", "MD5").upper() opaque = self.challenge.get("opaque") if qop and not (qop == "auth" or "auth" in qop.split(",")): raise ClientError("Unsupported qop value: %s" % qop) # lambdas assume digest modules are imported at the top level if algorithm == "MD5" or algorithm == "MD5-SESS": hash_fn = hashlib.md5 elif algorithm == "SHA": hash_fn = hashlib.sha1 else: return "" def H(x): return hash_fn(x.encode()).hexdigest() def KD(s, d): return H("%s:%s" % (s, d)) path = URL(url).path_qs A1 = "%s:%s:%s" % (self.username, realm, self.password) A2 = "%s:%s" % (method, path) HA1 = H(A1) HA2 = H(A2) if nonce == self.last_nonce: self.nonce_count += 1 else: self.nonce_count = 1 self.last_nonce = nonce ncvalue = "%08x" % self.nonce_count # cnonce is just a random string generated by the client. cnonce_data = "".join( [ str(self.nonce_count), nonce, time.ctime(), os.urandom(8).decode(errors="ignore"), ] ).encode() cnonce = hashlib.sha1(cnonce_data).hexdigest()[:16] if algorithm == "MD5-SESS": HA1 = H("%s:%s:%s" % (HA1, nonce, cnonce)) # This assumes qop was validated to be 'auth' above. If 'auth-int' # support is added this will need to change. 
if qop: noncebit = ":".join([nonce, ncvalue, cnonce, "auth", HA2]) response_digest = KD(HA1, noncebit) else: response_digest = KD(HA1, "%s:%s" % (nonce, HA2)) base = ", ".join( [ 'username="%s"' % self.username, 'realm="%s"' % realm, 'nonce="%s"' % nonce, 'uri="%s"' % path, 'response="%s"' % response_digest, 'algorithm="%s"' % algorithm, ] ) if opaque: base += ', opaque="%s"' % opaque if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return "Digest %s" % base async def _handle_401(self, response: ClientResponse): """ Takes the given response and tries digest-auth, if needed. :rtype: ClientResponse """ auth_header = response.headers.get("www-authenticate", "") parts = auth_header.split(" ", 1) if "digest" == parts[0].lower() and len(parts) > 1: # Close the initial response since we are going making another request and return that response response.close() self.challenge = parse_key_value_list(parts[1]) return await self.request( self.args["method"], self.args["url"], headers=self.args["headers"], **self.args["kwargs"], ) return response def parse_pair(pair): key, value = pair.strip().split("=", 1) # If it has a trailing comma, remove it. if value[-1] == ",": value = value[:-1] # If it is quoted, then remove them. if value[0] == value[-1] == '"': value = value[1:-1] return key, value def parse_key_value_list(header): return { key: value for key, value in [parse_pair(header_pair) for header_pair in header.split(",")] }
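A hedged sketch of how DigestAuth is driven from an aiohttp session; the URL, path, and credentials below are placeholders:

# Hypothetical usage of the DigestAuth helper above.
import asyncio
import aiohttp

async def fetch_snapshot():
    async with aiohttp.ClientSession() as session:
        auth = DigestAuth("admin", "secret", session)
        response = await auth.request("GET", "http://192.168.1.10/cgi-bin/snapshot.cgi")
        return await response.read()

asyncio.run(fetch_snapshot())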
scripts/release19/lodobject_simplify.py
tdapper/cinema4d_py_sdk
113
11117552
# This example configures the active LodObject 'op' to use the "Simplify" mode. # The first level uses the "Convex Hull" mode, the second the "Null" mode. # Use "Simplify" mode and a manual number of levels. import c4d op[c4d.LOD_MODE] = c4d.LOD_MODE_SIMPLIFY op[c4d.LOD_CRITERIA] = c4d.LOD_CRITERIA_MANUAL op[c4d.LOD_LEVEL_COUNT_DYN] = 2 # first level descID = op.GetSimplifyModeDescID(0) if descID is not None: # set mode to "Convex Hull" op[descID] = c4d.LOD_SIMPLIFY_CONVEXHULL descID = op.GetPerObjectControlDescID(0) if descID is not None: # set "Per Object" to True op[descID] = True # second level descID = op.GetSimplifyModeDescID(1) if descID is not None: # set mode to "Null" op[descID] = c4d.LOD_SIMPLIFY_NULL descID = op.GetNullDisplayDescID(1) if descID is not None: # set "Display" to "Circle" op[descID] = c4d.NULLOBJECT_DISPLAY_CIRCLE c4d.EventAdd()
tests/test_text.py
tatarize/vpype
453
11117558
<reponame>tatarize/vpype import pytest import vpype as vp from vpype_viewer import ImageRenderer, UnitType # noinspection SpellCheckingInspection LOREM = ( "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor " "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud " "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. " "Semper quis lectus nulla at volutpat. Nibh tortor id aliquet lectus.\n" "\n" "Ultrices sagittis orci a scelerisque purus semper eget duis at. Ultrices vitae auctor eu " "augue ut lectus.\n" "Interdum velit euismod in pellentesque massa placerat duis ultricies lacus. Morbi quis " "commodo odio aenean sed adipiscing diam. Risus nec feugiat in fermentum posuere urna nec " "tincidunt." ) def test_text_unknown_font(): with pytest.raises(ValueError): vp.text_line("Hello", "unknown font") with pytest.raises(ValueError): vp.text_block("Hello world", 500, font_name="unknown font") def test_text_unknown_align(): with pytest.raises(ValueError): vp.text_line("Hello", align="wild") with pytest.raises(ValueError): vp.text_block("Hello", 500, align="wild") def test_text_unicode_ok(): vp.text_line("hello 😂 world") @pytest.mark.parametrize("font_name", ["timesg", "futural", "gothiceng"]) @pytest.mark.parametrize( ["align", "line_spacing", "justify"], [ ("left", 1, False), ("left", 1, True), ("center", 1, False), ("right", 1, False), ("left", 2, False), ], ) def test_text_block_render(assert_image_similarity, font_name, align, line_spacing, justify): doc = vp.Document() doc.add( vp.text_block( LOREM, 500, font_name, size=18, align=align, line_spacing=line_spacing, justify=justify, ) ) doc[1].append(vp.line(500, -20, 500, 500)) renderer = ImageRenderer((1024, 1024)) renderer.engine.document = doc renderer.engine.show_rulers = True renderer.engine.unit_type = UnitType.PIXELS renderer.engine.origin = (-20, -20) renderer.engine.scale = 1.8 assert_image_similarity(renderer.render())
tests/components/plugwise/conftest.py
MrDelik/core
30,023
11117569
"""Setup mocks for the Plugwise integration tests.""" from __future__ import annotations from collections.abc import Generator import json from typing import Any from unittest.mock import AsyncMock, MagicMock, patch import pytest from homeassistant.components.plugwise.const import API, DOMAIN, PW_TYPE from homeassistant.const import ( CONF_HOST, CONF_MAC, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, ) from homeassistant.core import HomeAssistant from tests.common import MockConfigEntry, load_fixture def _read_json(environment: str, call: str) -> dict[str, Any]: """Undecode the json data.""" fixture = load_fixture(f"plugwise/{environment}/{call}.json") return json.loads(fixture) @pytest.fixture def mock_config_entry() -> MockConfigEntry: """Return the default mocked config entry.""" return MockConfigEntry( title="My Plugwise", domain=DOMAIN, data={ CONF_HOST: "127.0.0.1", CONF_MAC: "AA:BB:CC:DD:EE:FF", CONF_PASSWORD: "<PASSWORD>", CONF_PORT: 80, CONF_USERNAME: "smile", PW_TYPE: API, }, unique_id="smile98765", ) @pytest.fixture def mock_setup_entry() -> Generator[AsyncMock, None, None]: """Mock setting up a config entry.""" with patch( "homeassistant.components.plugwise.async_setup_entry", return_value=True ) as mock_setup: yield mock_setup @pytest.fixture() def mock_smile_config_flow() -> Generator[None, MagicMock, None]: """Return a mocked Smile client.""" with patch( "homeassistant.components.plugwise.config_flow.Smile", autospec=True, ) as smile_mock: smile = smile_mock.return_value smile.smile_hostname = "smile12345" smile.smile_name = "Test Smile Name" smile.connect.return_value = True yield smile @pytest.fixture def mock_smile_adam() -> Generator[None, MagicMock, None]: """Create a Mock Adam environment for testing exceptions.""" chosen_env = "adam_multiple_devices_per_zone" with patch( "homeassistant.components.plugwise.gateway.Smile", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = "fe799307f1624099878210aa0b9f1475" smile.heater_id = "90986d591dcd426cae3ec3e8111ff730" smile.smile_version = "3.0.15" smile.smile_type = "thermostat" smile.smile_hostname = "smile98765" smile.smile_name = "Adam" smile.connect.return_value = True smile.notifications = _read_json(chosen_env, "notifications") smile.async_update.return_value = _read_json(chosen_env, "all_data") yield smile @pytest.fixture def mock_smile_anna() -> Generator[None, MagicMock, None]: """Create a Mock Anna environment for testing exceptions.""" chosen_env = "anna_heatpump" with patch( "homeassistant.components.plugwise.gateway.Smile", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = "015ae9ea3f964e668e490fa39da3870b" smile.heater_id = "1cbf783bb11e4a7c8a6843dee3a86927" smile.smile_version = "4.0.15" smile.smile_type = "thermostat" smile.smile_hostname = "smile98765" smile.smile_name = "Anna" smile.connect.return_value = True smile.notifications = _read_json(chosen_env, "notifications") smile.async_update.return_value = _read_json(chosen_env, "all_data") yield smile @pytest.fixture def mock_smile_p1() -> Generator[None, MagicMock, None]: """Create a Mock P1 DSMR environment for testing exceptions.""" chosen_env = "p1v3_full_option" with patch( "homeassistant.components.plugwise.gateway.Smile", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = "e950c7d5e1ee407a858e2a8b5016c8b3" smile.heater_id = None smile.smile_version = "3.3.9" smile.smile_type = "power" smile.smile_hostname = "smile98765" smile.smile_name = "Smile P1" 
smile.connect.return_value = True smile.notifications = _read_json(chosen_env, "notifications") smile.async_update.return_value = _read_json(chosen_env, "all_data") yield smile @pytest.fixture def mock_stretch() -> Generator[None, MagicMock, None]: """Create a Mock Stretch environment for testing exceptions.""" chosen_env = "stretch_v31" with patch( "homeassistant.components.plugwise.gateway.Smile", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = "259882df3c05415b99c2d962534ce820" smile.heater_id = None smile.smile_version = "3.1.11" smile.smile_type = "stretch" smile.smile_hostname = "stretch98765" smile.smile_name = "Stretch" smile.connect.return_value = True smile.async_update.return_value = _read_json(chosen_env, "all_data") yield smile @pytest.fixture async def init_integration( hass: HomeAssistant, mock_config_entry: MockConfigEntry ) -> MockConfigEntry: """Set up the Plugwise integration for testing.""" mock_config_entry.add_to_hass(hass) await hass.config_entries.async_setup(mock_config_entry.entry_id) await hass.async_block_till_done() return mock_config_entry
examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/utilities/noise_generators.py
felipeek/bullet3
9,136
11117571
"""Noise generators to simulate noise in sensor / actuator classes.""" import abc import gin import numpy as np class NoiseGenerator(metaclass=abc.ABCMeta): """Base class for noise generators.""" @abc.abstractmethod def _get_noise(self, shape, dtype=None): """Gets noise as a numpy array in the specified shape and dtype. Tensorflow requires the shape and dtype of noise to be correctly specified, so the generator needs to know this to produce data of the correct type. Args: shape: Shape of the returned array. dtype: Datatype of returned array (None for default). """ @abc.abstractmethod def add_noise(self, data): """Adds noise generated by _get_noise to the given data with clipping. Args: data: Numpy array of data to be modified with noise. """ @gin.configurable class BiasNoise(NoiseGenerator): """Adds bias to the data, possibly with clipping.""" def __init__(self, bias=0.0, clipping_lower_bound=-np.inf, clipping_upper_bound=np.inf): """Create a bias noise generator. Args: bias: Absolute magnitude of bias applied to input. clipping_lower_bound: lower bound of add_noise (use -np.inf to ignore). clipping_upper_bound: Upper bound of add_noise (use np.inf to ignore). """ self._bias = bias self._clipping_lower_bound = clipping_lower_bound self._clipping_upper_bound = clipping_upper_bound def _get_noise(self, shape, dtype=None): """Create bias noise of the given direction and datatype.""" return np.full(shape, self._bias, dtype) def add_noise(self, data): """Add bias noise to the given data, clipping to the given range.""" noise = self._get_noise(data.shape, data.dtype) return np.clip(data + noise, self._clipping_lower_bound, self._clipping_upper_bound) @gin.configurable class NormalNoise(BiasNoise): """Adds Gaussian noise to the data, possibly with clipping.""" def __init__(self, scale, **kwargs): """Create a normal noise generator. Args: scale: Absolute magnitude of standard deviation of Gaussian noise. Note numpy will throw an error if scale < 0. **kwargs: Arguments passed to BiasNoise (e.g. bias and clipping). """ super(NormalNoise, self).__init__(**kwargs) self._scale = scale def _get_noise(self, shape, dtype=None): """Create normal noise of the given direction and datatype.""" return np.random.normal(self._bias, self._scale, shape).astype(dtype) @gin.configurable class UniformNoise(NoiseGenerator): """Generates uniform noise in the given range.""" def __init__(self, low, high, clipping_lower_bound=-np.inf, clipping_upper_bound=np.inf): """Creates a uniform noise generator. Args: low: the lower bound of the noise. high: the higher bound of the noise. clipping_lower_bound: lower bound of add_noise (use -np.inf to ignore). clipping_upper_bound: Upper bound of add_noise (use np.inf to ignore). """ super().__init__() self._low = low self._high = high self._clipping_lower_bound = clipping_lower_bound self._clipping_upper_bound = clipping_upper_bound def _get_noise(self, shape, dtype=None): """Generates a noise using the given shape and data type.""" return np.random.uniform(self._low, self._high, shape).astype(dtype) def add_noise(self, data): """Adds noise to the given data, clipping to the given bound.""" noise = self._get_noise(data.shape, data.dtype) return np.clip(data + noise, self._clipping_lower_bound, self._clipping_upper_bound) @gin.configurable class RangeNoise(NormalNoise): """Add normally distributed noise in m, applied to hit fractions in (0, 1). 
This enables us to specify range noise in terms of meters of Gaussian noise between a maximum and minimum range, but the add_noise is applied as above to values expected to be in a hit fraction range of (0, 1) as needed for the SimLidarSensor API. Separate methods return noise or noisify data in meters. """ def __init__(self, range_noise_m, max_range_m, min_range_m=0.0, **kwargs): """Create a normal noise generator suitable for use in a range scanner. Args: range_noise_m: Absolute magnitude of standard deviation of Gaussian noise, applied to range observation readngs, measured in meters. max_range_m: Maximum range in meters of the data, used for clipping. min_range_m: Minimum range in meters of the data, used for clipping. **kwargs: Other arguments passed to NormalNoise (principally bias). """ # Validate range values. if range_noise_m < 0.0: raise ValueError("Range noise should not be negative: %r" % range_noise_m) if min_range_m >= max_range_m: raise ValueError("min_range_m %s must be less than max_range_m %s" % (min_range_m, max_range_m)) self._range_noise_m = range_noise_m self._max_range_m = max_range_m self._min_range_m = min_range_m self._total_range = max_range_m - min_range_m super(RangeNoise, self).__init__( scale=range_noise_m / self._total_range, clipping_lower_bound=0.0, clipping_upper_bound=1.0, **kwargs) def _get_noise_m(self, shape, dtype=None): """Create normal noise of the given direction and datatype, in meters.""" return self.range_to_m(self._get_noise(shape=shape, dtype=dtype)) def add_noise_m(self, data): """Add normal noise to the given data, scaled in meters.""" return self.range_to_m(self.add_noise(self.m_to_range(data))) def m_to_range(self, data): """Scale data in meters to a range of (0, 1).""" return (data - self._min_range_m) / self._total_range def range_to_m(self, data): """Scale data in range of (0, 1) to meters.""" return data * self._total_range + self._min_range_m @gin.configurable class TwistNoise(object): """Add normally distributed noise to twist actions. Note this is a simplified noise model in action space designed for parity with DriveWorld's r/s/e/drive_models/twist_drive.py;rcl=307540784;l=161. This assumes that the TwistNoise will be applied to a twist action which is then clipped, as currently done in wheeled_robot_base.py: robotics/reinforcement_learning/minitaur/robots/wheeled_robot_base.py;l=533 # We assume that the velocity clipping would be absorbed in this API. if self._action_filter: action = self._action_filter.filter(action) where action is a linear_velocity, angular_velocity pair, which is clipped to limits subsequently by the _compute_kinematic_base_velocity method. """ def __init__(self, linear_velocity_noise_stdev_mps: float, linear_velocity_noise_max_stdevs: float, angular_velocity_noise_stdev_rps: float, angular_velocity_noise_max_stdevs: float, noise_scaling_cutoff_mps: float = 0.0): """Create a normal noise generator suitable for use in a range scanner. Supports the API specified in the DriveWorld TwistDrive class: robotics/simulation/environments/drive_models/twist_drive.py;l=54 Args: linear_velocity_noise_stdev_mps: One standard deviation of normal noise for linear velocity in meters per second. linear_velocity_noise_max_stdevs: Max stdevs for linear velocity noise. This ensures that the noise values do not spike too crazy. angular_velocity_noise_stdev_rps: One standard deviation of normal noise for angular velocity in radians per second. angular_velocity_noise_max_stdevs: Max stdevs for angular velocity noise. 
noise_scaling_cutoff_mps: If linear velocity is less than this cutoff, linear and angular noise are scaled so that zero velocity produces zero noise. This enables a robot at rest to remain at rest, while still applying reasonable noise values to finite velocities. Angular velocity does not contribute to this computation to keep the model simple. """ # Validate range values. if linear_velocity_noise_stdev_mps < 0.0: raise ValueError("Linear action noise should not be negative: %r" % linear_velocity_noise_stdev_mps) if linear_velocity_noise_max_stdevs < 0.0: raise ValueError("Maximum linear noise should not be negative: %r" % linear_velocity_noise_max_stdevs) if angular_velocity_noise_stdev_rps < 0.0: raise ValueError("Angular action noise should not be negative: %r" % angular_velocity_noise_stdev_rps) if angular_velocity_noise_max_stdevs < 0.0: raise ValueError("Maximum action noise should not be negative: %r" % angular_velocity_noise_max_stdevs) if noise_scaling_cutoff_mps < 0.0: raise ValueError("Noise scaling cutoff should not be negative: %r" % noise_scaling_cutoff_mps) # Save the values to create our noise later. self._noise_shape = [ linear_velocity_noise_stdev_mps, angular_velocity_noise_stdev_rps ] # The noise clipping is performed using one standard deviation as the unit. self._noise_lower_bound = np.array([ -linear_velocity_noise_max_stdevs * linear_velocity_noise_stdev_mps, -angular_velocity_noise_max_stdevs * angular_velocity_noise_stdev_rps ]) self._noise_upper_bound = -self._noise_lower_bound self._noise_scaling_cutoff_mps = noise_scaling_cutoff_mps def filter(self, action): """Filter the linear and angular velocity by adding noise.""" linear_velocity, angular_velocity = action linear_noise, angular_noise = np.clip( np.random.normal(0, self._noise_shape, 2), self._noise_lower_bound, self._noise_upper_bound) if self._noise_scaling_cutoff_mps: clipped_velocity = min(abs(linear_velocity), self._noise_scaling_cutoff_mps) scaling_factor = clipped_velocity / self._noise_scaling_cutoff_mps linear_noise *= scaling_factor angular_noise *= scaling_factor return (linear_velocity + linear_noise, angular_velocity + angular_noise)
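The docstrings above describe how RangeNoise maps between meters and the (0, 1) hit-fraction range and how TwistNoise zeroes its noise near rest. A minimal usage sketch, assuming it is run in the same module (so the classes above are in scope) and that numpy and gin are installed; the numeric values are invented for illustration only:

import numpy as np

# Bias plus clipping: every reading is shifted by +0.1 and capped at 1.0.
bias = BiasNoise(bias=0.1, clipping_upper_bound=1.0)
print(bias.add_noise(np.array([0.2, 0.95, 0.5])))  # -> [0.3, 1.0, 0.6]

# Range noise is specified in meters but applied internally in hit-fraction space.
lidar_noise = RangeNoise(range_noise_m=0.05, max_range_m=10.0, min_range_m=0.5)
print(lidar_noise.add_noise_m(np.array([0.5, 5.0, 10.0])))  # noisy depths in meters

# Twist noise: at zero linear velocity the scaling cutoff zeroes the noise,
# so a robot at rest stays at rest.
twist = TwistNoise(
    linear_velocity_noise_stdev_mps=0.02,
    linear_velocity_noise_max_stdevs=3.0,
    angular_velocity_noise_stdev_rps=0.01,
    angular_velocity_noise_max_stdevs=3.0,
    noise_scaling_cutoff_mps=0.1,
)
print(twist.filter((0.0, 0.0)))  # -> (0.0, 0.0)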
example-mnist/bin/py/mnist_simple.py
marcel303/ofxMSATensorFlow
481
11117611
# ============================================================================== ### IF YOU ARE RUNNING THIS IN SPYDER MAKE SURE TO USE A NEW CONSOLE EACH TIME ### TO CLEAR THE SESSION ### (press F6, and select 'Execute in a new dedicated Python console') # ============================================================================== # Copyright 2015 Google Inc. All Rights Reserved. # Modified by <NAME> to demonstrate ofxMSATensorFlow # http://github.com/memo/ofxMSATensorFlow # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A very simple MNIST classifier. See extensive documentation at http://tensorflow.org/tutorials/mnist/beginners/index.md """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Import data import input_data import tensorflow as tf import shutil import os out_path = '../data/model-simple' out_fname = 'mnist-simple' # Get data mnist = input_data.read_data_sets("training_data/", one_hot=True) def save(path, fname, sess): # Write flow graph to disk # MEGA UGLY HACK BECAUSE TENSOR FLOW DOESN'T SAVE VARIABLES # NB. tf.train.write_graph won't save value of variables (yet?) # so need to save value of variables as constants, # and then in C++ push the constants back to the vars :S # based on https://stackoverflow.com/questions/34343259/is-there-an-example-on-how-to-generate-protobuf-files-holding-trained-tensorflow/34343517 for v in tf.all_variables(): vc = tf.constant(v.eval(sess)) n = v.name.split(":")[0] # get name (not sure what the :0 is) tf.assign(v, vc, name=n+"_VARHACK") # Delete output folder if it exists if os.path.exists(out_path): shutil.rmtree(out_path) fname = fname+".pb"; print("Saving to ", path+"/"+fname, "...") tf.train.write_graph(sess.graph_def, path, fname, as_text=False) print("...done.") with tf.Session() as sess: # Create the model # As a temp measure I'm adding _VIZ to the names of layers I want to visualize x = tf.placeholder(tf.float32, [None, 784], name='x_inputs') W = tf.Variable(tf.zeros([784, 10]), name="weights_VIZ") b = tf.Variable(tf.zeros([10]), name="biases") y = tf.nn.softmax(tf.matmul(x, W) + b, name='y_outputs') # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) # Init sess.run(tf.initialize_all_variables()) # Train for i in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, {x: batch_xs, y_: batch_ys}) # Test trained model correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print("Training complete. Accuracy:", accuracy.eval({x: mnist.test.images, y_: mnist.test.labels})) save(out_path, out_fname, sess) # Save variables ? can't load them from C++ :( saver = tf.train.Saver() saver.save(sess, out_path + '/' + out_fname + '.ckpt')
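The comments above explain the "VARHACK": variable values are saved as constants plus assign ops so the C++ loader can push them back into the variables. A hedged sketch (not from the original repo) of doing the same check from Python with the TF 1.x-era API the script already uses; the .pb path mirrors the output path above:

import tensorflow as tf

with open('../data/model-simple/mnist-simple.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # Find the assign ops created by the VARHACK and run them to restore weights.
    assign_ops = [op for op in graph.get_operations() if op.name.endswith('_VARHACK')]
    with tf.Session(graph=graph) as sess:
        sess.run(assign_ops)
        y = graph.get_tensor_by_name('y_outputs:0')  # softmax output defined above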
tests/test_xsto_calendar.py
syonoki/exchange_calendars
128
11117616
<filename>tests/test_xsto_calendar.py<gh_stars>100-1000 import pytest import pandas as pd from exchange_calendars.exchange_calendar_xsto import XSTOExchangeCalendar from .test_exchange_calendar import ExchangeCalendarTestBase class TestXSTOCalendar(ExchangeCalendarTestBase): @pytest.fixture(scope="class") def calendar_cls(self): yield XSTOExchangeCalendar @pytest.fixture def max_session_hours(self): # The XSTO is open from 9:00 am to 5:30 pm. yield 8.5 @pytest.fixture def regular_holidays_sample(self): yield [ # 2018 + "2018-01-01", # New Year's Day "2017-01-06", # Epiphany "2018-03-30", # Good Friday "2018-04-02", # Easter Monday "2018-05-01", # Labour Day "2018-05-10", # Ascension Day "2018-06-06", # National Day "2018-06-22", # Midsummer Eve "2018-12-24", # Christmas Eve "2018-12-25", # Christmas Day "2018-12-26", # Boxing Day "2018-12-31", # New Year's Eve # # Midsummer Eve falls on the Friday after June 18. "2010-06-25", # 18th a Friday, holiday observed Friday 25th (not 18th). "2017-06-23", # 18th a Sunday, holiday observed following Friday. # Holidays that ceased to be observed. "2004-05-31", # Whit Monday last observed in 2004. ] @pytest.fixture def non_holidays_sample(self): yield [ # Midsummer Eve falls on the Friday after June 18. "2010-06-18", # 18th a Friday, holiday observed Friday 25th (not 18th) # # Holidays that fall on a weekend and are not made up. Ensure surrounding # days are not holidays. # In 2018, the Epiphany fell on a Saturday, so the market should be # open on both the prior Friday and the following Monday. "2018-01-05", "2018-01-08", # In 2010, Labour Day fell on a Saturday, so the market should be # open on both the prior Friday and the following Monday. "2010-04-30", "2010-05-03", # In 2015, National Day fell on a Saturday, so the market should be # open on both the prior Friday and the following Monday. "2015-06-05", "2015-06-08", # In 2010, Christmas fell on a Saturday, meaning Boxing Day fell on # a Sunday. The market should thus be open on the following Monday. "2010-12-27", # In 2017, New Year's Day fell on a Sunday, so the market should be # open on both the prior Friday and the following Monday. "2016-12-30", "2017-01-02", # # Holidays that ceased to be observed. "2005-05-16", # Whit Monday ceased to be observed from 2005. ] @pytest.fixture def early_closes_sample(self): yield [ "2018-01-05", # Day before Epiphany. "2018-03-29", # Maundy Thursday. "2018-04-30", # Day before Labour Day. "2018-05-09", # Day before Ascension Day. "2018-11-02", # All Saints' Eve. "2015-10-30", # All Saints' Eve. "2010-11-05", # All Saints' Eve. ] @pytest.fixture def early_closes_sample_time(self): yield pd.Timedelta(13, "H")
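For a quick interactive check of the rules the fixtures above exercise (illustrative only, not part of the test suite; requires the exchange_calendars package):

import exchange_calendars as xcals

xsto = xcals.get_calendar("XSTO")
print(xsto.is_session("2018-01-01"))  # False: New Year's Day
print(xsto.is_session("2018-01-05"))  # True: trading day, early close before Epiphany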
qcodes/dataset/data_set_info.py
riju-pal/QCoDeS_riju
223
11117657
<filename>qcodes/dataset/data_set_info.py import json from typing import Any, Dict, List, Optional from typing_extensions import TypedDict from qcodes.dataset.descriptions.rundescriber import RunDescriber from qcodes.dataset.linked_datasets.links import Link, str_to_links from qcodes.dataset.sqlite.connection import ConnectionPlus from qcodes.dataset.sqlite.queries import ( ExperimentAttributeDict, get_raw_run_attributes, raw_time_to_str_time, ) from .descriptions.versioning import serialization class RunAttributesDict(TypedDict): run_id: int counter: int captured_run_id: int captured_counter: int experiment: ExperimentAttributeDict name: str run_timestamp: Optional[str] completed_timestamp: Optional[str] metadata: Dict[str, Any] parent_dataset_links: List[Link] run_description: RunDescriber snapshot: Optional[Dict[str, Any]] def get_run_attributes(conn: ConnectionPlus, guid: str) -> Optional[RunAttributesDict]: """ Look up all information and metadata about a given dataset captured in the database. Args: conn: Connection to the database guid: GUID of the dataset to look up Returns: Dictionary of information about the dataset. """ raw_attributes = get_raw_run_attributes(conn, guid) if raw_attributes is None: return None attributes: RunAttributesDict = { "run_id": raw_attributes["run_id"], "counter": raw_attributes["counter"], "captured_run_id": raw_attributes["captured_run_id"], "captured_counter": raw_attributes["captured_counter"], "experiment": raw_attributes["experiment"], "name": raw_attributes["name"], "run_timestamp": raw_time_to_str_time(raw_attributes["run_timestamp"]), "completed_timestamp": raw_time_to_str_time( raw_attributes["completed_timestamp"] ), "metadata": raw_attributes["metadata"], "parent_dataset_links": str_to_links(raw_attributes["parent_dataset_links"]), "run_description": serialization.from_json_to_current( raw_attributes["run_description"] ), "snapshot": json.loads(raw_attributes["snapshot"]) if raw_attributes["snapshot"] is not None else None, } return attributes
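An illustrative helper built on the function above (a sketch that assumes it lives in the same module; `conn` must be an open ConnectionPlus and `guid` the GUID of an existing run):

def describe_run(conn: ConnectionPlus, guid: str) -> str:
    """Return a one-line, human-readable summary of the run with the given GUID."""
    attrs = get_run_attributes(conn, guid)
    if attrs is None:
        return f"no run found for GUID {guid}"
    return f"{attrs['name']} (run_id={attrs['run_id']}) started {attrs['run_timestamp']}"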
test/test_exception.py
swuecho/pyecharts
11,032
11117662
<reponame>swuecho/pyecharts from nose.tools import assert_equal from pyecharts import options as opts from pyecharts.charts import Geo, BMap from pyecharts.exceptions import NonexistentCoordinatesException BAIDU_MAP_API_PREFIX = "https://api.map.baidu.com/api?v=2.0" FAKE_API_KEY = "fake_application_key" def test_geo_catch_nonexistent_coord_exception(): try: ( Geo() .add_schema(maptype="china") .add("geo", [["NonexistentLocation", 123]]) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) .set_global_opts(visualmap_opts=opts.VisualMapOpts()) ) except NonexistentCoordinatesException as err: assert_equal(type(err), NonexistentCoordinatesException) def test_geo_ignore_nonexistent_coord_exception(): ( Geo(is_ignore_nonexistent_coord=True) .add_schema(maptype="china") .add("geo", [["NonexistentLocation", 123]]) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) .set_global_opts(visualmap_opts=opts.VisualMapOpts()) ) def test_bmap_catch_nonexistent_coord_exception(): try: ( BMap() .add_schema(baidu_ak=FAKE_API_KEY, center=[-0.118092, 51.509865]) .add( "bmap", [["NonexistentLocation", 123]], label_opts=opts.LabelOpts(formatter="{b}"), ) ) except NonexistentCoordinatesException as err: assert_equal(type(err), NonexistentCoordinatesException) def test_bmap_ignore_nonexistent_coord_exception(): ( BMap(is_ignore_nonexistent_coord=True) .add_schema(baidu_ak=FAKE_API_KEY, center=[-0.118092, 51.509865]) .add( "bmap", [["NonexistentLocation", 123]], label_opts=opts.LabelOpts(formatter="{b}"), ) )
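Besides catching or ignoring the exception as the tests above do, a third option is to register the unknown location's coordinates up front so no exception is raised. A sketch (not part of the test file; the longitude/latitude values are made up):

from pyecharts.charts import Geo
from pyecharts import options as opts

geo = Geo().add_schema(maptype="china")
geo.add_coordinate("NonexistentLocation", 116.40, 39.90)  # register made-up lon/lat
geo.add("geo", [["NonexistentLocation", 123]])
geo.set_global_opts(visualmap_opts=opts.VisualMapOpts())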
src/browser_source_interaction.py
upgradeQ/OBS-Studio-Python-Scripting-Cheatsheet
116
11117687
<gh_stars>100-1000 import obspython as S from contextlib import contextmanager G = lambda: ... @contextmanager def source_auto_release(source_name): source = S.obs_get_source_by_name(source_name) try: yield source finally: S.obs_source_release(source) def get_modifiers(key_modifiers): if key_modifiers: shift = key_modifiers.get("shift") control = key_modifiers.get("control") alt = key_modifiers.get("alt") command = key_modifiers.get("command") else: shift = control = alt = command = 0 modifiers = 0 if shift: modifiers |= S.INTERACT_SHIFT_KEY if control: modifiers |= S.INTERACT_CONTROL_KEY if alt: modifiers |= S.INTERACT_ALT_KEY if command: modifiers |= S.INTERACT_COMMAND_KEY return modifiers def send_hotkey_to_browser(source, obs_htk_id, key_modifiers=None, key_up=False): key = S.obs_key_from_name(obs_htk_id) vk = S.obs_key_to_virtual_key(key) event = S.obs_key_event() event.native_vkey = vk event.modifiers = get_modifiers(key_modifiers) event.native_modifiers = event.modifiers # https://doc.qt.io/qt-5/qkeyevent.html event.native_scancode = vk event.text = "" S.obs_source_send_key_click(source, event, key_up) def press_tab(*p): with source_auto_release(G.source_name) as source: send_hotkey_to_browser(source, "OBS_KEY_TAB") send_hotkey_to_browser(source, "OBS_KEY_TAB", key_up=True) def press_shift_tab(*p): with source_auto_release(G.source_name) as source: send_hotkey_to_browser(source, "OBS_KEY_TAB", {"shift": True}) send_hotkey_to_browser(source, "OBS_KEY_TAB", {"shift": True}, key_up=True) def send_mouse_click_to_browser( source, x=0, y=0, button_type=S.MOUSE_LEFT, mouse_up=False, click_count=1, key_modifiers=None, ): event = S.obs_mouse_event() event.modifiers = get_modifiers(key_modifiers) event.x = x event.y = y S.obs_source_send_mouse_click(source, event, button_type, mouse_up, click_count) def send_mouse_move_to_browser( source, x=0, y=0, key_modifiers=None, ): event = S.obs_mouse_event() event.modifiers = get_modifiers(key_modifiers) event.x = x event.y = y S.obs_source_send_mouse_move(source, event, False) # do not leave def move_mouse0(*p): with source_auto_release(G.source_name) as source: send_mouse_move_to_browser(source, 0, 0) def move_mouse1(*p): with source_auto_release(G.source_name) as source: send_mouse_move_to_browser(source, 100, 200) def click_at(*p): with source_auto_release(G.source_name) as source: send_mouse_click_to_browser(source, 100, 200) send_mouse_click_to_browser(source, 100, 200, mouse_up=True, click_count=2) def script_update(settings): G.source_name = S.obs_data_get_string(settings, "source") def script_properties(): # ui props = S.obs_properties_create() p = S.obs_properties_add_list( props, "source", "Browser source", S.OBS_COMBO_TYPE_EDITABLE, S.OBS_COMBO_FORMAT_STRING, ) S.obs_properties_add_button(props, "button1", "Press tab", press_tab) S.obs_properties_add_button(props, "button2", "Press shift+tab", press_shift_tab) S.obs_properties_add_button(props, "button3", "Send LMB at [100,200]", click_at) S.obs_properties_add_button(props, "button4", "Move to 0,0 ", move_mouse0) S.obs_properties_add_button(props, "button5", "Move to 100,200 ", move_mouse1) sources = S.obs_enum_sources() if sources is not None: for source in sources: source_id = S.obs_source_get_unversioned_id(source) if source_id == "browser_source": name = S.obs_source_get_name(source) S.obs_property_list_add_string(p, name, name) S.source_list_release(sources) return props
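One more helper in the same style as the callbacks above (illustrative, not in the original script): it sends a single Enter key press to the selected browser source, assuming "OBS_KEY_RETURN" is the key id for Return.

def press_enter(*p):
    with source_auto_release(G.source_name) as source:
        send_hotkey_to_browser(source, "OBS_KEY_RETURN")
        send_hotkey_to_browser(source, "OBS_KEY_RETURN", key_up=True)

It could be wired to a button in script_properties the same way the existing callbacks are registered.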
yabgp/tests/unit/message/attribute/nlri/test_blgls_epe.py
mengjunyi/yabgp
203
11117689
<reponame>mengjunyi/yabgp # Copyright 2015-2017 Cisco Systems, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http:\\www.apache.org\licenses\LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from yabgp.message.attribute.nlri.linkstate import BGPLS class TestBGPLSEPE(unittest.TestCase): def test_parse(self): self.maxDiff = None data_bin = b"\x00\x02\x00\x51\x07\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00" \ b"\x18\x02\x00\x00\x04\x00\x00\x00\xc8\x02\x01\x00\x04\x00\x00\x00" \ b"\x00\x02\x05\x00\x04\x00\x00\x00" \ b"\xc8\x01\x01\x00\x18\x02\x00\x00\x04\x00\x00\x01\x2c\x02\x01\x00" \ b"\x04\x00\x00\x00\x00\x02\x05\x00" \ b"\x04\x00\x00\x01\x2c\x01\x03\x00\x04\xc0\xa8\x04\x03\x01\x04\x00" \ b"\x04\xc0\xa8\x04\x04" data_dict = [ { 'type': 'link', 'protocol_id': 7, 'instances_id': 0, 'descriptors': [ { 'type': 'local_node', 'value': { 'as_num': 200, 'bgpls_id': '0.0.0.0', # 'bgp_router_id': '3.3.3.3', 'member_as_num': 200}}, { 'type': 'remote_node', 'value': { 'as_num': 300, 'bgpls_id': '0.0.0.0', # 'bgp_router_id': '4.4.4.4', 'member_as_num': 300}}, { 'type': 'link_local_ipv4', 'value': '192.168.4.3'}, { 'type': 'link_remote_ipv4', 'value': '192.168.4.4'}, ] } ] self.assertEqual(data_dict, BGPLS.parse(data_bin))
examples/train_simple_net.py
HyeongminMoon/PatrickStar
494
11117699
<gh_stars>100-1000 # BSD 3-Clause License # # Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the psutil authors nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import logging import torch from patrickstar.runtime import initialize_engine from patrickstar.utils import logger from simple_net import SimpleModel, get_bert_data_loader device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") BATCH_SIZE = 8 HIDDEN_DIM = 4 SEQ_LEN = 128 def model_func(): return SimpleModel( hidden_dim=HIDDEN_DIM, seq_len=SEQ_LEN, is_ckp=True, is_share_param=True ) LR = 5e-5 BETAS = (0.9, 0.999) EPS = 1e-6 WEIGHT_DECAY = 0 # TEST_CASE = "torch" TEST_CASE = "patrickstar" logger.setLevel(logging.WARNING) print(f"TEST_CASE {TEST_CASE}") config = { # The same format as optimizer config of DeepSpeed # https://www.deepspeed.ai/docs/config-json/#optimizer-parameters "optimizer": { "type": "Adam", "params": { "lr": LR, "betas": BETAS, "eps": EPS, "weight_decay": WEIGHT_DECAY, "use_hybrid_adam": True, }, }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 2 ** 3, "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1, }, "default_chunk_size": 1024, "use_fake_dist": False, "use_cpu_embedding": False, } torch.manual_seed(0) if TEST_CASE == "patrickstar": model, optim = initialize_engine(model_func=model_func, local_rank=0, config=config) elif TEST_CASE == "torch": model = model_func() optim = torch.optim.Adam( model.parameters(), LR=LR, BETAS=BETAS, EPS=EPS, WEIGHT_DECAY=WEIGHT_DECAY ) model.cuda() else: raise RuntimeError train_loader = get_bert_data_loader(BATCH_SIZE, 10000, 128, device, False) for epoch in range(3): for i, batch in enumerate(train_loader): optim.zero_grad() input_ids, labels = batch loss = model(input_ids, labels) if TEST_CASE == "patrickstar": model.backward(loss) optim.step() elif TEST_CASE == "torch": loss.backward() optim.zero_grad() optim.step() print(i, loss.item()) if i == 10: exit() model.eval()
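A note on the plain-PyTorch baseline branch above: torch.optim.Adam expects lowercase keyword arguments (lr, betas, eps, weight_decay), so the uppercase names would raise a TypeError, and calling optim.zero_grad() between loss.backward() and optim.step() discards the freshly computed gradients. A corrected sketch of that branch, reusing the constants and model defined above (illustrative only):

optim = torch.optim.Adam(
    model.parameters(), lr=LR, betas=BETAS, eps=EPS, weight_decay=WEIGHT_DECAY
)
for i, (input_ids, labels) in enumerate(train_loader):
    optim.zero_grad()   # clear gradients before the forward/backward pass
    loss = model(input_ids, labels)
    loss.backward()     # plain backward for the torch baseline
    optim.step()        # apply the update using the fresh gradients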
src/dataloaders/et.py
dumpmemory/state-spaces
513
11117710
""" ET Dataset from Informer Paper. Dataset: https://github.com/zhouhaoyi/ETDataset Dataloader: https://github.com/zhouhaoyi/Informer2020 """ from typing import List import os import numpy as np import pandas as pd from pandas.tseries import offsets from pandas.tseries.frequencies import to_offset import torch from torch.utils import data from torch.utils.data import Dataset, DataLoader import warnings warnings.filterwarnings("ignore") from src.dataloaders.datasets import SequenceDataset, default_data_path class TimeFeature: def __init__(self): pass def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: pass def __repr__(self): return self.__class__.__name__ + "()" class SecondOfMinute(TimeFeature): """Minute of hour encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return index.second / 59.0 - 0.5 class MinuteOfHour(TimeFeature): """Minute of hour encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return index.minute / 59.0 - 0.5 class HourOfDay(TimeFeature): """Hour of day encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return index.hour / 23.0 - 0.5 class DayOfWeek(TimeFeature): """Hour of day encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return index.dayofweek / 6.0 - 0.5 class DayOfMonth(TimeFeature): """Day of month encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return (index.day - 1) / 30.0 - 0.5 class DayOfYear(TimeFeature): """Day of year encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return (index.dayofyear - 1) / 365.0 - 0.5 class MonthOfYear(TimeFeature): """Month of year encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return (index.month - 1) / 11.0 - 0.5 class WeekOfYear(TimeFeature): """Week of year encoded as value between [-0.5, 0.5]""" def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: return (index.isocalendar().week - 1) / 52.0 - 0.5 def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: """ Returns a list of time features that will be appropriate for the given frequency string. Parameters ---------- freq_str Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. 
""" features_by_offsets = { offsets.YearEnd: [], offsets.QuarterEnd: [MonthOfYear], offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], offsets.Minute: [ MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], offsets.Second: [ SecondOfMinute, MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], } offset = to_offset(freq_str) for offset_type, feature_classes in features_by_offsets.items(): if isinstance(offset, offset_type): return [cls() for cls in feature_classes] supported_freq_msg = f""" Unsupported frequency {freq_str} The following frequencies are supported: Y - yearly alias: A M - monthly W - weekly D - daily B - business days H - hourly T - minutely alias: min S - secondly """ raise RuntimeError(supported_freq_msg) def time_features(dates, timeenc=1, freq="h"): """ > `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0: > * m - [month] > * w - [month] > * d - [month, day, weekday] > * b - [month, day, weekday] > * h - [month, day, weekday, hour] > * t - [month, day, weekday, hour, *minute] > > If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]): > * Q - [month] > * M - [month] > * W - [Day of month, week of year] > * D - [Day of week, day of month, day of year] > * B - [Day of week, day of month, day of year] > * H - [Hour of day, day of week, day of month, day of year] > * T - [Minute of hour*, hour of day, day of week, day of month, day of year] > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year] *minute returns a number from 0-3 corresponding to the 15 minute period it falls into. 
""" if timeenc == 0: dates["month"] = dates.date.apply(lambda row: row.month, 1) dates["day"] = dates.date.apply(lambda row: row.day, 1) dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1) dates["hour"] = dates.date.apply(lambda row: row.hour, 1) dates["minute"] = dates.date.apply(lambda row: row.minute, 1) dates["minute"] = dates.minute.map(lambda x: x // 15) freq_map = { "y": [], "m": ["month"], "w": ["month"], "d": ["month", "day", "weekday"], "b": ["month", "day", "weekday"], "h": ["month", "day", "weekday", "hour"], "t": ["month", "day", "weekday", "hour", "minute"], } return dates[freq_map[freq.lower()]].values if timeenc == 1: dates = pd.to_datetime(dates.date.values) return np.vstack( [feat(dates) for feat in time_features_from_frequency_str(freq)] ).transpose(1, 0) class StandardScaler: def __init__(self): self.mean = 0.0 self.std = 1.0 def fit(self, data): self.mean = data.mean(0) self.std = data.std(0) def transform(self, data): mean = ( torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean ) std = ( torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std ) return (data - mean) / std def inverse_transform(self, data): mean = ( torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean ) std = ( torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std ) return (data * std) + mean class InformerDataset(Dataset): def __init__( self, root_path, flag="train", size=None, features="S", data_path="ETTh1.csv", target="OT", scale=True, inverse=False, timeenc=0, freq="h", cols=None, eval_stamp=False, eval_mask=False, ): # size [seq_len, label_len, pred_len] # info if size == None: self.seq_len = 24 * 4 * 4 self.label_len = 24 * 4 self.pred_len = 24 * 4 else: self.seq_len = size[0] self.label_len = size[1] self.pred_len = size[2] # init assert flag in ["train", "test", "val"] type_map = {"train": 0, "val": 1, "test": 2} self.set_type = type_map[flag] self.features = features self.target = target self.scale = scale self.inverse = inverse self.timeenc = timeenc self.freq = freq self.cols = cols self.eval_stamp = eval_stamp self.eval_mask = eval_mask self.root_path = root_path self.data_path = data_path self.__read_data__() def _borders(self, df_raw): num_train = int(len(df_raw) * 0.7) num_test = int(len(df_raw) * 0.2) num_vali = len(df_raw) - num_train - num_test border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len] border2s = [num_train, num_train + num_vali, len(df_raw)] return border1s, border2s def _process_columns(self, df_raw): if self.cols: cols = self.cols.copy() cols.remove(self.target) else: cols = list(df_raw.columns) cols.remove(self.target) cols.remove("date") return df_raw[["date"] + cols + [self.target]] def __read_data__(self): self.scaler = StandardScaler() df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path)) df_raw = self._process_columns(df_raw) border1s, border2s = self._borders(df_raw) border1 = border1s[self.set_type] border2 = border2s[self.set_type] if self.features == "M" or self.features == "MS": cols_data = df_raw.columns[1:] df_data = df_raw[cols_data] elif self.features == "S": df_data = df_raw[[self.target]] if self.scale: train_data = df_data[border1s[0] : border2s[0]] self.scaler.fit(train_data.values) data = self.scaler.transform(df_data.values) else: data = df_data.values df_stamp = df_raw[["date"]][border1:border2] df_stamp["date"] = 
pd.to_datetime(df_stamp.date) data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq) self.data_x = data[border1:border2] if self.inverse: self.data_y = df_data.values[border1:border2] else: self.data_y = data[border1:border2] self.data_stamp = data_stamp def __getitem__(self, index): s_begin = index s_end = s_begin + self.seq_len r_begin = s_end - self.label_len r_end = r_begin + self.label_len + self.pred_len seq_x = self.data_x[s_begin:s_end] seq_x = np.concatenate( [seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0 ) if self.inverse: seq_y = np.concatenate( [ self.data_x[r_begin : r_begin + self.label_len], self.data_y[r_begin + self.label_len : r_end], ], 0, ) raise NotImplementedError else: # seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase seq_y = self.data_y[s_end:r_end] # OLD in Informer codebase # seq_x_mark = self.data_stamp[s_begin:s_end] # seq_y_mark = self.data_stamp[r_begin:r_end] if self.eval_stamp: mark = self.data_stamp[s_begin:r_end] else: mark = self.data_stamp[s_begin:s_end] mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0) if self.eval_mask: mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0) else: mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0) mask = mask[:, None] # Add the mask to the timestamps: # 480, 5 # mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1) seq_x = seq_x.astype(np.float32) seq_y = seq_y.astype(np.float32) if self.timeenc == 0: mark = mark.astype(np.int64) else: mark = mark.astype(np.float32) mask = mask.astype(np.int64) return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask) def __len__(self): return len(self.data_x) - self.seq_len - self.pred_len + 1 def inverse_transform(self, data): return self.scaler.inverse_transform(data) @property def d_input(self): return self.data_x.shape[-1] @property def d_output(self): if self.features in ["M", "S"]: return self.data_x.shape[-1] elif self.features == "MS": return 1 else: raise NotImplementedError @property def n_tokens_time(self): if self.freq == 'h': return [13, 32, 7, 24] elif self.freq == 't': return [13, 32, 7, 24, 4] else: raise NotImplementedError class _Dataset_ETT_hour(InformerDataset): def __init__(self, **kwargs): super().__init__(**kwargs) def _borders(self, df_raw): border1s = [ 0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len, ] border2s = [ 12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24, ] return border1s, border2s def _process_columns(self, df_raw): return df_raw @property def n_tokens_time(self): assert self.freq == "h" return [13, 32, 7, 24] class _Dataset_ETT_minute(_Dataset_ETT_hour): def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs): super().__init__(data_path=data_path, freq=freq, **kwargs) def _borders(self, df_raw): border1s = [ 0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len, ] border2s = [ 12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4, ] return border1s, border2s @property def n_tokens_time(self): assert self.freq == "t" return [13, 32, 7, 24, 4] class _Dataset_Weather(InformerDataset): def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs): super().__init__(data_path=data_path, target=target, **kwargs) class _Dataset_ECL(InformerDataset): def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs): super().__init__(data_path=data_path, 
target=target, **kwargs) class InformerSequenceDataset(SequenceDataset): @property def n_tokens_time(self): # Shape of the dates: depends on `timeenc` and `freq` return self.dataset_train.n_tokens_time # data_stamp.shape[-1] @property def d_input(self): return self.dataset_train.d_input @property def d_output(self): return self.dataset_train.d_output @property def l_output(self): return self.dataset_train.pred_len def _get_data_filename(self, variant): return self.variants[variant] @staticmethod def collate_fn(batch, resolution): x, y, *z = zip(*batch) x = torch.stack(x, dim=0)[:, ::resolution] y = torch.stack(y, dim=0) z = [torch.stack(e, dim=0)[:, ::resolution] for e in z] return x, y, *z def setup(self): self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_ self.dataset_train = self._dataset_cls( root_path=self.data_dir, flag="train", size=self.size, features=self.features, data_path=self._get_data_filename(self.variant), target=self.target, scale=self.scale, inverse=self.inverse, timeenc=self.timeenc, freq=self.freq, cols=self.cols, eval_stamp=self.eval_stamp, eval_mask=self.eval_mask, ) self.dataset_val = self._dataset_cls( root_path=self.data_dir, flag="val", size=self.size, features=self.features, data_path=self._get_data_filename(self.variant), target=self.target, scale=self.scale, inverse=self.inverse, timeenc=self.timeenc, freq=self.freq, cols=self.cols, eval_stamp=self.eval_stamp, eval_mask=self.eval_mask, ) self.dataset_test = self._dataset_cls( root_path=self.data_dir, flag="test", size=self.size, features=self.features, data_path=self._get_data_filename(self.variant), target=self.target, scale=self.scale, inverse=self.inverse, timeenc=self.timeenc, freq=self.freq, cols=self.cols, eval_stamp=self.eval_stamp, eval_mask=self.eval_mask, ) class ETTHour(InformerSequenceDataset): _name_ = "etth" _dataset_cls = _Dataset_ETT_hour init_defaults = { "size": None, "features": "S", "target": "OT", "variant": 0, "scale": True, "inverse": False, "timeenc": 0, "freq": "h", "cols": None, } variants = { 0: "ETTh1.csv", 1: "ETTh2.csv", } class ETTMinute(InformerSequenceDataset): _name_ = "ettm" _dataset_cls = _Dataset_ETT_minute init_defaults = { "size": None, "features": "S", "target": "OT", "variant": 0, "scale": True, "inverse": False, "timeenc": 0, "freq": "t", "cols": None, } variants = { 0: "ETTm1.csv", 1: "ETTm2.csv", } class Weather(InformerSequenceDataset): _name_ = "weather" _dataset_cls = _Dataset_Weather init_defaults = { "size": None, "features": "S", "target": "WetBulbCelsius", "variant": 0, "scale": True, "inverse": False, "timeenc": 0, "freq": "h", "cols": None, } variants = { 0: "WTH.csv", } class ECL(InformerSequenceDataset): _name_ = "ecl" _dataset_cls = _Dataset_ECL init_defaults = { "size": None, "features": "S", "target": "MT_320", "variant": 0, "scale": True, "inverse": False, "timeenc": 0, "freq": "h", "cols": None, } variants = { 0: "ECL.csv", }
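A minimal smoke test for the dataset classes above (a sketch that assumes the repo layout so the module imports resolve, and that ETTh1.csv from the ETDataset repository linked in the module docstring has been downloaded to ./data):

ds = InformerDataset(
    root_path="./data", flag="train", size=(96, 48, 24),
    features="S", data_path="ETTh1.csv", target="OT",
)
seq_x, seq_y, mark, mask = ds[0]
# seq_x: 96 observed steps padded with 24 zero steps for the forecast window;
# seq_y: the 24 target steps; mark/mask: time features and the prediction mask.
print(seq_x.shape, seq_y.shape, mark.shape, mask.shape)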
questions/permutations-ii/Solution.py
marcus-aurelianus/leetcode-solutions
141
11117751
'''
Given a collection of numbers that might contain duplicates, return all possible unique permutations.

Example:

Input: [1,1,2]
Output:
[
  [1,1,2],
  [1,2,1],
  [2,1,1]
]
'''
from typing import List
from collections import Counter


class Solution:
    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        def generate_permutations(candidates, curr, perms):
            if sum(candidates.values()) == 0:
                perms.append(list(curr))
                return
            for n in list(candidates.keys()):
                if candidates[n] == 0:
                    continue
                curr.append(n)
                candidates[n] -= 1
                generate_permutations(candidates, curr, perms)
                curr.pop()
                candidates[n] += 1

        curr = []
        candidates = Counter(nums)
        perms = []
        generate_permutations(candidates, curr, perms)
        return perms
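A quick check of the example from the docstring (illustrative):

print(Solution().permuteUnique([1, 1, 2]))
# [[1, 1, 2], [1, 2, 1], [2, 1, 1]]  (the order of the permutations may differ)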
custom_components/dyson_local/config_flow.py
austinbeam/ha-dyson
158
11117770
"""Config flow for Dyson integration.""" import logging import threading from typing import Optional from libdyson import DEVICE_TYPE_NAMES, get_device, get_mqtt_info_from_wifi_info from libdyson.cloud import DysonDeviceInfo from libdyson.discovery import DysonDiscovery from libdyson.exceptions import ( DysonException, DysonFailedToParseWifiInfo, DysonInvalidCredential, ) import voluptuous as vol from homeassistant import config_entries from homeassistant.components.zeroconf import async_get_instance from homeassistant.const import CONF_HOST, CONF_NAME from homeassistant.exceptions import HomeAssistantError from .const import CONF_CREDENTIAL, CONF_DEVICE_TYPE, CONF_SERIAL, DOMAIN _LOGGER = logging.getLogger(__name__) DISCOVERY_TIMEOUT = 10 CONF_METHOD = "method" CONF_SSID = "ssid" CONF_PASSWORD = "password" SETUP_METHODS = { "wifi": "Setup using WiFi information", "manual": "Setup manually", } class DysonLocalConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Dyson local config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH def __init__(self): """Initialize the config flow.""" self._device_info = None async def async_step_user(self, info: Optional[dict] = None): """Handle step initialized by user.""" if info is not None: if info[CONF_METHOD] == "wifi": return await self.async_step_wifi() return await self.async_step_manual() return self.async_show_form( step_id="user", data_schema=vol.Schema({vol.Required(CONF_METHOD): vol.In(SETUP_METHODS)}), ) async def async_step_wifi(self, info: Optional[dict] = None): """Handle step to setup using device WiFi information.""" errors = {} if info is not None: try: serial, credential, device_type = get_mqtt_info_from_wifi_info( info[CONF_SSID], info[CONF_PASSWORD] ) except DysonFailedToParseWifiInfo: errors["base"] = "cannot_parse_wifi_info" else: device_type_name = DEVICE_TYPE_NAMES[device_type] _LOGGER.debug("Successfully parse WiFi information") _LOGGER.debug("Serial: %s", serial) _LOGGER.debug("Device Type: %s", device_type) _LOGGER.debug("Device Type Name: %s", device_type_name) try: data = await self._async_get_entry_data( serial, credential, device_type, device_type_name, info.get(CONF_HOST), ) except InvalidAuth: errors["base"] = "invalid_auth" except CannotConnect: errors["base"] = "cannot_connect" except CannotFind: errors["base"] = "cannot_find" else: return self.async_create_entry( title=device_type_name, data=data, ) info = info or {} return self.async_show_form( step_id="wifi", data_schema=vol.Schema( { vol.Required(CONF_SSID, default=info.get(CONF_SSID, "")): str, vol.Required( CONF_PASSWORD, default=info.get(CONF_PASSWORD, "") ): str, vol.Optional(CONF_HOST, default=info.get(CONF_HOST, "")): str, } ), errors=errors, ) async def async_step_manual(self, info: Optional[dict] = None): """Handle step to setup manually.""" errors = {} if info is not None: serial = info[CONF_SERIAL] for entry in self._async_current_entries(): if entry.unique_id == serial: return self.async_abort(reason="already_configured") await self.async_set_unique_id(serial) self._abort_if_unique_id_configured() device_type = info[CONF_DEVICE_TYPE] device_type_name = DEVICE_TYPE_NAMES[device_type] try: data = await self._async_get_entry_data( serial, info[CONF_CREDENTIAL], device_type, device_type_name, info.get(CONF_HOST), ) except InvalidAuth: errors["base"] = "invalid_auth" except CannotConnect: errors["base"] = "cannot_connect" except CannotFind: errors["base"] = "cannot_find" else: return self.async_create_entry( 
title=device_type_name, data=data, ) info = info or {} return self.async_show_form( step_id="manual", data_schema=vol.Schema( { vol.Required(CONF_SERIAL, default=info.get(CONF_SERIAL, "")): str, vol.Required( CONF_CREDENTIAL, default=info.get(CONF_CREDENTIAL, "") ): str, vol.Required( CONF_DEVICE_TYPE, default=info.get(CONF_DEVICE_TYPE, "") ): vol.In(DEVICE_TYPE_NAMES), vol.Optional(CONF_HOST, default=info.get(CONF_HOST, "")): str, } ), errors=errors, ) async def async_step_host(self, info: Optional[dict] = None): """Handle step to set host.""" errors = {} if info is not None: try: data = await self._async_get_entry_data( self._device_info.serial, self._device_info.credential, self._device_info.product_type, self._device_info.name, info.get(CONF_HOST), ) except CannotConnect: errors["base"] = "cannot_connect" except CannotFind: errors["base"] = "cannot_find" else: return self.async_create_entry( title=self._device_info.name, data=data, ) info = info or {} return self.async_show_form( step_id="host", data_schema=vol.Schema( {vol.Optional(CONF_HOST, default=info.get(CONF_HOST, "")): str} ), errors=errors, ) async def async_step_discovery(self, info: DysonDeviceInfo): """Handle step initialized by dyson_cloud discovery.""" for entry in self._async_current_entries(): if entry.unique_id == info.serial: return self.async_abort(reason="already_configured") await self.async_set_unique_id(info.serial) self._abort_if_unique_id_configured() self.context["title_placeholders"] = { CONF_NAME: info.name, CONF_SERIAL: info.serial, } self._device_info = info return await self.async_step_host() async def _async_get_entry_data( self, serial: str, credential: str, device_type: str, name: str, host: Optional[str] = None, ) -> Optional[str]: """Try connect and return config entry data.""" await self._async_try_connect(serial, credential, device_type, host) return { CONF_SERIAL: serial, CONF_CREDENTIAL: credential, CONF_DEVICE_TYPE: device_type, CONF_NAME: name, CONF_HOST: host, } async def _async_try_connect( self, serial: str, credential: str, device_type: str, host: Optional[str] = None, ) -> None: """Try connect.""" device = get_device(serial, credential, device_type) # Find device using discovery if not host: discovered = threading.Event() def _callback(address: str) -> None: _LOGGER.debug("Found device at %s", address) nonlocal host host = address discovered.set() discovery = DysonDiscovery() discovery.register_device(device, _callback) discovery.start_discovery(await async_get_instance(self.hass)) succeed = await self.hass.async_add_executor_job( discovered.wait, DISCOVERY_TIMEOUT ) discovery.stop_discovery() if not succeed: _LOGGER.debug("Discovery timed out") raise CannotFind # Try connect to the device try: device.connect(host) except DysonInvalidCredential: raise InvalidAuth except DysonException as err: _LOGGER.debug("Failed to connect to device: %s", err) raise CannotConnect class CannotConnect(HomeAssistantError): """Represents connection failure.""" class CannotFind(HomeAssistantError): """Represents discovery failure.""" class InvalidAuth(HomeAssistantError): """Represents invalid authentication."""
harmonica/tests/utils.py
fatiando/harmonica
128
11117832
# Copyright (c) 2018 The Harmonica Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
# pylint: disable=invalid-name
"""
Decorators and useful functions for running tests
"""
import os

import pytest


def combine_decorators(*decorators):
    """
    Combine several decorators into a single one
    """

    def combination(func):
        for decorator in reversed(decorators):
            func = decorator(func)
        return func

    return combination


# Check if Numba is disabled
# (if NUMBA_DISABLE_JIT is not defined, we assume Numba jit is enabled)
NUMBA_IS_DISABLED = bool(os.environ.get("NUMBA_DISABLE_JIT", default="0") != "0")

# Decorator for pytest: run if Numba jit is enabled
#
# Tell pytest to run the test function only if Numba jit is enabled. To disable
# Numba jit, the environment variable ``NUMBA_DISABLE_JIT`` must be set to
# a value different than 0.
#
# Use this decorator on test functions that involve a great computational load
# and should not run if Numba jit is disabled. The decorated test functions
# will be run and checked for pass or fail, but won't be taken into account for
# measuring coverage. If the test function runs Numba code but doesn't involve
# a great computational load, we recommend using ``@pytest.mark.use_numba``
# instead. That way the test function will be run twice: once with Numba jit
# enabled, and once with Numba jit disabled to check coverage.
run_only_with_numba = combine_decorators(
    pytest.mark.skipif(NUMBA_IS_DISABLED, reason="Numba jit is disabled"),
    pytest.mark.use_numba,
)
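Typical use of the combined decorator in a test module (an illustrative sketch; the test name and body are made up):

@run_only_with_numba
def test_heavy_forward_modelling():
    """Runs only when Numba jit is enabled and is also marked with use_numba."""
    assert True  # placeholder body for illustration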
examples/infer_states.py
zlapp/keras-ncp
1,087
11117844
# Copyright (2017-2020) # The Wormnet project # <NAME> (<EMAIL>) import numpy as np import tensorflow as tf import kerasncp as kncp from kerasncp.tf import LTCCell data_x = np.random.default_rng().normal(size=(100, 16, 10)) data_y = np.random.default_rng().normal(size=(100, 16, 1)) print("data_y.shape: ", str(data_y.shape)) wiring = kncp.wirings.FullyConnected(16, 8) # 16 units, 8 motor neurons rnn_cell = LTCCell(wiring) dense1 = tf.keras.layers.Dense(16, activation="tanh") dense2 = tf.keras.layers.Dense(1) inputs = tf.keras.Input(shape=(None, 10)) x = dense1(inputs) x = tf.keras.layers.RNN(rnn_cell, return_sequences=True)(x) x = dense2(x) trainable_model = tf.keras.Model(inputs, x) trainable_model.compile( optimizer=tf.keras.optimizers.Adam(0.01), loss=tf.keras.losses.MeanSquaredError() ) trainable_model.fit(x=data_x, y=data_y, batch_size=25, epochs=10) trainable_model.evaluate(x=data_x, y=data_y) # Now we need to construct a single-step model that accepts an initial hidden state as additional input inputs_single = tf.keras.Input(shape=(10,)) inputs_state = tf.keras.Input(shape=(rnn_cell.state_size,)) x = dense1(inputs_single) _, output_states = rnn_cell(x, inputs_state) single_step_model = tf.keras.Model([inputs_single, inputs_state], output_states) def infer_hidden_states(single_step_model, state_size, data_x): """ Infers the hidden states of a single-step RNN model Args: single_step_model: RNN model taking a pair (inputs,old_hidden_state) as input and outputting new_hidden_state state_size: Size of the RNN model (=number of units) data_x: Input data for which the hidden states should be inferred Returns: Tensor of shape (batch_size,sequence_length+1,state_size). The sequence starts with the initial hidden state (all zeros) and is therefore one time-step longer than the input sequence """ batch_size = data_x.shape[0] seq_len = data_x.shape[1] hidden = tf.zeros((batch_size, state_size)) hidden_states = [hidden] for t in range(seq_len): # Compute new hidden state from old hidden state + input at time t print("hidden.shape", hidden) hidden = single_step_model([data_x[:, t], hidden]) print("all", hidden) print("all", len(hidden)) hidden_states.append(hidden) return tf.stack(hidden_states, axis=1) # Now we can infer the hidden state states = infer_hidden_states(single_step_model, rnn_cell.state_size, data_x) print("Hidden states of first example ", states[0]) for i in range(wiring.units): print("Neuron {:0d} is a {:} neuron".format(i, wiring.get_type_of_neuron(i)))
test_runner/conftest.py
libzenith/zenith
189
11117846
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
sdk/storage/azure-storage-file-datalake/tests/perfstress_tests/upload.py
rsdoherty/azure-sdk-for-python
2,728
11117850
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from ._test_base import _FileTest from azure_devtools.perfstress_tests import RandomStream from azure_devtools.perfstress_tests import AsyncRandomStream class UploadTest(_FileTest): def __init__(self, arguments): super().__init__(arguments) self.upload_stream = RandomStream(self.args.size) self.upload_stream_async = AsyncRandomStream(self.args.size) def run_sync(self): self.upload_stream.reset() self.file_client.upload_data( self.upload_stream, length=self.args.size, overwrite=True, max_concurrency=self.args.max_concurrency) async def run_async(self): self.upload_stream_async.reset() await self.async_file_client.upload_data( self.upload_stream_async, length=self.args.size, overwrite=True, max_concurrency=self.args.max_concurrency)
commandment/alembics/versions/2f1507bf6dc1_create_application_manifests_table.py
pythonModule/commandment
138
11117851
"""create application_manifests table Revision ID: 2f1507bf6dc1 Revises: <KEY> Create Date: 2017-10-15 17:37:04.645717 """ from alembic import op import sqlalchemy as sa import commandment.dbtypes from alembic import context # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '<KEY>' branch_labels = None depends_on = None def upgrade(): schema_upgrades() # if context.get_x_argument(as_dictionary=True).get('data', None): # data_upgrades() def downgrade(): # if context.get_x_argument(as_dictionary=True).get('data', None): # data_downgrades() schema_downgrades() def schema_upgrades(): op.create_table( 'application_manifests', sa.Column('id', sa.Integer(), primary_key=True), sa.Column('bundle_id', sa.String(), nullable=False), sa.Column('bundle_version', sa.String()), sa.Column('kind', sa.String()), sa.Column('size_in_bytes', sa.BigInteger()), sa.Column('subtitle', sa.String()), sa.Column('title', sa.String()), sa.Column('display_image_url', sa.String()), sa.Column('display_image_needs_shine', sa.Boolean()), sa.Column('full_size_image_url', sa.String()), sa.Column('full_size_image_needs_shine', sa.Boolean()), #op.add_column('application_manifests', sa.Column('full_size_image_needs_shine', sa.Boolean(), nullable=True)) #op.add_column('application_manifests', sa.Column('full_size_image_url', sa.String(), nullable=True)) # sa.UniqueConstraint('bundle_id', 'bundle_version', name='uq_application_bundle_version') ) op.create_table( 'application_manifest_checksums', sa.Column('id', sa.Integer(), primary_key=True), sa.Column('application_manifest_id', sa.Integer(), nullable=True), sa.Column('checksum_index', sa.Integer(), nullable=False), sa.Column('checksum_value', sa.String(), nullable=False), sa.ForeignKeyConstraint(['application_manifest_id'], ['application_manifests.id']), # sa.ForeignKeyConstraint(['application_manifest_id'], ['application_manifests.id'], ondelete="CASCADE"), # sa.UniqueConstraint('application_manifest_id', 'checksum_index', name='uq_application_checksum_index') ) # Commented items from an earlier migration: # op.create_table('applications_manifests', # sa.Column('id', sa.Integer(), nullable=False), # sa.Column('bundle_id', sa.String(), nullable=False), # sa.Column('bundle_version', sa.String(), nullable=True), # sa.Column('kind', sa.String(), nullable=True), # sa.Column('size_in_bytes', sa.BigInteger(), nullable=True), # sa.Column('subtitle', sa.String(), nullable=True), # sa.Column('title', sa.String(), nullable=True), # sa.PrimaryKeyConstraint('id') # ) # op.create_index(op.f('ix_applications_manifests_bundle_id'), 'applications_manifests', ['bundle_id'], unique=False) # op.create_index(op.f('ix_applications_manifests_bundle_version'), 'applications_manifests', ['bundle_version'], # unique=False) # op.create_table('application_manifest_checksums', # sa.Column('id', sa.Integer(), nullable=False), # sa.Column('application_manifest_id', sa.Integer(), nullable=True), # sa.Column('checksum_index', sa.Integer(), nullable=False), # sa.Column('checksum_value', sa.String(), nullable=False), # sa.ForeignKeyConstraint(['application_manifest_id'], ['applications_manifests.id'], ), # sa.PrimaryKeyConstraint('id') # ) # op.create_unique_constraint( # op.f('uq_application_manifest_checksum_manifest_index'), # 'application_manifest_checksums', ['application_manifest_id', 'checksum_index']) def schema_downgrades(): """schema downgrade migrations go here.""" # Commented items from an earlier migration: # 
op.drop_constraint(op.f('uq_application_manifest_checksum_manifest_index'), # table_name='application_manifest_checksums') # op.drop_table('application_manifest_checksums') # op.drop_index(op.f('ix_applications_manifests_bundle_version'), table_name='applications_manifests') # op.drop_index(op.f('ix_applications_manifests_bundle_id'), table_name='applications_manifests') # op.drop_table('applications_manifests') op.drop_table('application_manifest_checksums') op.drop_table('application_manifests') def data_upgrades(): """Add any optional data upgrade migrations here!""" pass def data_downgrades(): """Add any optional data downgrade migrations here!""" pass
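The commented-out hooks in upgrade()/downgrade() gate the optional data migrations on Alembic's -x arguments (passed on the command line as -x data=true). If those hooks were re-enabled, upgrade() would look like this illustrative sketch, using the same context.get_x_argument pattern as the comments:

def upgrade():
    schema_upgrades()
    # Only run the optional data migrations when invoked with `-x data=true`.
    if context.get_x_argument(as_dictionary=True).get('data', None):
        data_upgrades()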
tests/cli_tests/test_history.py
itamarhaber/iredis
1,857
11117896
import os import pexpect from pathlib import Path from textwrap import dedent def test_history_not_log_auth(cli): cli.sendline("AUTH 123") cli.expect(["Client sent AUTH, but no password is set", "127.0.0.1"]) cli.sendline("set foo bar") cli.expect("OK") with open(os.path.expanduser("~/.iredis_history"), "r") as history_file: content = history_file.read() assert "set foo bar" in content assert "AUTH" not in content def test_history_create_and_writing_with_config(): config_content = dedent( """ [main] history_location = /tmp/iredis_history.txt """ ) with open("/tmp/iredisrc", "w+") as etc_config: etc_config.write(config_content) cli = pexpect.spawn("iredis -n 15 --iredisrc /tmp/iredisrc", timeout=2) cli.expect("127.0.0.1") cli.sendline("set hello world") cli.expect("OK") cli.close() log = Path("/tmp/iredis_history.txt") assert log.exists() with open(log, "r") as logfile: content = logfile.read() assert "set hello world" in content
SREval.py
EdwardSmith1884/Multi-View-Silhouette-and-Depth-Decomposition-for-High-Resolution-3D-Object-Representation
123
11117911
import tensorflow as tf import os import sys sys.path.insert(0, './scripts/') import tensorlayer as tl import numpy as np import random from utils import * from models import * import argparse parser = argparse.ArgumentParser(description='3D-GAN implementation for 32*32*32 voxel output') parser.add_argument('-o','--object', default='chair', help='The name of the current experiment, this will be used to create folders and save models.') parser.add_argument('-b','--batchsize', default=32, help ='The batch size.', type=int) parser.add_argument('-depth','--depth', default='best', help ='Epoch from which to load the depth map predictor, if you want the best leave default.' ) parser.add_argument('-occ','--occ', default='best', help ='Epoch from which to load the occupancy map predictor, if you want the best leave default.' ) parser.add_argument('-dis','--distance', default=70, help ='The range in which distances will be predicted.', type=int) parser.add_argument('-high', default= 256, help='The size of the high dimension objects.', type= int) parser.add_argument('-low', default= 32, help='The size of the low dimension object.', type= int) args = parser.parse_args() checkpoint_dir = "checkpoint/" + args.object +'/' data_dir = 'data/voxels/' + args.object+ '/test' batchsize = args.batchsize high = args.high low = args.low distance = args.distance ratio = high // low #######inputs########## scope_depth = 'depth' scope_occ = 'occupancy' images_low = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='images_low') # low res odm input side = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='side') # the side being considered combined = tf.concat((images_low, side), axis = 3) ########## network computations ####################### net_depth, depth_pred = upscale(combined, ratio, scope = scope_depth, is_train=False, reuse = False) net_occ, occ_pred = upscale(combined, ratio, scope = scope_occ, is_train=False, reuse = False) net_depth.print_params(False) net_occ.print_params(False) ##### computing ####### config = tf.ConfigProto() config.gpu_options.allow_growth = True sess=tf.Session() tl.ops.set_gpu_fraction(sess=sess, gpu_fraction=0.999) sess.run(tf.global_variables_initializer()) # load networks for depth and occupancy load_networks(checkpoint_dir, sess, net_depth, args.depth, name ='depth') load_networks(checkpoint_dir, sess, net_occ, args.occ, name = 'occ') files = grab_files(data_dir) for idx in (xrange(0, len(files)/args.batchsize)): odms = [] cur_files = files[idx*batchsize:(idx+1)*batchsize] # loops over all sides for k in range(6): batch, _ = make_batch(cur_files, high, low, side = k, valid = True) depths, occs = sess.run([depth_pred,occ_pred], feed_dict={images_low:batch['low'], side: batch['side']}) odms.append(recover_odms(depths, occs, batch['low_up'], high, low, distance, threshold = 1.5*high//low)) # combining depths and occupancy maps to recover full odms # combining information odms = zip(odms[0], odms[1], odms[2], odms[3], odms[4], odms[5]) objs, small_objs = make_objs(cur_files) # loading the ground truth object and input object batch_predictions = zip(odms, objs, small_objs) # looping over batch for odm, obj, small_obj in (batch_predictions): small_obj = upsample(small_obj, high, low) prediction = apply_occupancy(np.array(small_obj), np.array(odm), high) prediction = apply_depth(np.array(prediction),np.array(odm),high) evaluate_SR(prediction, obj, small_obj, gt = False) # render model
src/api/dcps/python/test/test_topic_over_the_wire.py
brezillon/opensplice
133
11117913
<reponame>brezillon/opensplice<filename>src/api/dcps/python/test/test_topic_over_the_wire.py # # Vortex OpenSplice # # This software and documentation are Copyright 2006 to TO_YEAR ADLINK # Technology Limited, its affiliated companies and licensors. All rights # reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Created on Nov 23, 2017 @author: prismtech ''' import sys from symbol import except_clause sys.path.append('../dds') from dds import DomainParticipant, Topic, Listener, Qos, DurabilityQosPolicy, DDSDurabilityKind, DDSException import unittest import time import xmlrunner import ddsutil from threading import Event # Data available listener class DataAvailableListener(Listener): def __init__(self, holder, event): Listener.__init__(self) self.holder = holder self.event = event def on_data_available(self, entity): l = entity.read(10) for (sd, si) in l: if si.valid_data: sd.print_vars() self.holder.result = str(sd) self.event.set() class ResultHolder(object): def __init__(self): self._result = '' @property def result(self): return self._result @result.setter def result(self, value): self._result = value class Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testAccessingTopicMetaData(self): idl_path = 'idl/' + 'Thales' + '.idl' type_info = 'test::C_TopicStruct' gen_info = ddsutil.get_dds_classes_from_idl(idl_path, type_info) dp1 = DomainParticipant() tq1 = Qos([DurabilityQosPolicy(DDSDurabilityKind.PERSISTENT)]) topic_dp1 = gen_info.register_topic(dp1, 'DP1_C_TopicStruct',tq1) self.assertEqual('DP1_C_TopicStruct', topic_dp1.name, 'Topic name not as expected') self.assertEqual(type_info, topic_dp1.type_name, 'Type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp1.keylist, 'Key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp1.metadescriptor, 'Meta descriptor not as expected') dp2 = DomainParticipant() topic_dp2 = dp2.find_topic('DP1_C_TopicStruct') self.assertIsNotNone(topic_dp2, 'Found topic is not None') self.assertEqual('DP1_C_TopicStruct', topic_dp2.name, 'Found topic name not as expected') self.assertEqual(type_info, topic_dp2.type_name, 'Found type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp2.keylist, 'Found key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member 
name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp2.metadescriptor, 'Found meta descriptor not as expected') pub2 = dp2.create_publisher() sub2 = dp2.create_subscriber() try: wr2 = pub2.create_datawriter(topic_dp2) self.assertFalse('Expected pub2.create_datawriter to throw exception') except DDSException: pass try: rd2 = sub2.create_datareader(topic_dp2) self.assertFalse('Expected sub2.create_datareader to throw exception') except DDSException: pass # now register the found topic locally, and test reading and writing between the participants local_topic_dp2 = ddsutil.register_found_topic_as_local(topic_dp2) gen_info2 = ddsutil.get_dds_classes_for_found_topic(topic_dp2) self.assertIsNotNone(gen_info2, 'Returned gen_info2 is None') tq2 = topic_dp2.qos self.assertIsNotNone(tq2) # TODO: need get copy topic qos #local_topic_dp2 = register_topic_locally(topic_dp2) # local_topic_dp2 = gen_info2.register_topic(dp2, topic_dp2.get_name(), tq2) # Reader for dp2 result_holder = ResultHolder() event = Event() rd2 = sub2.create_datareader(local_topic_dp2, tq2, DataAvailableListener(result_holder,event)) # Writer for dp1 pub1 = dp1.create_publisher() wr1 = pub1.create_datawriter(topic_dp1, tq1) # create the data Outer = gen_info.get_class('test::C_TopicStruct') Inner = gen_info.get_class('test::T_ID') data1 = Outer() data1.A_ID = Inner(A_ID=1,A_subID=12) data1.A_ForeignIDList = [Inner(A_ID=2,A_subID=23),Inner(A_ID=3,A_subID=34)] data1.value = 42 wr1.write(data1) # let the listener catch up... self.assertTrue(event.wait(10.0), 'wait timed out') self.assertEqual(str(data1), result_holder.result, 'read and write results do not match') def testSimplifiedFindAPI(self): idl_path = 'idl/' + 'Thales' + '.idl' type_info = 'test::C_TopicStruct' gen_info = ddsutil.get_dds_classes_from_idl(idl_path, type_info) dp1 = DomainParticipant() tq1 = Qos([DurabilityQosPolicy(DDSDurabilityKind.PERSISTENT)]) topic_dp1 = gen_info.register_topic(dp1, 'DP1_C_TopicStruct_2',tq1) self.assertEqual('DP1_C_TopicStruct_2', topic_dp1.name, 'Topic name not as expected') self.assertEqual(type_info, topic_dp1.type_name, 'Type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp1.keylist, 'Key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp1.metadescriptor, 'Meta descriptor not as expected') dp2 = DomainParticipant() topic_dp2, gen_info2 = ddsutil.find_and_register_topic(dp2, 'DP1_C_TopicStruct_2') self.assertIsNotNone(topic_dp2, 'Found topic is not None') self.assertEqual('DP1_C_TopicStruct_2', topic_dp2.name, 'Found topic name not as expected') self.assertEqual(type_info, topic_dp2.type_name, 'Found type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp2.keylist, 'Found key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member 
name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp2.metadescriptor, 'Found meta descriptor not as expected') sub2 = dp2.create_subscriber() # now register the found topic locally, and test reading and writing between the participants self.assertIsNotNone(gen_info2, 'Returned gen_info2 is None') tq2 = topic_dp2.qos self.assertIsNotNone(tq2) # TODO: need get copy topic qos #local_topic_dp2 = register_topic_locally(topic_dp2) # local_topic_dp2 = gen_info2.register_topic(dp2, topic_dp2.get_name(), tq2) # Reader for dp2 result_holder = ResultHolder() event = Event() rd2 = sub2.create_datareader(topic_dp2, tq2, DataAvailableListener(result_holder, event)) # Writer for dp1 pub1 = dp1.create_publisher() wr1 = pub1.create_datawriter(topic_dp1, tq1) # create the data Outer = gen_info.get_class('test::C_TopicStruct') Inner = gen_info.get_class('test::T_ID') data1 = Outer() data1.A_ID = Inner(A_ID=21,A_subID=212) data1.A_ForeignIDList = [Inner(A_ID=22,A_subID=223),Inner(A_ID=23,A_subID=234)] data1.value = 242 wr1.write(data1) # let the listener catch up... self.assertTrue(event.wait(10.0), 'wait timed out') self.assertEqual(str(data1), result_holder.result, 'read and write results do not match') def testSimplifiedFindAPIWithNamedTuple(self): idl_path = 'idl/' + 'Thales' + '.idl' type_info = 'test::C_TopicStruct' gen_info = ddsutil.get_dds_classes_from_idl(idl_path, type_info) dp1 = DomainParticipant() tq1 = Qos([DurabilityQosPolicy(DDSDurabilityKind.PERSISTENT)]) topic_dp1 = gen_info.register_topic(dp1, 'DP1_C_TopicStruct_3',tq1) self.assertEqual('DP1_C_TopicStruct_3', topic_dp1.name, 'Topic name not as expected') self.assertEqual(type_info, topic_dp1.type_name, 'Type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp1.keylist, 'Key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp1.metadescriptor, 'Meta descriptor not as expected') dp2 = DomainParticipant() topic_info = ddsutil.find_and_register_topic(dp2, 'DP1_C_TopicStruct_3') self.assertIsNotNone(topic_info.topic, 'Found topic is not None') self.assertEqual('DP1_C_TopicStruct_3', topic_info.topic.name, 'Found topic name not as expected') self.assertEqual(type_info, topic_info.topic.type_name, 'Found type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_info.topic.keylist, 'Found key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_info.topic.metadescriptor, 'Found meta descriptor not as expected') sub2 = dp2.create_subscriber() # now register the found topic locally, and test reading and writing between the participants 
self.assertIsNotNone(topic_info.gen_info, 'Returned topic_info.gen_info is None') tq2 = topic_info.topic.qos self.assertIsNotNone(tq2) # TODO: need get copy topic qos #local_topic_dp2 = register_topic_locally(topic_dp2) # local_topic_dp2 = gen_info2.register_topic(dp2, topic_dp2.get_name(), tq2) # Reader for dp2 result_holder = ResultHolder() event = Event() rd2 = sub2.create_datareader(topic_info.topic, tq2, DataAvailableListener(result_holder, event)) # Writer for dp1 pub1 = dp1.create_publisher() wr1 = pub1.create_datawriter(topic_dp1, tq1) # create the data Outer = gen_info.get_class('test::C_TopicStruct') Inner = gen_info.get_class('test::T_ID') data1 = Outer() data1.A_ID = Inner(A_ID=31,A_subID=312) data1.A_ForeignIDList = [Inner(A_ID=32,A_subID=323),Inner(A_ID=33,A_subID=334)] data1.value = 342 wr1.write(data1) # let the listener catch up... self.assertTrue(event.wait(10.0), 'wait timed out') self.assertEqual(str(data1), result_holder.result, 'read and write results do not match') def testSimplifiedFindAPIWithNamedTupleAndDefaultPubSub(self): idl_path = 'idl/' + 'Thales' + '.idl' type_info = 'test::C_TopicStruct' gen_info = ddsutil.get_dds_classes_from_idl(idl_path, type_info) dp1 = DomainParticipant() tq1 = Qos([DurabilityQosPolicy(DDSDurabilityKind.PERSISTENT)]) topic_dp1 = gen_info.register_topic(dp1, 'DP1_C_TopicStruct_4',tq1) self.assertEqual('DP1_C_TopicStruct_4', topic_dp1.name, 'Topic name not as expected') self.assertEqual(type_info, topic_dp1.type_name, 'Type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_dp1.keylist, 'Key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_dp1.metadescriptor, 'Meta descriptor not as expected') dp2 = DomainParticipant() topic_info = ddsutil.find_and_register_topic(dp2, 'DP1_C_TopicStruct_4') self.assertIsNotNone(topic_info.topic, 'Found topic is not None') self.assertEqual('DP1_C_TopicStruct_4', topic_info.topic.name, 'Found topic name not as expected') self.assertEqual(type_info, topic_info.topic.type_name, 'Found type name not as expected') self.assertEqual('A_ID.A_ID,A_ID.A_subID', topic_info.topic.keylist, 'Found key list not as expected') self.assertEqual('<MetaData version="1.0.0"><Module name="test"><Struct name="T_ID"><Member name="A_ID"><Long/></Member><Member name="A_subID"><Long/></Member></Struct><TypeDef name="T_IDList"><Sequence><Type name="T_ID"/></Sequence></TypeDef><Struct name="C_TopicStruct"><Member name="A_ID"><Type name="T_ID"/></Member><Member name="A_ForeignIDList"><Type name="T_IDList"/></Member><Member name="value"><Long/></Member></Struct></Module></MetaData>', topic_info.topic.metadescriptor, 'Found meta descriptor not as expected') # now register the found topic locally, and test reading and writing between the participants self.assertIsNotNone(topic_info.gen_info, 'Returned topic_info.gen_info is None') tq2 = topic_info.topic.qos self.assertIsNotNone(tq2) # TODO: need get copy topic qos #local_topic_dp2 = register_topic_locally(topic_dp2) # local_topic_dp2 = gen_info2.register_topic(dp2, topic_dp2.get_name(), tq2) # Reader for dp2 result_holder = ResultHolder() 
event = Event() rd2 = dp2.create_datareader(topic_info.topic, tq2, DataAvailableListener(result_holder, event)) # Writer for dp1 wr1 = dp1.create_datawriter(topic_dp1, tq1) # create the data Outer = gen_info.get_class('test::C_TopicStruct') Inner = gen_info.get_class('test::T_ID') data1 = Outer() data1.A_ID = Inner(A_ID=41,A_subID=412) data1.A_ForeignIDList = [Inner(A_ID=42,A_subID=423),Inner(A_ID=43,A_subID=434)] data1.value = 442 wr1.write(data1) # let the listener catch up... self.assertTrue(event.wait(10.0), 'wait timed out') self.assertEqual(str(data1), result_holder.result, 'read and write results do not match') if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testAccessingTopicMetaData'] unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'), # these make sure that some options that are not applicable # remain hidden from the help menu. failfast=False, buffer=False, catchbreak=False)
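Every test above coordinates the writer and the reader the same way: the `DataAvailableListener` stores the stringified sample in a `ResultHolder` and sets a `threading.Event`, while the test thread blocks on `event.wait(10.0)`. A minimal, DDS-free sketch of that synchronisation pattern (the names here are illustrative and not part of the OpenSplice API):

```python
from threading import Event, Thread

result_holder = {}
event = Event()

def fake_on_data_available(sample):
    # Mirrors DataAvailableListener.on_data_available: record the sample, then signal the waiter.
    result_holder['result'] = str(sample)
    event.set()

Thread(target=fake_on_data_available, args=({'value': 42},)).start()
assert event.wait(10.0), 'wait timed out'
assert result_holder['result'] == str({'value': 42})
```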
sdk/python/pulumi_gcp/datacatalog/tag_template.py
sisisin/pulumi-gcp
121
11117930
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['TagTemplateArgs', 'TagTemplate'] @pulumi.input_type class TagTemplateArgs: def __init__(__self__, *, fields: pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]], tag_template_id: pulumi.Input[str], display_name: Optional[pulumi.Input[str]] = None, force_delete: Optional[pulumi.Input[bool]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a TagTemplate resource. :param pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]] fields: Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. :param pulumi.Input[str] tag_template_id: The id of the tag template to create. :param pulumi.Input[str] display_name: The display name for this template. :param pulumi.Input[bool] force_delete: This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: Template location region. """ pulumi.set(__self__, "fields", fields) pulumi.set(__self__, "tag_template_id", tag_template_id) if display_name is not None: pulumi.set(__self__, "display_name", display_name) if force_delete is not None: pulumi.set(__self__, "force_delete", force_delete) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) @property @pulumi.getter def fields(self) -> pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]]: """ Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. """ return pulumi.get(self, "fields") @fields.setter def fields(self, value: pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]]): pulumi.set(self, "fields", value) @property @pulumi.getter(name="tagTemplateId") def tag_template_id(self) -> pulumi.Input[str]: """ The id of the tag template to create. """ return pulumi.get(self, "tag_template_id") @tag_template_id.setter def tag_template_id(self, value: pulumi.Input[str]): pulumi.set(self, "tag_template_id", value) @property @pulumi.getter(name="displayName") def display_name(self) -> Optional[pulumi.Input[str]]: """ The display name for this template. """ return pulumi.get(self, "display_name") @display_name.setter def display_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "display_name", value) @property @pulumi.getter(name="forceDelete") def force_delete(self) -> Optional[pulumi.Input[bool]]: """ This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. 
""" return pulumi.get(self, "force_delete") @force_delete.setter def force_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_delete", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ Template location region. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @pulumi.input_type class _TagTemplateState: def __init__(__self__, *, display_name: Optional[pulumi.Input[str]] = None, fields: Optional[pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]]] = None, force_delete: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, tag_template_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering TagTemplate resources. :param pulumi.Input[str] display_name: The display name for this template. :param pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]] fields: Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. :param pulumi.Input[bool] force_delete: This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. :param pulumi.Input[str] name: - The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field} :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: Template location region. :param pulumi.Input[str] tag_template_id: The id of the tag template to create. """ if display_name is not None: pulumi.set(__self__, "display_name", display_name) if fields is not None: pulumi.set(__self__, "fields", fields) if force_delete is not None: pulumi.set(__self__, "force_delete", force_delete) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) if tag_template_id is not None: pulumi.set(__self__, "tag_template_id", tag_template_id) @property @pulumi.getter(name="displayName") def display_name(self) -> Optional[pulumi.Input[str]]: """ The display name for this template. """ return pulumi.get(self, "display_name") @display_name.setter def display_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "display_name", value) @property @pulumi.getter def fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]]]: """ Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. 
""" return pulumi.get(self, "fields") @fields.setter def fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TagTemplateFieldArgs']]]]): pulumi.set(self, "fields", value) @property @pulumi.getter(name="forceDelete") def force_delete(self) -> Optional[pulumi.Input[bool]]: """ This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. """ return pulumi.get(self, "force_delete") @force_delete.setter def force_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_delete", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ - The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field} """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ Template location region. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="tagTemplateId") def tag_template_id(self) -> Optional[pulumi.Input[str]]: """ The id of the tag template to create. """ return pulumi.get(self, "tag_template_id") @tag_template_id.setter def tag_template_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tag_template_id", value) class TagTemplate(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, display_name: Optional[pulumi.Input[str]] = None, fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TagTemplateFieldArgs']]]]] = None, force_delete: Optional[pulumi.Input[bool]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, tag_template_id: Optional[pulumi.Input[str]] = None, __props__=None): """ A tag template defines a tag, which can have one or more typed fields. The template is used to create and attach the tag to GCP resources. 
To get more information about TagTemplate, see: * [API documentation](https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.tagTemplates) * How-to Guides * [Official Documentation](https://cloud.google.com/data-catalog/docs) ## Example Usage ### Data Catalog Tag Template Basic ```python import pulumi import pulumi_gcp as gcp basic_tag_template = gcp.datacatalog.TagTemplate("basicTagTemplate", display_name="Demo Tag Template", fields=[ gcp.datacatalog.TagTemplateFieldArgs( display_name="Source of data asset", field_id="source", is_required=True, type=gcp.datacatalog.TagTemplateFieldTypeArgs( primitive_type="STRING", ), ), gcp.datacatalog.TagTemplateFieldArgs( display_name="Number of rows in the data asset", field_id="num_rows", type=gcp.datacatalog.TagTemplateFieldTypeArgs( primitive_type="DOUBLE", ), ), gcp.datacatalog.TagTemplateFieldArgs( display_name="PII type", field_id="pii_type", type=gcp.datacatalog.TagTemplateFieldTypeArgs( enum_type=gcp.datacatalog.TagTemplateFieldTypeEnumTypeArgs( allowed_values=[ gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="EMAIL", ), gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="SOCIAL SECURITY NUMBER", ), gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="NONE", ), ], ), ), ), ], force_delete=False, region="us-central1", tag_template_id="my_template") ``` ## Import TagTemplate can be imported using any of these accepted formats ```sh $ pulumi import gcp:datacatalog/tagTemplate:TagTemplate default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] display_name: The display name for this template. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TagTemplateFieldArgs']]]] fields: Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. :param pulumi.Input[bool] force_delete: This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: Template location region. :param pulumi.Input[str] tag_template_id: The id of the tag template to create. """ ... @overload def __init__(__self__, resource_name: str, args: TagTemplateArgs, opts: Optional[pulumi.ResourceOptions] = None): """ A tag template defines a tag, which can have one or more typed fields. The template is used to create and attach the tag to GCP resources. 
To get more information about TagTemplate, see: * [API documentation](https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.tagTemplates) * How-to Guides * [Official Documentation](https://cloud.google.com/data-catalog/docs) ## Example Usage ### Data Catalog Tag Template Basic ```python import pulumi import pulumi_gcp as gcp basic_tag_template = gcp.datacatalog.TagTemplate("basicTagTemplate", display_name="Demo Tag Template", fields=[ gcp.datacatalog.TagTemplateFieldArgs( display_name="Source of data asset", field_id="source", is_required=True, type=gcp.datacatalog.TagTemplateFieldTypeArgs( primitive_type="STRING", ), ), gcp.datacatalog.TagTemplateFieldArgs( display_name="Number of rows in the data asset", field_id="num_rows", type=gcp.datacatalog.TagTemplateFieldTypeArgs( primitive_type="DOUBLE", ), ), gcp.datacatalog.TagTemplateFieldArgs( display_name="PII type", field_id="pii_type", type=gcp.datacatalog.TagTemplateFieldTypeArgs( enum_type=gcp.datacatalog.TagTemplateFieldTypeEnumTypeArgs( allowed_values=[ gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="EMAIL", ), gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="SOCIAL SECURITY NUMBER", ), gcp.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValueArgs( display_name="NONE", ), ], ), ), ), ], force_delete=False, region="us-central1", tag_template_id="my_template") ``` ## Import TagTemplate can be imported using any of these accepted formats ```sh $ pulumi import gcp:datacatalog/tagTemplate:TagTemplate default {{name}} ``` :param str resource_name: The name of the resource. :param TagTemplateArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TagTemplateArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, display_name: Optional[pulumi.Input[str]] = None, fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TagTemplateFieldArgs']]]]] = None, force_delete: Optional[pulumi.Input[bool]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, tag_template_id: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TagTemplateArgs.__new__(TagTemplateArgs) __props__.__dict__["display_name"] = display_name if fields is None and not opts.urn: raise TypeError("Missing required property 'fields'") __props__.__dict__["fields"] = fields __props__.__dict__["force_delete"] = force_delete __props__.__dict__["project"] = project __props__.__dict__["region"] = region if tag_template_id is None and not opts.urn: raise TypeError("Missing required property 'tag_template_id'") __props__.__dict__["tag_template_id"] = tag_template_id __props__.__dict__["name"] = None super(TagTemplate, __self__).__init__( 'gcp:datacatalog/tagTemplate:TagTemplate', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, display_name: Optional[pulumi.Input[str]] = None, fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TagTemplateFieldArgs']]]]] = None, force_delete: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, tag_template_id: Optional[pulumi.Input[str]] = None) -> 'TagTemplate': """ Get an existing TagTemplate resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] display_name: The display name for this template. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TagTemplateFieldArgs']]]] fields: Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. :param pulumi.Input[bool] force_delete: This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. :param pulumi.Input[str] name: - The resource name of the tag template field in URL format. 
Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field} :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: Template location region. :param pulumi.Input[str] tag_template_id: The id of the tag template to create. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TagTemplateState.__new__(_TagTemplateState) __props__.__dict__["display_name"] = display_name __props__.__dict__["fields"] = fields __props__.__dict__["force_delete"] = force_delete __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["region"] = region __props__.__dict__["tag_template_id"] = tag_template_id return TagTemplate(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[Optional[str]]: """ The display name for this template. """ return pulumi.get(self, "display_name") @property @pulumi.getter def fields(self) -> pulumi.Output[Sequence['outputs.TagTemplateField']]: """ Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. Structure is documented below. """ return pulumi.get(self, "fields") @property @pulumi.getter(name="forceDelete") def force_delete(self) -> pulumi.Output[Optional[bool]]: """ This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template. """ return pulumi.get(self, "force_delete") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ - The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field} """ return pulumi.get(self, "name") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @property @pulumi.getter def region(self) -> pulumi.Output[str]: """ Template location region. """ return pulumi.get(self, "region") @property @pulumi.getter(name="tagTemplateId") def tag_template_id(self) -> pulumi.Output[str]: """ The id of the tag template to create. """ return pulumi.get(self, "tag_template_id")
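Besides constructing new templates, the generated class exposes the static `get` shown above for adopting an already-provisioned template into a Pulumi program. A hedged usage sketch (the project and the full resource name in `id` are placeholders; the `projects/.../tagTemplates/...` format is an assumption based on the docstring's import example):

```python
import pulumi
import pulumi_gcp as gcp

# Inside a Pulumi program: look up an existing tag template by its full resource name.
existing = gcp.datacatalog.TagTemplate.get(
    "existing-template",
    id="projects/my-project/locations/us-central1/tagTemplates/my_template",  # assumed format
)
pulumi.export("template_display_name", existing.display_name)
```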
corehq/apps/smsbillables/migrations/0014_bootstrap_apposit_rates.py
dimagilg/commcare-hq
471
11118016
from django.db import migrations from corehq.apps.smsbillables.management.commands.bootstrap_apposit_gateway import ( bootstrap_apposit_gateway, ) def create_apposit_rates(apps, schema_editor): bootstrap_apposit_gateway(apps) class Migration(migrations.Migration): dependencies = [ ('smsbillables', '0013_auto_20160826_1531'), ] operations = [ migrations.RunPython(create_apposit_rates), ]
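As written, the migration only defines a forward operation. A common variant (hypothetical here, not part of the original file) also passes `migrations.RunPython.noop` as the reverse callable so the migration can be unapplied cleanly:

```python
from django.db import migrations

from corehq.apps.smsbillables.management.commands.bootstrap_apposit_gateway import (
    bootstrap_apposit_gateway,
)


def create_apposit_rates(apps, schema_editor):
    bootstrap_apposit_gateway(apps)


class Migration(migrations.Migration):

    dependencies = [
        ('smsbillables', '0013_auto_20160826_1531'),
    ]

    operations = [
        # Forward call unchanged; the no-op reverse lets the migration be unapplied.
        migrations.RunPython(create_apposit_rates, migrations.RunPython.noop),
    ]
```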
Gelatin/compiler/String.py
Etherbay/Gelatin
107
11118028
# Copyright (c) 2010-2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re try: from urllib.parse import quote except ImportError: # Python 2 from urllib import quote from Gelatin import INDENT from .Token import Token _string_re = re.compile(r'(\\?)\$(\d*)') class String(Token): def __init__(self, context, data): self.context = context self.data = data def _expand_string(self, match): field = match.group(0) escape = match.group(1) fieldnum = match.group(2) # Check the variable name syntax. if escape: return '$' + fieldnum elif fieldnum == '': return '$' # Check the variable value. cmatch = self.context.re_stack[-1] try: return quote(cmatch.group(int(fieldnum) + 1), safe=' ') except IndexError as e: raise Exception( 'invalid field number %s in %s' % (fieldnum, self.data)) def value(self): return _string_re.sub(self._expand_string, self.data) def re_value(self): return re.escape(self.data) def dump(self, indent=0): return INDENT * indent + '\'' + self.data + '\''
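The `$N` expansion in `String._expand_string` above is plain `re.sub` with a callback over the most recent match on the context's regex stack. A self-contained sketch of the same idea, stripped of Gelatin's `Token`/context machinery and of the URL-quoting the original applies:

```python
import re

_string_re = re.compile(r'(\\?)\$(\d*)')

def expand(template, match):
    """Replace $0, $1, ... with the corresponding capture groups of `match`;
    a backslash-escaped field stays literal and a bare $ passes through unchanged."""
    def repl(m):
        escaped, fieldnum = m.group(1), m.group(2)
        if escaped:
            return '$' + fieldnum
        if fieldnum == '':
            return '$'
        return match.group(int(fieldnum) + 1)  # $0 maps to group 1, as in the original
    return _string_re.sub(repl, template)

m = re.match(r'(\w+)\s+(\w+)', 'eth0 up')
print(expand('interface=$0 state=$1', m))  # interface=eth0 state=up
```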
task_set/tasks/losg_problems/problem_generator_test.py
deepneuralmachine/google-research
23,901
11118117
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for learning.brain.models.learned_optimizer.problems.problem_generator. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from task_set.tasks.losg_problems import problem_generator as pg from task_set.tasks.losg_problems import problem_spec import tensorflow.compat.v1 as tf class ProblemGeneratorTest(tf.test.TestCase): def testProblem(self): param_shapes = [(5, 1), (2, 2)] random_seed = 200 noise_stdev = 1.0 problem = pg.Problem(param_shapes, random_seed, noise_stdev) init = problem.init_tensors() self.assertLen(init, len(param_shapes)) for i, ps in enumerate(param_shapes): self.assertEqual(list(ps), init[i].get_shape().as_list()) init = problem.init_variables() self.assertLen(init, len(param_shapes)) for i, ps in enumerate(param_shapes): self.assertSequenceEqual(ps, init[i].get_shape()) self.assertIsInstance(init[i], tf.Variable) def testProblemGradients(self): param_shapes = [(1, 1)] random_seed = 200 noise_stdev = 1.0 problem = pg.Problem(param_shapes, random_seed, noise_stdev) x = tf.constant(2.) y = tf.constant(20.) parameters = [x, y] objective = x**2 + y**2 grads = problem.gradients(objective, parameters) self.assertLen(grads, len(parameters)) self.assertNotEqual(grads[0], grads[1]) def testSparseProblem_neverZero(self): zero_prob = 0.0 problem = pg.SparseProblem( problem_spec.Spec(pg.Quadratic, (5,), {}), zero_prob) self.assertEqual(zero_prob, problem.zero_prob) parameters = problem.init_tensors(seed=1234) objective = problem.objective(parameters) gradients = problem.gradients(objective, parameters) self.assertLen(gradients, 1) with self.test_session() as sess: self.assertTrue(all(sess.run(gradients[0]))) def testSparseProblem_alwaysZero(self): zero_prob = 1.0 problem = pg.SparseProblem( problem_spec.Spec(pg.Quadratic, (5,), {}), zero_prob) self.assertEqual(zero_prob, problem.zero_prob) parameters = problem.init_tensors(seed=1234) objective = problem.objective(parameters) gradients = problem.gradients(objective, parameters) self.assertLen(gradients, 1) with self.test_session() as sess: self.assertFalse(any(sess.run(gradients[0]))) def testSparseProblem_someProbability(self): tf.set_random_seed(1234) zero_prob = 0.5 problem = pg.SparseProblem( problem_spec.Spec(pg.Quadratic, (5,), {}), zero_prob) self.assertEqual(zero_prob, problem.zero_prob) parameters = problem.init_tensors(seed=1234) objective = problem.objective(parameters) gradients = problem.gradients(objective, parameters) self.assertLen(gradients, 1) with self.test_session() as sess: self.assertTrue(any(sess.run(gradients[0]))) self.assertFalse(all(sess.run(gradients[0]))) if __name__ == "__main__": tf.test.main()
crawler/tools/legacy/restore_region.py
eala/tw-rental-house-data
125
11118136
import sys import os import traceback import json sys.path.append('{}/../..'.format( os.path.dirname(os.path.realpath(__file__)))) from backend.db.models import House, HouseEtc, db from backend.db.enums import TopRegionField, SubRegionField rows = [] total = 0 def save(row, force=False): global rows global total if row: rows.append(row) if len(rows) >= 1000 or force: with db.atomic() as transaction: try: for r in rows: r.save() print('Done {}/{} rows'.format(len(rows), total)) total -= len(rows) rows = [] except: traceback.print_exc() transaction.rollback() def restore(): global total houses = House.select( House.id, House.top_region, House.sub_region ) total = houses.count() for house in houses: try: etc = HouseEtc.get( HouseEtc.house == house ) dd = etc.detail_dict lr = etc.list_raw if lr: try: lr = json.loads(lr) except json.decoder.JSONDecodeError: lr = eval(lr) etc.list_raw = json.dumps(lr, ensure_ascii=False) total += 1 save(etc) if dd and 'top_region' in dd and dd['top_region']: sub_region = '{}{}'.format(dd['top_region'], dd['sub_region']) house.top_region = getattr( TopRegionField.enums, dd['top_region']) house.sub_region = getattr( SubRegionField.enums, sub_region) save(house) elif lr and 'region_name' in lr and lr['region_name']: sub_region = '{}{}'.format(lr['region_name'], lr['section_name']) house.top_region = getattr( TopRegionField.enums, lr['region_name']) house.sub_region = getattr( SubRegionField.enums, sub_region) save(house) else: print('Cannot help for house {}'.format(house.id)) except HouseEtc.DoesNotExist: print('Cannot found house {}'.format(house.id)) if __name__ == '__main__': restore() save(None, True)
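The `list_raw` fallback above drops to `eval` when the stored string turns out to be a Python repr rather than JSON. A safer equivalent for that situation (shown as an alternative for illustration, not what the original script does) is `ast.literal_eval`, which the file already imports:

```python
import ast
import json

def parse_raw(raw):
    """Parse a stored record that may be JSON or a Python-literal repr."""
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Accepts dict/list/str/number literals only, unlike eval().
        return ast.literal_eval(raw)

print(parse_raw('{"region_name": "台北市"}'))   # valid JSON
print(parse_raw("{'region_name': '台北市'}"))   # single quotes: not JSON, but a valid Python literal
```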
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/Languages/de.py
neurodebian/htcondor
217
11118165
apiAttachAvailable = u'API verf\xfcgbar' apiAttachNotAvailable = u'Nicht verf\xfcgbar' apiAttachPendingAuthorization = u'Ausstehende Genehmigungsanfrage' apiAttachRefused = u'Abgelehnt' apiAttachSuccess = u'Erfolg' apiAttachUnknown = u'Unbekannt' budDeletedFriend = u'Aus Freundesliste gel\xf6scht' budFriend = u'Freund' budNeverBeenFriend = u'War noch nie in Freundesliste' budPendingAuthorization = u'Ausstehende Genehmigungsanfrage' budUnknown = u'Unbekannt' cfrBlockedByRecipient = u'Anruf von Empf\xe4nger gesperrt' cfrMiscError = u'Sonstiger Fehler' cfrNoCommonCodec = u'Kein Standard-Codec' cfrNoProxyFound = u'Kein Proxy gefunden' cfrNotAuthorizedByRecipient = u'Aktueller Benutzer nicht von Empf\xe4nger genehmigt' cfrRecipientNotFriend = u'Empf\xe4nger kein Freund' cfrRemoteDeviceError = u'Problem mit dem Audioger\xe4t der Gegenstelle' cfrSessionTerminated = u'Sitzung beendet' cfrSoundIOError = u'Ton-E/A-Fehler' cfrSoundRecordingError = u'Aufnahmefehler' cfrUnknown = u'Unbekannt' cfrUserDoesNotExist = u'Benutzer/Telefonnummer gibt es nicht' cfrUserIsOffline = u'Sie oder er ist offline' chsAllCalls = u'Dialog mit Altversion' chsDialog = u'Dialog' chsIncomingCalls = u'Mehrere m\xfcssen annehmen' chsLegacyDialog = u'Dialog mit Altversion' chsMissedCalls = u'Dialog' chsMultiNeedAccept = u'Mehrere m\xfcssen annehmen' chsMultiSubscribed = u'Mehrere abonniert' chsOutgoingCalls = u'Mehrere abonniert' chsUnknown = u'Unbekannt' chsUnsubscribed = u'Abonnement gek\xfcndigt' clsBusy = u'Besetzt' clsCancelled = u'Abbrechen' clsEarlyMedia = u'Wiedergabe von Early Media' clsFailed = u'Anruf leider fehlgeschlagen!' clsFinished = u'Beendet' clsInProgress = u'Aktives Gespr\xe4ch' clsLocalHold = u'In lokaler Wartestellung' clsMissed = u'Anruf in Abwesenheit von:' clsOnHold = u'Konferenz wird gehalten' clsRefused = u'Abgelehnt' clsRemoteHold = u'In Fern-Wartestellung' clsRinging = u'Gespr\xe4che' clsRouting = u'Wird weitergeleitet' clsTransferred = u'Unbekannt' clsTransferring = u'Unbekannt' clsUnknown = u'Unbekannt' clsUnplaced = u'Nie get\xe4tigt' clsVoicemailBufferingGreeting = u'Ansage wird gepuffert' clsVoicemailCancelled = u'Voicemail wurde abgebrochen' clsVoicemailFailed = u'Fehler bei Sprachnachricht' clsVoicemailPlayingGreeting = u'Ansage wird abgespielt' clsVoicemailRecording = u'Sprachnachricht wird aufgezeichnet' clsVoicemailSent = u'Voicemail wurde gesendet' clsVoicemailUploading = u'Voicemail wird hochgeladen' cltIncomingP2P = u'Eingehender P2P-Anruf' cltIncomingPSTN = u'Eingehender Anruf' cltOutgoingP2P = u'Ausgehender P2P-Anruf' cltOutgoingPSTN = u'Ausgehender Anruf' cltUnknown = u'Unbekannt' cmeAddedMembers = u'Hinzugef\xfcgte Mitglieder' cmeCreatedChatWith = u'Chat erstellt mit' cmeEmoted = u'Unbekannt' cmeLeft = u'Links' cmeSaid = u'Gesagt' cmeSawMembers = u'Gesehene Mitglieder' cmeSetTopic = u'Thema festlegen' cmeUnknown = u'Unbekannt' cmsRead = u'Gelesen' cmsReceived = u'Empfangen' cmsSending = u'Sende...' 
cmsSent = u'Gesendet' cmsUnknown = u'Unbekannt' conConnecting = u'Verbindungsaufbau' conOffline = u'Offline' conOnline = u'Online' conPausing = u'Wird angehalten' conUnknown = u'Unbekannt' cusAway = u'Abwesend' cusDoNotDisturb = u'Besch\xe4ftigt' cusInvisible = u'Als offline anzeigen' cusLoggedOut = u'Offline' cusNotAvailable = u'Nicht verf\xfcgbar' cusOffline = u'Offline' cusOnline = u'Online' cusSkypeMe = u'Skype Me-Modus' cusUnknown = u'Unbekannt' cvsBothEnabled = u'Video wird gesendet und empfangen' cvsNone = u'Kein Video' cvsReceiveEnabled = u'Video wird empfangen' cvsSendEnabled = u'Video wird gesendet' cvsUnknown = u'' grpAllFriends = u'Alle Freunde' grpAllUsers = u'Alle Benutzer' grpCustomGroup = u'Benutzerdefiniert' grpOnlineFriends = u'Online-Freunde' grpPendingAuthorizationFriends = u'Ausstehende Genehmigungsanfrage' grpProposedSharedGroup = u'Proposed Shared Group' grpRecentlyContactedUsers = u'K\xfcrzlich kontaktierte Benutzer' grpSharedGroup = u'Shared Group' grpSkypeFriends = u'Skype-Freunde' grpSkypeOutFriends = u'SkypeOut-Freunde' grpUngroupedFriends = u'Nicht gruppierte Freunde' grpUnknown = u'Unbekannt' grpUsersAuthorizedByMe = u'Von mir genehmigt' grpUsersBlockedByMe = u'Von mir blockiert' grpUsersWaitingMyAuthorization = u'Warten auf meine Genehmigung' leaAddDeclined = u'Hinzuf\xfcgung abgelehnt' leaAddedNotAuthorized = u'Hinzugef\xfcgter Benutzer muss genehmigt sein' leaAdderNotFriend = u'Hinzuf\xfcgender Benutzer muss Freund sein' leaUnknown = u'Unbekannt' leaUnsubscribe = u'Abonnement gek\xfcndigt' leaUserIncapable = u'Benutzer unf\xe4hig' leaUserNotFound = u'Kein Benutzer gefunden' olsAway = u'Abwesend' olsDoNotDisturb = u'Besch\xe4ftigt' olsNotAvailable = u'Nicht verf\xfcgbar' olsOffline = u'Offline' olsOnline = u'Online' olsSkypeMe = u'Skype Me-Modus' olsSkypeOut = u'SkypeOut' olsUnknown = u'Unbekannt' smsMessageStatusComposing = u'Composing' smsMessageStatusDelivered = u'Delivered' smsMessageStatusFailed = u'Failed' smsMessageStatusRead = u'Read' smsMessageStatusReceived = u'Received' smsMessageStatusSendingToServer = u'Sending to Server' smsMessageStatusSentToServer = u'Sent to Server' smsMessageStatusSomeTargetsFailed = u'Some Targets Failed' smsMessageStatusUnknown = u'Unknown' smsMessageTypeCCRequest = u'Confirmation Code Request' smsMessageTypeCCSubmit = u'Confirmation Code Submit' smsMessageTypeIncoming = u'Incoming' smsMessageTypeOutgoing = u'Outgoing' smsMessageTypeUnknown = u'Unknown' smsTargetStatusAcceptable = u'Acceptable' smsTargetStatusAnalyzing = u'Analyzing' smsTargetStatusDeliveryFailed = u'Delivery Failed' smsTargetStatusDeliveryPending = u'Delivery Pending' smsTargetStatusDeliverySuccessful = u'Delivery Successful' smsTargetStatusNotRoutable = u'Not Routable' smsTargetStatusUndefined = u'Undefined' smsTargetStatusUnknown = u'Unknown' usexFemale = u'Weiblich' usexMale = u'M\xe4nnlich' usexUnknown = u'Unbekannt' vmrConnectError = u'Verbindungsfehler' vmrFileReadError = u'Fehler beim Lesen der Datei' vmrFileWriteError = u'Fehler beim Schreiben in die Datei' vmrMiscError = u'Sonstiger Fehler' vmrNoError = u'Kein Fehler' vmrNoPrivilege = u'Keine Voicemail-Berechtigung' vmrNoVoicemail = u'Voicemail gibt es nicht' vmrPlaybackError = u'Fehler bei der Wiedergabe' vmrRecordingError = u'Fehler bei der Aufnahme' vmrUnknown = u'Unbekannt' vmsBlank = u'Leer' vmsBuffering = u'Pufferung' vmsDeleting = u'Wird gel\xf6scht' vmsDownloading = u'Download l\xe4uft' vmsFailed = u'Fehlgeschlagen' vmsNotDownloaded = u'Nicht gedownloadet' vmsPlayed = u'Abgespielt' 
vmsPlaying = u'Wiedergabe' vmsRecorded = u'Aufgenommen' vmsRecording = u'Sprachnachricht wird aufgezeichnet' vmsUnknown = u'Unbekannt' vmsUnplayed = u'Nicht abgespielt' vmsUploaded = u'Upload beendet' vmsUploading = u'Upload' vmtCustomGreeting = u'Benutzerdefinierte Ansage' vmtDefaultGreeting = u'Standardansage' vmtIncoming = u'Ich eine Sprachnachricht empfange' vmtOutgoing = u'Ausgehend' vmtUnknown = u'Unbekannt' vssAvailable = u'Verf\xfcgbar' vssNotAvailable = u'Nicht verf\xfcgbar' vssPaused = u'Angehalten' vssRejected = u'Abgelehnt' vssRunning = u'Wird ausgef\xfchrt' vssStarting = u'Wird gestartet' vssStopping = u'Wird gestoppt' vssUnknown = u'Unbekannt'
nodes/1.x/python/Math.DecimalToBinary.py
jdehotin/Clockworkfordynamo
147
11118175
vals = IN[0] elementlist = [] for val in vals: elementlist.append(bin(val)) OUT = elementlist
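The node body is a direct mapping of Python's built-in `bin` over the Dynamo input list; outside Dynamo (where `IN` and `OUT` are injected by the host), the same conversion is just a comprehension:

```python
vals = [5, 10, 255]
binaries = [bin(v) for v in vals]        # ['0b101', '0b1010', '0b11111111']
plain = [format(v, 'b') for v in vals]   # ['101', '1010', '11111111'] without the '0b' prefix
```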
netdev/version.py
maliciousgroup/netdev
199
11118185
""" Netdev Version information """ __version__ = "0.9.3" __author__ = "<NAME>" __author_email__ = "<EMAIL>" __url__ = "http://netdev.readthedocs.io/"
setup.py
Medigram/pyapns
752
11118201
#!/usr/bin/env python try: from setuptools import setup except ImportError: from distutils.core import setup DOC = \ """ Features: * XML-RPC Based, works with any client in any language * Native Python API with Django and Pylons support * Scalable, fast and easy to distribute behind a proxy * Based on Twisted * Multi-application and dual environment support * Simplified feedback interface pyapns is an APNS provider that you install on your server and access through XML-RPC. To install you will need Python, Twisted_ and pyOpenSSL_. It's also recommended to install `python-epoll`_ for best performance (if epoll is not available, like on Mac OS X, you may want to use another library, like `py-kqueue`_. If you like easy_install try (it should take care of the dependancies for you):: $ sudo pip install pyapns pyapns is a service that runs persistently on your machine. To start it:: $ twistd -r epoll web --class=pyapns.server.APNSServer --port=7077 To get started right away, use the included client:: $ python >>> from pyapns import configure, provision, notify >>> configure({'HOST': 'http://localhost:7077/'}) >>> provision('myapp', open('cert.pem').read(), 'sandbox') >>> notify('myapp', 'hexlified_token_str', {'aps':{'alert': 'Hello!'}}) A lot more documentation and the issue tracker can be found on the `github page <http://github.com/samuraisam/pyapns>`. """ setup( name="pyapns", version="0.4.0", description="A universal Apple Push Notification Service (APNS) provider.", long_description=DOC, author="<NAME>", author_email="<EMAIL>", license="MIT", url="http://github.com/samuraisam/pyapns/tree/master", download_url="http://github.com/samuraisam/pyapns/tree/master", classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules'], packages=['pyapns'], package_data={}, install_requires=['Twisted>=8.2.0', 'pyOpenSSL>=0.10'] )
fhir/resources/STU3/tests/test_explanationofbenefit.py
cstoltze/fhir.resources
144
11118232
# -*- coding: utf-8 -*- """ Profile: http://hl7.org/fhir/StructureDefinition/ExplanationOfBenefit Release: STU3 Version: 3.0.2 Revision: 11917 Last updated: 2019-10-24T11:53:00+11:00 """ from pydantic.validators import bytes_validator # noqa: F401 from .. import fhirtypes # noqa: F401 from .. import explanationofbenefit def impl_explanationofbenefit_1(inst): assert inst.careTeam[0].provider.reference == "Practitioner/example" assert inst.careTeam[0].sequence == 1 assert inst.claim.reference == "Claim/100150" assert inst.claimResponse.reference == "ClaimResponse/R3500" assert inst.created == fhirtypes.DateTime.validate("2014-08-16") assert inst.disposition == "Claim settled as per contract." assert inst.enterer.reference == "Practitioner/1" assert inst.facility.reference == "Location/1" assert inst.id == "EB3500" assert ( inst.identifier[0].system == "http://www.BenefitsInc.com/fhir/explanationofbenefit" ) assert inst.identifier[0].value == "987654321" assert inst.insurance.coverage.reference == "Coverage/9876B1" assert inst.item[0].adjudication[0].amount.code == "USD" assert inst.item[0].adjudication[0].amount.system == "urn:iso:std:iso:4217" assert float(inst.item[0].adjudication[0].amount.value) == float(120.0) assert inst.item[0].adjudication[0].category.coding[0].code == "eligible" assert inst.item[0].adjudication[1].category.coding[0].code == "eligpercent" assert float(inst.item[0].adjudication[1].value) == float(0.8) assert inst.item[0].adjudication[2].amount.code == "USD" assert inst.item[0].adjudication[2].amount.system == "urn:iso:std:iso:4217" assert float(inst.item[0].adjudication[2].amount.value) == float(96.0) assert inst.item[0].adjudication[2].category.coding[0].code == "benefit" assert inst.item[0].careTeamLinkId[0] == 1 assert inst.item[0].encounter[0].reference == "Encounter/example" assert inst.item[0].net.code == "USD" assert inst.item[0].net.system == "urn:iso:std:iso:4217" assert float(inst.item[0].net.value) == float(135.57) assert inst.item[0].sequence == 1 assert inst.item[0].service.coding[0].code == "1200" assert inst.item[0].service.coding[0].system == "http://hl7.org/fhir/service-uscls" assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16") assert inst.item[0].unitPrice.code == "USD" assert inst.item[0].unitPrice.system == "urn:iso:std:iso:4217" assert float(inst.item[0].unitPrice.value) == float(135.57) assert inst.organization.reference == "Organization/2" assert inst.outcome.coding[0].code == "complete" assert inst.outcome.coding[0].system == "http://hl7.org/fhir/remittance-outcome" assert inst.patient.reference == "Patient/pat1" assert inst.payee.party.reference == "Organization/2" assert inst.payee.type.coding[0].code == "provider" assert inst.payee.type.coding[0].system == "http://hl7.org/fhir/payeetype" assert inst.provider.reference == "Practitioner/1" assert inst.status == "active" assert inst.text.div == ( '<div xmlns="http://www.w3.org/1999/xhtml">A human-readable' " rendering of the ExplanationOfBenefit</div>" ) assert inst.text.status == "generated" assert inst.totalBenefit.code == "USD" assert inst.totalBenefit.system == "urn:iso:std:iso:4217" assert float(inst.totalBenefit.value) == float(96.0) assert inst.totalCost.code == "USD" assert inst.totalCost.system == "urn:iso:std:iso:4217" assert float(inst.totalCost.value) == float(135.57) assert inst.type.coding[0].code == "oral" assert inst.type.coding[0].system == "http://hl7.org/fhir/ex-claimtype" def test_explanationofbenefit_1(base_settings): """No. 
1 tests collection for ExplanationOfBenefit. Test File: explanationofbenefit-example.json """ filename = base_settings["unittest_data_dir"] / "explanationofbenefit-example.json" inst = explanationofbenefit.ExplanationOfBenefit.parse_file( filename, content_type="application/json", encoding="utf-8" ) assert "ExplanationOfBenefit" == inst.resource_type impl_explanationofbenefit_1(inst) # testing reverse by generating data from itself and create again. data = inst.dict() assert "ExplanationOfBenefit" == data["resourceType"] inst2 = explanationofbenefit.ExplanationOfBenefit(**data) impl_explanationofbenefit_1(inst2)
samples/unmanage_node.py
oniram22/orionsdk-python
177
11118268
import requests from orionsdk import SwisClient from datetime import datetime, timedelta def main(): hostname = 'localhost' username = 'admin' password = '' swis = SwisClient(hostname, username, password) results = swis.query('SELECT NodeID, Caption FROM Orion.Nodes WHERE IPAddress = @ip_addr', ip_addr='127.0.0.1') if results['results']: nodeId = results['results'][0]['NodeID'] caption = results['results'][0]['Caption'] netObjectId = 'N:{}'.format(nodeId) now = datetime.utcnow() tomorrow = now + timedelta(days=1) swis.invoke('Orion.Nodes', 'Unmanage', netObjectId, now, tomorrow, False) print('Done...{} will be unmanaged until {}'.format(caption, tomorrow)) else: print("Device doesn't exist") requests.packages.urllib3.disable_warnings() if __name__ == '__main__': main()
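A small sketch that factors the one-off unmanage call above into a reusable helper; the hostname and credentials are placeholders, and the argument order simply mirrors the sample invocation.

from datetime import datetime, timedelta
from orionsdk import SwisClient

def unmanage_node(swis, node_id, hours=24):
    # Same 'Orion.Nodes'/'Unmanage' verb as in main() above; the trailing False
    # mirrors the sample call.
    net_object_id = 'N:{}'.format(node_id)
    start = datetime.utcnow()
    end = start + timedelta(hours=hours)
    swis.invoke('Orion.Nodes', 'Unmanage', net_object_id, start, end, False)
    return start, end

# swis = SwisClient('localhost', 'admin', '')  # placeholder credentials
# unmanage_node(swis, 123, hours=4)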
crowdsourcing/utils.py
Kyeongan/crowdsource-platform
138
11118324
import ast import datetime import hashlib import random import re import string from django.conf import settings from django.http import HttpResponse from django.template import Template from django.template.base import VariableNode from django.utils import timezone from django.utils.http import urlencode from oauth2_provider.oauth2_backends import OAuthLibCore, get_oauthlib_core from rest_framework.pagination import PageNumberPagination, LimitOffsetPagination from rest_framework.renderers import JSONRenderer from crowdsourcing.crypto import to_pk from crowdsourcing.redis import RedisProvider class SmallResultsSetPagination(LimitOffsetPagination): default_limit = 100 def is_discount_eligible(user): if user.email[-4:] in settings.NON_PROFIT_EMAILS: return True return False def get_pk(id_or_hash): try: project_id = int(id_or_hash) return project_id, False except Exception: return to_pk(id_or_hash), True def get_delimiter(filename, *args, **kwargs): delimiter_map = {'csv': ',', 'tsv': '\t'} delimiter = None extension = filename.split('.')[-1] if extension in delimiter_map: delimiter = delimiter_map[extension] return delimiter def get_model_or_none(model, *args, **kwargs): """ Get model object or return None, this will catch the DoesNotExist error. Keyword Arguments: model -- this is the model you want to query from other parameters are of variable length: e.g id=1 or username='jon.snow' """ try: return model.objects.get(*args, **kwargs) except model.DoesNotExist: return None def get_next_unique_id(model, field, value): """ Find next available incrementing value for a field in model. :param model: Model to be queried :param field: Model field to find value for :param value: Field value for which the next increment which is unique and available is to be found :return: the next unique increment value in model for the field considering index value from 1 """ condition = {} condition['%s__iregex' % field] = r'^%s[0-9]+$' % value values = model.objects.filter(**condition).values_list(field, flat=True) integers = map(lambda x: int(x.replace(value, '')), values) # complete sequence plus 1 extra if no gap exists all_values = range(1, len(integers) + 2) gap = list(set(all_values) - set(integers))[0] new_field_value = '%s%d' % (value, gap) return new_field_value def get_time_delta(time_stamp): if time_stamp is None: return "" difference = timezone.now() - time_stamp days = difference.days hours = difference.seconds // 3600 minutes = (difference.seconds // 60) % 60 if minutes > 0 and hours == 0 and days == 0: minutes_calculated = str(minutes) + " minutes " elif minutes > 0 and (hours != 0 or days != 0): minutes_calculated = "" else: minutes_calculated = "1 minute " return "{days}{hours}{minutes}".format(days=str(days) + " day(s) " if days > 0 else "", hours=str(hours) + " hour(s) " if hours > 0 and days == 0 else "", minutes=minutes_calculated) + "ago" class Oauth2Backend(OAuthLibCore): def _extract_params(self, request): """ Extract parameters from the Django request object. Such parameters will then be passed to OAuthLib to build its own Request object. The body should be encoded using OAuthLib urlencoded """ uri = self._get_escaped_full_path(request) http_method = request.method headers = {} # self.extract_headers(request) body = urlencode(self.extract_body(request)) # TODO return uri, http_method, body, headers def create_token_response(self, request): """ A wrapper method that calls create_token_response on `server_class` instance. 
:param request: The current django.http.HttpRequest object """ uri, http_method, body, headers = self._extract_params(request) headers, body, status = get_oauthlib_core().server.create_token_response(uri, http_method, body, headers) uri = headers.get("Location", None) return uri, headers, body, status def extract_body(self, request): """ Extracts the POST body from the Django request object :param request: The current django.http.HttpRequest object :return: provided POST parameters """ return request.data.items() class Oauth2Utils: def create_client(self, request, user): from oauth2_provider.models import Application oauth2_client = Application.objects.create(user=user, client_type=Application.CLIENT_PUBLIC, authorization_grant_type=Application.GRANT_PASSWORD) return oauth2_client def get_token(self, request): oauth2_backend = Oauth2Backend() uri, headers, body, status = oauth2_backend.create_token_response(request) response_data = {} response_data["message"] = "OK" response_data.update(ast.literal_eval(body)) return response_data, status def get_refresh_token(self, request): pass class SmallResultSetPagination(PageNumberPagination): page_size = 25 page_size_query_param = 'page_size' max_page_size = 100 class JSONResponse(HttpResponse): """ An HttpResponse that renders its content into JSON. """ def __init__(self, data, **kwargs): content = JSONRenderer().render(data) kwargs['content_type'] = 'application/json' super(JSONResponse, self).__init__(content, **kwargs) def generate_random_id(length=8, chars=string.ascii_lowercase + string.digits): return ''.join(random.choice(chars) for _ in range(length)) def get_relative_time(date_time): delta = datetime.timedelta(days=7) current = timezone.now() difference = current - date_time if difference.total_seconds() - delta.total_seconds() > 0: return date_time.strftime("%b") + ' ' + str(date_time.day) else: one_day = datetime.timedelta(days=1) if difference.total_seconds() - one_day.total_seconds() > 0: return date_time.strftime("%a") else: return date_time.strftime('%I:%M %p').lstrip('0') def get_worker_cache(worker_id): provider = RedisProvider() name = provider.build_key('worker', worker_id) worker_stats = provider.hgetall(name) worker_groups = provider.smembers(name + ':worker_groups') approved = int(worker_stats.get('approved', 0)) rejected = int(worker_stats.get('rejected', 0)) submitted = int(worker_stats.get('submitted', 0)) gender = worker_stats.get('gender') birthday_year = worker_stats.get('birthday_year') ethnicity = worker_stats.get('ethnicity') is_worker = worker_stats.get('is_worker', 0) is_requester = worker_stats.get('is_requester', 0) approval_rate = None if approved + rejected > 0: approval_rate = float(approved) / float(approved + rejected) worker_data = { "country": worker_stats.get('country', None), "approval_rate": approval_rate, "total_tasks": approved + rejected + submitted, "approved_tasks": approved, "worker_groups": list(worker_groups), "gender": gender, "birthday_year": birthday_year, "ethnicity": ethnicity, "is_worker": is_worker, "is_requester": is_requester } return worker_data def create_copy(instance): instance.pk = None instance.save() return instance def get_review_redis_message(match_group_id, project_key): message = { "type": "REVIEW", "payload": { "match_group_id": match_group_id, 'project_key': project_key, "is_done": True } } return message def replace_braces(s): return re.sub(r'\s(?=[^\{\}]*}})', '', unicode(s)) def get_template_string(initial_data, data): initial_data = replace_braces(initial_data) 
html_template = Template(initial_data) return_value = '' has_variables = False for node in html_template.nodelist: if isinstance(node, VariableNode): return_value += unicode(data.get(node.token.contents, '')) has_variables = True else: return_value += unicode(node.token.contents) return return_value, has_variables def get_template_tokens(initial_data): initial_data = replace_braces(initial_data) html_template = Template(initial_data) return [node.token.contents for node in html_template.nodelist if isinstance(node, VariableNode)] def flatten_dict(d, separator='_', prefix=''): return {prefix + separator + k if prefix else k: v for kk, vv in d.items() for k, v in flatten_dict(vv, separator, kk).items() } if isinstance(d, dict) else {prefix: d} def hash_task(data): return hashlib.sha256(repr(sorted(frozenset(flatten_dict(data))))).hexdigest() def hash_as_set(data): return hashlib.sha256(repr(sorted(frozenset(data)))).hexdigest() def get_trailing_number(s): m = re.search(r'\d+$', s) return int(m.group()) if m else None
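A worked example of the flatten_dict helper defined above (the dictionary contents are made up for illustration), plus a note on how hash_task uses it.

nested = {'question': {'type': 'text', 'required': True}, 'position': 1}
flat = flatten_dict(nested)
# keys of nested dicts are joined with the separator; only leaf values remain:
# {'question_type': 'text', 'question_required': True, 'position': 1}
assert flat['question_type'] == 'text'

# hash_task then hashes repr(sorted(frozenset(flat))), in effect only the sorted
# key names, since frozenset() over a dict iterates its keys; under Python 3 the
# repr string would additionally need .encode() before hashlib.sha256().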
src/super_gradients/training/datasets/datasets_utils.py
Deci-AI/super-gradients
308
11118415
<filename>src/super_gradients/training/datasets/datasets_utils.py import copy import os from abc import ABC, abstractmethod from multiprocessing import Value, Lock import random import numpy as np import torch.nn.functional as F import torchvision from PIL import Image import torch from super_gradients.common.sg_loggers.abstract_sg_logger import AbstractSGLogger from super_gradients.training.datasets.detection_datasets.detection_dataset import DetectionDataSet from super_gradients.common.abstractions.abstract_logger import get_logger from deprecated import deprecated from matplotlib.patches import Rectangle from torch.utils.tensorboard import SummaryWriter from torchvision.datasets import ImageFolder from super_gradients.training.datasets.auto_augment import rand_augment_transform from torchvision.transforms import transforms, InterpolationMode, RandomResizedCrop from tqdm import tqdm from super_gradients.training.utils.utils import AverageMeter from super_gradients.training.utils.detection_utils import DetectionVisualization import matplotlib.pyplot as plt def get_mean_and_std_torch(data_dir=None, dataloader=None, num_workers=4, RandomResizeSize=224): """ A function for getting the mean and std of large datasets using pytorch dataloader and gpu functionality. :param data_dir: String, path to none-library dataset folder. For example "/data/Imagenette" or "/data/TinyImagenet" :param dataloader: a torch DataLoader, as it would feed the data into the trainer (including transforms etc). :param RandomResizeSize: Int, the size of the RandomResizeCrop as it appears in the DataInterface (for example, for Imagenet, this value should be 224). :return: 2 lists,mean and std, each one of len 3 (1 for each channel) """ assert data_dir is None or dataloader is None, 'Please provide either path to data folder or DataLoader, not both.' if dataloader is None: traindir = os.path.join(os.path.abspath(data_dir), 'train') trainset = ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(RandomResizeSize), transforms.RandomHorizontalFlip(), transforms.ToTensor()])) dataloader = torch.utils.data.DataLoader(trainset, batch_size=1, num_workers=num_workers) print(f'Calculating on {len(dataloader.dataset.targets)} Training Samples') device = 'cuda:0' if torch.cuda.is_available() else 'cpu' h, w = 0, 0 for batch_idx, (inputs, targets) in enumerate(dataloader): inputs = inputs.to(device) if batch_idx == 0: h, w = inputs.size(2), inputs.size(3) print(f'Min: {inputs.min()}, Max: {inputs.max()}') chsum = inputs.sum(dim=(0, 2, 3), keepdim=True) else: chsum += inputs.sum(dim=(0, 2, 3), keepdim=True) mean = chsum / len(trainset) / h / w print(f'mean: {mean.view(-1)}') chsum = None for batch_idx, (inputs, targets) in enumerate(dataloader): inputs = inputs.to(device) if batch_idx == 0: chsum = (inputs - mean).pow(2).sum(dim=(0, 2, 3), keepdim=True) else: chsum += (inputs - mean).pow(2).sum(dim=(0, 2, 3), keepdim=True) std = torch.sqrt(chsum / (len(trainset) * h * w - 1)) print(f'std: {std.view(-1)}') return mean.view(-1).cpu().numpy().tolist(), std.view(-1).cpu().numpy().tolist() @deprecated(reason='Use get_mean_and_std_torch() instead. 
It is faster and more accurate') def get_mean_and_std(dataset): '''Compute the mean and std value of dataset.''' dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1) mean = torch.zeros(3) std = torch.zeros(3) print('==> Computing mean and std..') j = 0 for inputs, targets in dataloader: if j % 10 == 0: print(j) j += 1 for i in range(3): mean[i] += inputs[:, i, :, :].mean() std[i] += inputs[:, i, :, :].std() mean.div_(len(dataset)) std.div_(len(dataset)) return mean, std class AbstractCollateFunction(ABC): """ A collate function (for torch DataLoader) """ @abstractmethod def __call__(self, batch): pass class ComposedCollateFunction(AbstractCollateFunction): """ A function (for torch DataLoader) which executes a sequence of sub collate functions """ def __init__(self, functions: list): self.functions = functions def __call__(self, batch): for f in self.functions: batch = f(batch) return batch class AtomicInteger: def __init__(self, value: int = 0): self._value = Value('i', value) def __set__(self, instance, value): self._value.value = value def __get__(self, instance, owner): return self._value.value class MultiScaleCollateFunction(AbstractCollateFunction): """ a collate function to implement multi-scale data augmentation according to https://arxiv.org/pdf/1612.08242.pdf """ _counter = AtomicInteger(0) _current_size = AtomicInteger(0) _lock = Lock() def __init__(self, target_size: int = None, min_image_size: int = None, max_image_size: int = None, image_size_steps: int = 32, change_frequency: int = 10): """ set parameters for the multi-scale collate function the possible image sizes are in range [min_image_size, max_image_size] in steps of image_size_steps a new size will be randomly selected every change_frequency calls to the collate_fn() :param target_size: scales will be [0.66 * target_size, 1.5 * target_size] :param min_image_size: the minimum size to scale down to (in pixels) :param max_image_size: the maximum size to scale up to (in pixels) :param image_size_steps: typically, the stride of the net, which defines the possible image size multiplications :param change_frequency: """ assert target_size is not None or (max_image_size is not None and min_image_size is not None), \ 'either target_size or min_image_size and max_image_size has to be set' assert target_size is None or max_image_size is None, 'target_size and max_image_size cannot be both defined' if target_size is not None: min_image_size = int(0.66 * target_size - ((0.66 * target_size) % image_size_steps) + image_size_steps) max_image_size = int(1.5 * target_size - ((1.5 * target_size) % image_size_steps)) print('Using multi-scale %g - %g' % (min_image_size, max_image_size)) self.sizes = np.arange(min_image_size, max_image_size + image_size_steps, image_size_steps) self.image_size_steps = image_size_steps self.frequency = change_frequency self._current_size = random.choice(self.sizes) def __call__(self, batch): with self._lock: # Important: this implementation was tailored for a specific input. it assumes the batch is a tuple where # the images are the first item assert isinstance(batch, tuple), 'this collate function expects the input to be a tuple (images, labels)' images = batch[0] if self._counter % self.frequency == 0: self._current_size = random.choice(self.sizes) self._counter += 1 assert images.shape[2] % self.image_size_steps == 0 and images.shape[3] % self.image_size_steps == 0, \ 'images sized not divisible by %d. 
(resize images before calling multi_scale)' % self.image_size_steps if self._current_size != max(images.shape[2:]): ratio = float(self._current_size) / max(images.shape[2:]) new_size = (int(round(images.shape[2] * ratio)), int(round(images.shape[3] * ratio))) images = F.interpolate(images, size=new_size, mode='bilinear', align_corners=False) return images, batch[1] _pil_interpolation_to_str = { Image.NEAREST: 'PIL.Image.NEAREST', Image.BILINEAR: 'PIL.Image.BILINEAR', Image.BICUBIC: 'PIL.Image.BICUBIC', Image.LANCZOS: 'PIL.Image.LANCZOS', Image.HAMMING: 'PIL.Image.HAMMING', Image.BOX: 'PIL.Image.BOX', } def _pil_interp(method): if method == 'bicubic': return InterpolationMode.BICUBIC elif method == 'lanczos': return InterpolationMode.LANCZOS elif method == 'hamming': return InterpolationMode.HAMMING elif method == 'nearest': return InterpolationMode.NEAREST elif method == 'bilinear': return InterpolationMode.BILINEAR elif method == 'box': return InterpolationMode.BOX else: raise ValueError("interpolation type must be one of ['bilinear', 'bicubic', 'lanczos', 'hamming', " "'nearest', 'box'] for explicit interpolation type, or 'random' for random") _RANDOM_INTERPOLATION = (InterpolationMode.BILINEAR, InterpolationMode.BICUBIC) class RandomResizedCropAndInterpolation(RandomResizedCrop): """ Crop the given PIL Image to random size and aspect ratio with explicitly chosen or random interpolation. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size. This is popularly used to train the Inception networks. Args: size: expected output size of each edge scale: range of size of the origin size cropped ratio: range of aspect ratio of the origin aspect ratio cropped interpolation: Default: PIL.Image.BILINEAR """ def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='default'): super(RandomResizedCropAndInterpolation, self).__init__(size=size, scale=scale, ratio=ratio, interpolation=interpolation) if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION elif interpolation == 'default': self.interpolation = InterpolationMode.BILINEAR else: self.interpolation = _pil_interp(interpolation) def forward(self, img): """ Args: img (PIL Image): Image to be cropped and resized. Returns: PIL Image: Randomly cropped and resized image. 
""" i, j, h, w = self.get_params(img, self.scale, self.ratio) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return torchvision.transforms.functional.resized_crop(img, i, j, h, w, self.size, interpolation) def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) else: interpolate_str = _pil_interpolation_to_str[self.interpolation] format_string = self.__class__.__name__ + '(size={0}'.format(self.size) format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) format_string += ', interpolation={0})'.format(interpolate_str) return format_string STAT_LOGGER_FONT_SIZE = 15 class DatasetStatisticsTensorboardLogger: logger = get_logger(__name__) DEFAULT_SUMMARY_PARAMS = { 'sample_images': 32, # by default, 32 images will be sampled from each dataset 'plot_class_distribution': True, 'plot_box_size_distribution': True, 'plot_anchors_coverage': True, 'max_batches': 30 } def __init__(self, sg_logger: AbstractSGLogger, summary_params: dict = DEFAULT_SUMMARY_PARAMS): self.sg_logger = sg_logger self.summary_params = {**DatasetStatisticsTensorboardLogger.DEFAULT_SUMMARY_PARAMS, **summary_params} def analyze(self, data_loader: torch.utils.data.DataLoader, dataset_params: dict, title: str, anchors: list = None): """ :param data_loader: the dataset data loader :param dataset_params: the dataset parameters :param title: the title for this dataset (i.e. Coco 2017 test set) :param anchors: the list of anchors used by the model. applicable only for detection datasets """ if isinstance(data_loader.dataset, DetectionDataSet): self._analyze_detection(data_loader=data_loader, dataset_params=dataset_params, title=title, anchors=anchors) else: DatasetStatisticsTensorboardLogger.logger.warning('only DetectionDataSet are currently supported') def _analyze_detection(self, data_loader, dataset_params, title, anchors=None): """ Analyze a detection dataset :param data_loader: the dataset data loader :param dataset_params: the dataset parameters :param title: the title for this dataset (i.e. Coco 2017 test set) :param anchors: the list of anchors used by the model. 
if not provided, anchors coverage will not be analyzed """ try: color_mean = AverageMeter() color_std = AverageMeter() all_labels = [] for i, (images, labels) in enumerate(tqdm(data_loader)): if i >= self.summary_params['max_batches'] > 0: break if i == 0: if images.shape[0] > self.summary_params['sample_images']: samples = images[:self.summary_params['sample_images']] else: samples = images pred = [torch.zeros(size=(0, 6)) for _ in range(len(samples))] class_names = data_loader.dataset.all_classes_list result_images = DetectionVisualization.visualize_batch(image_tensor=samples, pred_boxes=pred, target_boxes=copy.deepcopy(labels), batch_name=title, class_names=class_names, box_thickness=1, gt_alpha=1.0) self.sg_logger.add_images(tag=f'{title} sample images', images=np.stack(result_images) .transpose([0, 3, 1, 2])[:, ::-1, :, :]) all_labels.append(labels) color_mean.update(torch.mean(images, dim=[0, 2, 3]), 1) color_std.update(torch.std(images, dim=[0, 2, 3]), 1) all_labels = torch.cat(all_labels, dim=0)[:, 1:].numpy() if self.summary_params['plot_class_distribution']: self._analyze_class_distribution(labels=all_labels, num_classes=dataset_params.num_classes, title=title) if self.summary_params['plot_box_size_distribution']: self._analyze_object_size_distribution(labels=all_labels, title=title) summary = '' summary += f'dataset size: {len(data_loader)} \n' summary += f'color mean: {color_mean.average} \n' summary += f'color std: {color_std.average} \n' if anchors is not None: coverage = self._analyze_anchors_coverage(anchors=anchors, image_size=dataset_params.train_image_size, title=title, labels=all_labels) summary += f'anchors: {anchors} \n' summary += f'anchors coverage: {coverage} \n' self.sg_logger.add_text(tag=f'{title} Statistics', text_string=summary) self.sg_logger.flush() except Exception as e: # any exception is caught here. we dont want the DatasetStatisticsLogger to crash any training DatasetStatisticsTensorboardLogger.logger.error(f'dataset analysis failed: {e}') def _analyze_class_distribution(self, labels: list, num_classes: int, title: str): hist, edges = np.histogram(labels[:, 0], num_classes) f = plt.figure(figsize=[10, 8]) plt.bar(range(num_classes), hist, width=0.5, color='#0504aa', alpha=0.7) plt.xlim(-1, num_classes) plt.grid(axis='y', alpha=0.75) plt.xlabel('Value', fontsize=STAT_LOGGER_FONT_SIZE) plt.ylabel('Frequency', fontsize=STAT_LOGGER_FONT_SIZE) plt.xticks(fontsize=STAT_LOGGER_FONT_SIZE) plt.yticks(fontsize=STAT_LOGGER_FONT_SIZE) plt.title(f'{title} class distribution', fontsize=STAT_LOGGER_FONT_SIZE) self.sg_logger.add_figure(f"{title} class distribution", figure=f) text_dist = '' for i, val in enumerate(hist): text_dist += f'[{i}]: {val}, ' self.sg_logger.add_text(tag=f"{title} class distribution", text_string=text_dist) def _analyze_object_size_distribution(self, labels: list, title: str): """ This function will add two plots to the tensorboard. one is a 2D histogram and the other is a scatter plot. 
in both cases the X axis is the object width and Y axis is the object width (both normalized by image size) :param labels: all the labels of the dataset of the shape [class_label, x_center, y_center, w, h] :param title: the dataset title """ # histogram plot hist, xedges, yedges = np.histogram2d(labels[:, 4], labels[:, 3], 50) # x and y are deliberately switched fig = plt.figure(figsize=(10, 6)) fig.suptitle(f'{title} boxes w/h distribution') ax = fig.add_subplot(121) ax.set_xlabel('W', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_ylabel('H', fontsize=STAT_LOGGER_FONT_SIZE) plt.imshow(np.log(hist + 1), interpolation='nearest', origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) # scatter plot if len(labels) > 10000: # we randomly sample just 10000 objects so that the scatter plot will not get too dense labels = labels[np.random.randint(0, len(labels) - 1, 10000)] ax = fig.add_subplot(122) ax.set_xlabel('W', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_ylabel('H', fontsize=STAT_LOGGER_FONT_SIZE) plt.scatter(labels[:, 3], labels[:, 4], marker='.') self.sg_logger.add_figure(tag=f'{title} boxes w/h distribution', figure=fig) @staticmethod def _get_rect(w, h): min_w = w / 4.0 min_h = h / 4.0 return Rectangle((min_w, min_h), w * 4 - min_w, h * 4 - min_h, linewidth=1, edgecolor='b', facecolor='none') @staticmethod def _get_score(anchors: np.ndarray, points: np.ndarray, image_size: int): """ Calculate the ratio (and 1/ratio) between each anchor width and height and each point (representing a possible object width and height). i.e. for an anchor with w=10,h=20 the point w=11,h=25 will have the ratios 11/10=1.1 and 25/20=1.25 or 10/11=0.91 and 20/25=0.8 respectively :param anchors: array of anchors of the shape [2,N] :param points: array of points of the shape [2,M] :param image_size the size of the input image :returns: an array of size [image_size - 1, image_size - 1] where each cell i,j represent the minimum ratio for that cell (point) from all anchors """ ratio = anchors[:, :, None] / points[:, ] inv_ratio = 1 / ratio min_ratio = 1 - np.minimum(ratio, inv_ratio) min_ratio = np.max(min_ratio, axis=1) to_closest_anchor = np.min(min_ratio, axis=0) to_closest_anchor[to_closest_anchor > 0.75] = 2 return to_closest_anchor.reshape(image_size - 1, -1) def _analyze_anchors_coverage(self, anchors: list, image_size: int, labels: list, title: str): """ This function will add anchors coverage plots to the tensorboard. 
:param anchors: a list of anchors :param image_size: the input image size for this training :param labels: all the labels of the dataset of the shape [class_label, x_center, y_center, w, h] :param title: the dataset title """ fig = plt.figure(figsize=(12, 5)) fig.suptitle(f'{title} anchors coverage') # box style plot ax = fig.add_subplot(121) ax.set_xlabel('W', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_ylabel('H', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_xlim([0, image_size]) ax.set_ylim([0, image_size]) anchors = np.array(anchors).reshape(-1, 2) for i in range(len(anchors)): rect = self._get_rect(anchors[i][0], anchors[i][1]) rect.set_alpha(0.3) rect.set_facecolor([random.random(), random.random(), random.random(), 0.3]) ax.add_patch(rect) # distance from anchor plot ax = fig.add_subplot(122) ax.set_xlabel('W', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_ylabel('H', fontsize=STAT_LOGGER_FONT_SIZE) x = np.arange(1, image_size, 1) y = np.arange(1, image_size, 1) xx, yy = np.meshgrid(x, y, sparse=False) points = np.concatenate([xx.reshape(1, -1), yy.reshape(1, -1)]) color = self._get_score(anchors, points, image_size) ax.set_xlabel('W', fontsize=STAT_LOGGER_FONT_SIZE) ax.set_ylabel('H', fontsize=STAT_LOGGER_FONT_SIZE) plt.imshow(color, interpolation='nearest', origin='lower', extent=[0, image_size, 0, image_size]) # calculate the coverage for the dataset labels cover_masks = [] for i in range(len(anchors)): w_max = (anchors[i][0] / image_size) * 4 w_min = (anchors[i][0] / image_size) * 0.25 h_max = (anchors[i][1] / image_size) * 4 h_min = (anchors[i][1] / image_size) * 0.25 cover_masks.append(np.logical_and( np.logical_and(np.logical_and(labels[:, 3] < w_max, labels[:, 3] > w_min), labels[:, 4] < h_max), labels[:, 4] > h_min)) cover_masks = np.stack(cover_masks) coverage = np.count_nonzero(np.any(cover_masks, axis=0)) / len(labels) self.sg_logger.add_figure(tag=f'{title} anchors coverage', figure=fig) return coverage def get_color_augmentation(rand_augment_config_string: str, color_jitter: tuple, crop_size=224, img_mean=[0.485, 0.456, 0.406]): """ Returns color augmentation class. As these augmentation cannot work on top one another, only one is returned according to rand_augment_config_string :param rand_augment_config_string: string which defines the auto augment configurations. If none, color jitter will be returned. For possibile values see auto_augment.py :param color_jitter: tuple for color jitter value. :param crop_size: relevant only for auto augment :param img_mean: relevant only for auto augment :return: RandAugment transform or ColorJitter """ if rand_augment_config_string: auto_augment_params = dict(translate_const=int(crop_size * 0.45), img_mean=tuple([min(255, round(255 * x)) for x in img_mean])) color_augmentation = rand_augment_transform(rand_augment_config_string, auto_augment_params) else: # RandAugment includes colorjitter like augmentations, both cannot be applied together. color_augmentation = transforms.ColorJitter(*color_jitter) return color_augmentation
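A compact, self-contained sketch of the two-pass per-channel mean/std computation performed by get_mean_and_std_torch above, run on a toy tensor instead of a DataLoader.

import torch

images = torch.rand(8, 3, 32, 32)                 # toy stand-in for a dataset batch
n, _, h, w = images.shape
mean = images.sum(dim=(0, 2, 3), keepdim=True) / (n * h * w)
var = (images - mean).pow(2).sum(dim=(0, 2, 3), keepdim=True) / (n * h * w - 1)
std = var.sqrt()
# agrees with torch's built-in per-channel statistics up to floating point error
assert torch.allclose(mean.view(-1), images.mean(dim=(0, 2, 3)), atol=1e-4)
assert torch.allclose(std.view(-1), images.std(dim=(0, 2, 3)), atol=1e-4)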
Python/42.TrappingRainWater.py
nizD/LeetCode-Solutions
263
11118463
''' LeetCode Link: https://leetcode.com/problems/trapping-rain-water Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining. For the elevation map [0,1,0,2,1,0,1,3,2,1,2,1], 6 units of rain water are trapped. Example: Input: [0,1,0,2,1,0,1,3,2,1,2,1] Output: 6 Solution: Scan with two pointers and maintain the max height seen from the left and from the right separately; treat each as the one-sided wall of a partial container. Always advance from the lower side: if the current left height is lower, we fill water into the left bin. Repeat until the two pointers meet. Time Complexity: O(n) Space Complexity: O(1) ''' from typing import List class Solution: def trap(self, heights: List[int]) -> int: water = 0 leftMaxHeight = 0 rightMaxHeight = 0 start = 0 end = len(heights) - 1 while start < end: if heights[start] < heights[end]: if heights[start] > leftMaxHeight: leftMaxHeight = heights[start] else: water += leftMaxHeight - heights[start] start += 1 else: if heights[end] > rightMaxHeight: rightMaxHeight = heights[end] else: water += rightMaxHeight - heights[end] end -= 1 return water
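A quick usage check of the two-pointer solution above, using the docstring's own example input.

heights = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]
assert Solution().trap(heights) == 6   # 6 units of trapped water, as stated above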
label_studio/core/serializers.py
fpaupier/label-studio
8,264
11118481
import copy from rest_framework import serializers class SerializerOption: def __init__(self, data): self.data = copy.deepcopy(data) self._model_class = None self._base_serializer = serializers.ModelSerializer self._serializer_class = None self._fields = '__all__' self._exclude = None self._field_options = {} self._nested_fields = {} if not isinstance(data, dict): raise ValueError('Data has to be dict') self._serializer_class = self.data.get('serializer_class', None) self._model_class = self.data.get('model_class', None) if self._serializer_class is None and self._model_class is None: raise ValueError('Pass serializer_class or model_class') self._base_serializer = self.data.get('base_serializer', serializers.ModelSerializer) self._fields = self.data.get('fields', None) self._exclude = self.data.get('exclude', None) if self._fields and self._exclude: raise ValueError("Fields and exclude can't be passed simultaneously") if self._exclude is None: self._fields = '__all__' self._field_options = self.data.get('field_options', {}) for field_key, field_value in data.get('nested_fields', {}).items(): self._nested_fields[field_key] = SerializerOption(field_value) @property def serializer_class(self): return self._serializer_class @property def model_class(self): return self._model_class @property def base_serializer(self): return self._base_serializer @property def fields(self): return self._fields @property def exclude(self): return self._exclude @property def nested_fields(self): return self._nested_fields @property def field_options(self): return self._field_options def generate_serializer(option): """ Return serializer by option object: """ if option.serializer_class: ResultClass = option.serializer_class else: class ResultClass(option.base_serializer): class Meta: model = option.model_class fields = option.fields exclude = option.exclude def get_fields(self): fields = super().get_fields() for key, value in option.nested_fields.items(): fields[key] = generate_serializer(value) return fields return ResultClass(**option.field_options)
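A hedged usage sketch of the option-driven serializer factory above; Project and User are hypothetical Django models, not part of label-studio's real schema.

option = SerializerOption({
    'model_class': Project,                            # hypothetical model
    'nested_fields': {
        'owner': {'model_class': User,                  # hypothetical related model
                  'exclude': ('password',),             # hypothetical field to hide
                  'field_options': {'read_only': True}},
    },
})
serializer = generate_serializer(option)                # a configured ModelSerializer instance
# get_fields() above swaps in a nested serializer for 'owner' built from its own
# SerializerOption. Note: the constructor resets 'fields' to '__all__' whenever
# 'exclude' is absent, so an explicit fields list is effectively ignored.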
project/utils/general.py
MahjongRepository/tenhou-python-bot
201
11118495
import random import string from typing import List from mahjong.constants import EAST from mahjong.utils import is_honor, is_man, is_pin, is_sou, simplify # TODO move to mahjong lib def is_sangenpai(tile_34): return tile_34 >= 31 # TODO move to mahjong lib def is_tiles_same_suit(first_tile_34, second_tile_34): if is_pin(first_tile_34) and is_pin(second_tile_34): return True if is_man(first_tile_34) and is_man(second_tile_34): return True if is_sou(first_tile_34) and is_sou(second_tile_34): return True return False # TODO move to mahjong lib def is_dora_connector(tile_136: int, dora_indicators_136: List[int]) -> bool: tile_34 = tile_136 // 4 if is_honor(tile_34): return False for dora_indicator in dora_indicators_136: dora_indicator_34 = dora_indicator // 4 if not is_tiles_same_suit(dora_indicator_34, tile_34): continue simplified_tile = simplify(tile_34) simplified_dora_indicator = simplify(dora_indicator_34) if simplified_dora_indicator - 1 == simplified_tile: return True if simplified_dora_indicator + 1 == simplified_tile: return True return False def make_random_letters_and_digit_string(length=15): random_chars = string.ascii_lowercase + string.digits return "".join(random.choice(random_chars) for _ in range(length)) def revealed_suits_tiles(player, tiles_34): """ Return all reviled tiles separated by suits for provided tiles list """ return _suits_tiles_helper( tiles_34, lambda _tile_34_index, _tiles_34: player.number_of_revealed_tiles(_tile_34_index, _tiles_34) ) def separate_tiles_by_suits(tiles_34): """ Return tiles separated by suits for provided tiles list """ return _suits_tiles_helper(tiles_34, lambda _tile_34_index, _tiles_34: _tiles_34[_tile_34_index]) def _suits_tiles_helper(tiles_34, total_tiles_lambda): """ Separate tiles by suit """ suits = [ [0] * 9, [0] * 9, [0] * 9, ] for tile_34_index in range(0, EAST): total_tiles = total_tiles_lambda(tile_34_index, tiles_34) if not total_tiles: continue suit_index = None simplified_tile = simplify(tile_34_index) if is_man(tile_34_index): suit_index = 0 if is_pin(tile_34_index): suit_index = 1 if is_sou(tile_34_index): suit_index = 2 suits[suit_index][simplified_tile] += total_tiles return suits
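A small illustration of is_dora_connector above. The concrete ids assume the mahjong library's usual encodings (34-format man tiles are 0..8, honours 27..33, and a 136-format id is the 34-format id times 4); treat those numbers as an assumption.

five_man_136 = 4 * 4    # 5 man as the dora indicator
assert is_dora_connector(5 * 4, [five_man_136]) is True    # 6 man, one step above
assert is_dora_connector(3 * 4, [five_man_136]) is True    # 4 man, one step below
assert is_dora_connector(6 * 4, [five_man_136]) is False   # 7 man, two steps away
assert is_dora_connector(31 * 4, [five_man_136]) is False  # honour tiles never connect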
cvxpylayers/jax/__init__.py
bamos/cvxpylayers
1,287
11118499
<gh_stars>1000+ from cvxpylayers.jax.cvxpylayer import CvxpyLayer # noqa: F401
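The module above only re-exports CvxpyLayer. A minimal construction sketch, assuming the standard cvxpylayers calling convention (the problem must be parameterized and DPP-compliant):

import cvxpy as cp
import jax.numpy as jnp
from cvxpylayers.jax import CvxpyLayer

m, n = 3, 2
A, b, x = cp.Parameter((m, n)), cp.Parameter(m), cp.Variable(n)
problem = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)))
layer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
solution, = layer(jnp.ones((m, n)), jnp.ones(m))   # differentiable least-squares solve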
test/components/circ_ref.py
sytelus/longview
3,453
11118521
<reponame>sytelus/longview import tensorwatch as tw import objgraph, time #pip install objgraph cli = tw.WatcherClient() time.sleep(10) del cli import gc gc.collect() import time time.sleep(2) objgraph.show_backrefs(objgraph.by_type('WatcherClient'), refcounts=True, filename='b.png')
tests/error/__init__.py
KingDarBoja/graphql-core
590
11118534
"""Tests for graphql.error"""