"""helpers.py: tests global scratch space"""
# TODO: move root/paths to pytest official global spaces
from os import path
import prosper.common.prosper_config as p_config
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
APP_CONFIG = p_config.ProsperConfig(path.join(ROOT, '{{cookiecutter.library_name}}', 'app.cfg'))
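
# A minimal sketch (illustrative, not existing project code) of the TODO above:
# the same scratch objects could live in a session-scoped pytest fixture
# (typically in conftest.py) instead of module-level globals.
import pytest


@pytest.fixture(scope='session')
def app_config():
    """Session-wide ProsperConfig built from the project-root app.cfg."""
    return p_config.ProsperConfig(
        path.join(ROOT, '{{cookiecutter.library_name}}', 'app.cfg'))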
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from spack import *
from spack.package_test import compare_output_file, compile_c_and_execute
class Openblas(MakefilePackage):
"""OpenBLAS: An optimized BLAS library"""
homepage = 'https://www.openblas.net'
url = 'https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz'
git = 'https://github.com/xianyi/OpenBLAS.git'
version('develop', branch='develop')
version('0.3.19', sha256='947f51bfe50c2a0749304fbe373e00e7637600b0a47b78a51382aeb30ca08562')
version('0.3.18', sha256='1632c1e8cca62d8bed064b37747e331a1796fc46f688626337362bf0d16aeadb')
version('0.3.17', sha256='df2934fa33d04fd84d839ca698280df55c690c86a5a1133b3f7266fce1de279f')
version('0.3.16', sha256='fa19263c5732af46d40d3adeec0b2c77951b67687e670fb6ba52ea3950460d79')
version('0.3.15', sha256='30a99dec977594b387a17f49904523e6bc8dd88bd247266e83485803759e4bbe')
version('0.3.14', sha256='d381935d26f9cae8e4bbd7d7f278435adf8e3a90920edf284bb9ad789ee9ad60')
version('0.3.13', sha256='79197543b17cc314b7e43f7a33148c308b0807cd6381ee77f77e15acf3e6459e')
version('0.3.12', sha256='65a7d3a4010a4e3bd5c0baa41a234797cd3a1735449a4a5902129152601dc57b')
version('0.3.11', sha256='bc4617971179e037ae4e8ebcd837e46db88422f7b365325bd7aba31d1921a673')
version('0.3.10', sha256='0484d275f87e9b8641ff2eecaa9df2830cbe276ac79ad80494822721de6e1693')
version('0.3.9', sha256='17d4677264dfbc4433e97076220adc79b050e4f8a083ea3f853a53af253bc380')
version('0.3.8', sha256='8f86ade36f0dbed9ac90eb62575137388359d97d8f93093b38abe166ad7ef3a8')
version('0.3.7', sha256='bde136122cef3dd6efe2de1c6f65c10955bbb0cc01a520c2342f5287c28f9379')
version('0.3.6', sha256='e64c8fe083832ffbc1459ab6c72f71d53afd3b36e8497c922a15a06b72e9002f')
version('0.3.5', sha256='0950c14bd77c90a6427e26210d6dab422271bc86f9fc69126725833ecdaa0e85')
version('0.3.4', sha256='4b4b4453251e9edb5f57465bf2b3cf67b19d811d50c8588cdf2ea1f201bb834f')
version('0.3.3', sha256='49d88f4494ae780e3d7fa51769c00d982d7cdb73e696054ac3baa81d42f13bab')
version('0.3.2', sha256='e8ba64f6b103c511ae13736100347deb7121ba9b41ba82052b1a018a65c0cb15')
version('0.3.1', sha256='1f5e956f35f3acdd3c74516e955d797a320c2e0135e31d838cbdb3ea94d0eb33')
version('0.3.0', sha256='cf51543709abe364d8ecfb5c09a2b533d2b725ea1a66f203509b21a8e9d8f1a1')
version('0.2.20', sha256='5ef38b15d9c652985774869efd548b8e3e972e1e99475c673b25537ed7bcf394')
version('0.2.19', sha256='9c40b5e4970f27c5f6911cb0a28aa26b6c83f17418b69f8e5a116bb983ca8557')
version('0.2.18', sha256='7d9f8d4ea4a65ab68088f3bb557f03a7ac9cb5036ef2ba30546c3a28774a4112')
version('0.2.17', sha256='0fe836dfee219ff4cadcc3567fb2223d9e0da5f60c7382711fb9e2c35ecf0dbf')
version('0.2.16', sha256='766f350d0a4be614812d535cead8c816fc3ad3b9afcd93167ea5e4df9d61869b')
version('0.2.15', sha256='73c40ace5978282224e5e122a41c8388c5a19e65a6f2329c2b7c0b61bacc9044')
variant('ilp64', default=False, description='Force 64-bit Fortran native integers')
variant('pic', default=True, description='Build position independent code')
variant('shared', default=True, description='Build shared libraries')
variant('consistent_fpcsr', default=False, description='Synchronize FP CSR between threads (x86/x86_64 only)')
variant('bignuma', default=False, description='Enable experimental support for up to 1024 CPUs/Cores and 128 numa nodes')
variant('symbol_suffix', default='none', description='Set a symbol suffix')
variant('locking', default=True, description='Build with thread safety')
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
# virtual dependency
provides('blas')
provides('lapack')
    # OpenBLAS >= 0.3.0 has an official way to disable internal parallel builds
patch('make.patch', when='@0.2.16:0.2.20')
# This patch is in a pull request to OpenBLAS that has not been handled
# https://github.com/xianyi/OpenBLAS/pull/915
# UPD: the patch has been merged starting version 0.2.20
patch('openblas_icc.patch', when='@:0.2.19%intel')
patch('openblas_icc_openmp.patch', when='@:0.2.20%[email protected]:')
patch('openblas_icc_fortran.patch', when='@:0.3.14%[email protected]:')
patch('openblas_icc_fortran2.patch', when='@:0.3.14%[email protected]:')
# See https://github.com/spack/spack/issues/15385
patch('lapack-0.3.9-xerbl.patch', when='@0.3.8:0.3.9 %intel')
# Fixes compilation error on POWER8 with GCC 7
# https://github.com/xianyi/OpenBLAS/pull/1098
patch('power8.patch', when='@0.2.18:0.2.19 %[email protected]: target=power8')
# Change file comments to work around clang 3.9 assembler bug
# https://github.com/xianyi/OpenBLAS/pull/982
patch('openblas0.2.19.diff', when='@0.2.19')
# Fix CMake export symbol error
# https://github.com/xianyi/OpenBLAS/pull/1703
patch('openblas-0.3.2-cmake.patch', when='@0.3.1:0.3.2')
    # Disable experimental TLS code that led to many threading issues
# https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465
# https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174
# https://github.com/xianyi/OpenBLAS/pull/1765
patch('https://github.com/xianyi/OpenBLAS/commit/4d183e5567346f80f2ef97eb98f8601c47f8cb56.patch',
sha256='714aea33692304a50bd0ccde42590c176c82ded4a8ac7f06e573dc8071929c33',
when='@0.3.3')
# Fix parallel build issues on filesystems
# with missing sub-second timestamp resolution
patch('https://github.com/xianyi/OpenBLAS/commit/79ea839b635d1fd84b6ce8a47e086f01d64198e6.patch',
sha256='f1b066a4481a50678caeb7656bf3e6764f45619686ac465f257c8017a2dc1ff0',
when='@0.3.0:0.3.3')
# Fix https://github.com/xianyi/OpenBLAS/issues/2431
# Patch derived from https://github.com/xianyi/OpenBLAS/pull/2424
patch('openblas-0.3.8-darwin.patch', when='@0.3.8 platform=darwin')
# Fix ICE in LLVM 9.0.0 https://github.com/xianyi/OpenBLAS/pull/2329
# Patch as in https://github.com/xianyi/OpenBLAS/pull/2597
patch('openblas_appleclang11.patch', when='@0.3.8:0.3.9 %[email protected]')
    # There was an error in Reference-LAPACK that is triggered by Xcode 12
# fixed upstream by https://github.com/xianyi/OpenBLAS/pull/2808 and
# should be included in post 0.3.10 versions. Application to earlier
# versions was not tested.
# See also https://github.com/xianyi/OpenBLAS/issues/2870
patch('https://github.com/xianyi/OpenBLAS/commit/f42e84d46c52f4ee1e05af8f365cd85de8a77b95.patch',
sha256='7b1eec78d1b1f55d3a3f1249696be7da0e2e1cd3b7fadae852e97dc860f8a7fd',
when='@0.3.8:0.3.10 %[email protected]:')
# Add conditions to f_check to determine the Fujitsu compiler
# See https://github.com/xianyi/OpenBLAS/pull/3010
# UPD: the patch has been merged starting version 0.3.13
patch('openblas_fujitsu.patch', when='@:0.3.10 %fj')
patch('openblas_fujitsu_v0.3.11.patch', when='@0.3.11:0.3.12 %fj')
patch('openblas_fujitsu2.patch', when='@0.3.10:0.3.12 %fj')
# Use /usr/bin/env perl in build scripts
patch('0001-use-usr-bin-env-perl.patch', when='@:0.3.13')
# See https://github.com/spack/spack/issues/19932#issuecomment-733452619
conflicts('%[email protected]:7.3,8.0.0:8.2', when='@0.3.11:')
# See https://github.com/xianyi/OpenBLAS/issues/3074
conflicts('%gcc@:10.1', when='@0.3.13 target=ppc64le:')
# See https://github.com/spack/spack/issues/3036
conflicts('%intel@16', when='@0.2.15:0.2.19')
conflicts('+consistent_fpcsr', when='threads=none',
msg='FPCSR consistency only applies to multithreading')
conflicts('threads=pthreads', when='~locking', msg='Pthread support requires +locking')
conflicts('threads=openmp', when='%apple-clang', msg="Apple's clang does not support OpenMP")
conflicts('threads=openmp @:0.2.19', when='%clang', msg='OpenBLAS @:0.2.19 does not support OpenMP with clang!')
depends_on('perl', type='build')
@property
def parallel(self):
# unclear whether setting `-j N` externally was supported before 0.3
return self.spec.version >= Version('0.3.0')
@run_before('edit')
def check_compilers(self):
        # As of 06/2016 there is no mechanism to specify that packages which
        # depend on BLAS/LAPACK need C and/or Fortran symbols. For now,
        # require both.
if self.compiler.fc is None:
raise InstallError(
'OpenBLAS requires both C and Fortran compilers!'
)
@staticmethod
def _read_targets(target_file):
"""Parse a list of available targets from the OpenBLAS/TargetList.txt
file.
"""
micros = []
re_target = re.compile(r'^[A-Z0-9_]+$')
for line in target_file:
match = re_target.match(line)
if match is not None:
micros.append(line.strip().lower())
return micros
def _microarch_target_args(self):
"""Given a spack microarchitecture and a list of targets found in
OpenBLAS' TargetList.txt, determine the best command-line arguments.
"""
# Read available openblas targets
targetlist_name = join_path(self.stage.source_path, "TargetList.txt")
if os.path.exists(targetlist_name):
with open(targetlist_name) as f:
available_targets = self._read_targets(f)
else:
available_targets = []
# Get our build microarchitecture
microarch = self.spec.target
# List of arguments returned by this function
args = []
# List of available architectures, and possible aliases
openblas_arch = set(['alpha', 'arm', 'ia64', 'mips', 'mips64',
'power', 'riscv64', 'sparc', 'zarch'])
openblas_arch_map = {
'amd64': 'x86_64',
'powerpc64': 'power',
'i386': 'x86',
'aarch64': 'arm64',
}
openblas_arch.update(openblas_arch_map.keys())
openblas_arch.update(openblas_arch_map.values())
# Add spack-only microarchitectures to list
skylake = set(["skylake", "skylake_avx512"])
available_targets = set(available_targets) | skylake | openblas_arch
# Find closest ancestor that is known to build in blas
if microarch.name not in available_targets:
for microarch in microarch.ancestors:
if microarch.name in available_targets:
break
if self.version >= Version("0.3"):
# 'ARCH' argument causes build errors in older OpenBLAS
# see https://github.com/spack/spack/issues/15385
arch_name = microarch.family.name
if arch_name in openblas_arch:
# Apply possible spack->openblas arch name mapping
arch_name = openblas_arch_map.get(arch_name, arch_name)
args.append('ARCH=' + arch_name)
if microarch.vendor == 'generic' and microarch.name != 'riscv64':
# User requested a generic platform, or we couldn't find a good
# match for the requested one. Allow OpenBLAS to determine
# an optimized kernel at run time, including older CPUs, while
# forcing it not to add flags for the current host compiler.
args.append('DYNAMIC_ARCH=1')
if self.spec.version >= Version('0.3.12'):
# These are necessary to prevent OpenBLAS from targeting the
                    # host architecture on newer versions of OpenBLAS, but they
                    # cause build errors on 0.3.5.
args.extend(['DYNAMIC_OLDER=1', 'TARGET=GENERIC'])
elif microarch.name in skylake:
# Special case for renaming skylake family
args.append('TARGET=SKYLAKEX')
if microarch.name == "skylake":
# Special case for disabling avx512 instructions
args.append('NO_AVX512=1')
elif microarch.name == 'riscv64':
# Special case for renaming the generic riscv64 uarch to the
# corresponding OpenBLAS target. riscv64 does not yet support
# DYNAMIC_ARCH or TARGET=GENERIC. Once it does, this special
# case can go away.
args.append('TARGET=' + "RISCV64_GENERIC")
else:
args.append('TARGET=' + microarch.name.upper())
return args
@property
def make_defs(self):
# Configure fails to pick up fortran from FC=/abs/path/to/fc, but
# works fine with FC=/abs/path/to/gfortran.
# When mixing compilers make sure that
# $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable
# names and hack them inside lib/spack/spack/compilers/<compiler>.py
make_defs = [
'CC={0}'.format(spack_cc),
'FC={0}'.format(spack_fc),
]
# force OpenBLAS to use externally defined parallel build
if self.spec.version < Version('0.3'):
make_defs.append('MAKE_NO_J=1') # flag defined by our make.patch
else:
make_defs.append('MAKE_NB_JOBS=0') # flag provided by OpenBLAS
# Add target and architecture flags
make_defs += self._microarch_target_args()
if '~shared' in self.spec:
if '+pic' in self.spec:
make_defs.extend([
'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
'FFLAGS={0}'.format(self.compiler.f77_pic_flag)
])
make_defs += ['NO_SHARED=1']
# fix missing _dggsvd_ and _sggsvd_
if self.spec.satisfies('@0.2.16'):
make_defs += ['BUILD_LAPACK_DEPRECATED=1']
# serial, but still thread-safe version
if self.spec.satisfies('@0.3.7:'):
if '+locking' in self.spec:
make_defs += ['USE_LOCKING=1']
else:
make_defs += ['USE_LOCKING=0']
# Add support for multithreading
if self.spec.satisfies('threads=openmp'):
make_defs += ['USE_OPENMP=1', 'USE_THREAD=1']
elif self.spec.satisfies('threads=pthreads'):
make_defs += ['USE_OPENMP=0', 'USE_THREAD=1']
else:
make_defs += ['USE_OPENMP=0', 'USE_THREAD=0']
# 64bit ints
if '+ilp64' in self.spec:
make_defs += ['INTERFACE64=1']
suffix = self.spec.variants['symbol_suffix'].value
if suffix != 'none':
make_defs += ['SYMBOLSUFFIX={0}'.format(suffix)]
# Synchronize floating-point control and status register (FPCSR)
# between threads (x86/x86_64 only).
if '+consistent_fpcsr' in self.spec:
make_defs += ['CONSISTENT_FPCSR=1']
# Flang/f18 does not provide ETIME as an intrinsic
if self.spec.satisfies('%clang'):
make_defs.append('TIMER=INT_CPU_TIME')
# Prevent errors in `as` assembler from newer instructions
if self.spec.satisfies('%gcc@:4.8.4'):
make_defs.append('NO_AVX2=1')
        # The Fujitsu compiler does not add the Fortran runtime to the rpath.
if self.spec.satisfies('%fj'):
make_defs.append('LDFLAGS=-lfj90i -lfj90f -lfjsrcinfo -lelf')
# Newer versions of openblas will try to find ranlib in the compiler's
# prefix, for instance, .../lib/spack/env/gcc/ranlib, which will fail.
if self.spec.satisfies('@0.3.13:'):
make_defs.append('RANLIB=ranlib')
if self.spec.satisfies('+bignuma'):
make_defs.append('BIGNUMA=1')
return make_defs
@property
def headers(self):
# As in netlib-lapack, the only public headers for cblas and lapacke in
# openblas are cblas.h and lapacke.h. The remaining headers are private
# headers either included in one of these two headers, or included in
# one of the source files implementing functions declared in these
# headers.
return find_headers(['cblas', 'lapacke'], self.prefix.include)
@property
def libs(self):
spec = self.spec
# Look for openblas{symbol_suffix}
name = 'libopenblas'
search_shared = bool(spec.variants['shared'].value)
suffix = spec.variants['symbol_suffix'].value
if suffix != 'none':
name += suffix
return find_libraries(name, spec.prefix, shared=search_shared, recursive=True)
@property
def build_targets(self):
targets = ['libs', 'netlib']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return self.make_defs + targets
@run_after('build')
@on_package_attributes(run_tests=True)
def check_build(self):
make('tests', *self.make_defs, parallel=False)
@property
def install_targets(self):
make_args = [
'install',
'PREFIX={0}'.format(self.prefix),
]
return make_args + self.make_defs
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
spec = self.spec
# Openblas may pass its own test but still fail to compile Lapack
# symbols. To make sure we get working Blas and Lapack, do a small
# test.
source_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.c')
blessed_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.output')
include_flags = spec['openblas'].headers.cpp_flags
link_flags = spec['openblas'].libs.ld_flags
if self.compiler.name == 'intel':
link_flags += ' -lifcore'
if self.spec.satisfies('threads=pthreads'):
link_flags += ' -lpthread'
if spec.satisfies('threads=openmp'):
link_flags += ' -lpthread ' + self.compiler.openmp_flag
output = compile_c_and_execute(
source_file, [include_flags], link_flags.split()
)
compare_output_file(output, blessed_file)
|
# -*- coding: utf-8 -*-
import sys
import os
import re
import logging
import numpy
from pandas import DataFrame
from pylie.methods.sybyl import AA_SYBYL_TYPES
if sys.version_info[0] < 3:
from StringIO import StringIO
import urlparse
import urllib2 as urllib
else:
    from io import StringIO
    from urllib import parse as urlparse
    from urllib import request as urllib
logger = logging.getLogger('pylie')
def _open_anything(source):
    # Check if source is a file-like object already opened with 'open' and return it
    if hasattr(source, 'read'):
        logger.debug("Reading file %s from file object" % getattr(source, 'name', source))
        return source
    # Check if the source is a path to a file on disk and open it
    elif os.path.isfile(source):
        logger.debug("Reading file from disk {0}".format(source))
        return open(source, 'r')
# Check if source is standard input
elif source == '-':
logger.debug("Reading file from standard input")
return sys.stdin
else:
# Check if source is a URL and try to open
try:
if urlparse.urlparse(source)[0] == 'http':
result = urllib.urlopen(source)
logger.debug("Reading file from URL with access info:\n %s" % result.info())
return result
except IOError:
logger.info("Unable to access URL")
# Check if source is file and try to open else regard as string
try:
return open(source)
except IOError:
logger.debug("Unable to access as file, try to parse as string")
return StringIO(str(source))
def read_gromacs_energy_file(file_or_buffer, columns=None, lowercase=True):
"""
Read GROMACS molecular dynamics trajectory energy file
Import all data columns into a pandas DataFrame including the compulsory
    'FRAME' and 'Time' columns. Using `columns`, a column selection can be
specified next to the FRAME and Time columns.
:param file_or_buffer: GROMACS energy file path or file-like object
:param columns: selection of columns to import
:type columns: :py:list
:param lowercase: convert all column headers to lowercase.
:type lowercase: :py:bool
:return: energy trajectory as Pandas DataFrame
:rtype: :pandas:DataFrame
"""
# Which columns to extract. Always the first two, FRAME and Time
extract_columns = ['FRAME', 'Time']
if columns:
extract_columns.extend(columns)
# Open the input regardless of its type using open_anything
file_or_buffer = _open_anything(file_or_buffer)
# Try getting the headers. GROMACS header starts with '#'
header = file_or_buffer.readline()
if not header.startswith('#'):
        logger.warning("Not sure if this is a GROMACS energy file. Header line does not start with #")
# Frame and Time columns together with user specified ones should be present
header = header.split()[1:]
if set(extract_columns).intersection(set(header)) != set(extract_columns):
missing = set(extract_columns).difference(set(header))
logger.error("GROMACS energy file has no columns named: {0}".format(','.join(missing)))
return None
# If no columns selection defined, extract everything
if not columns:
extract_columns = header
# Try parsing the numeric data into a numpy array
try:
data = numpy.loadtxt(file_or_buffer, comments='#')
except IOError:
logger.error("Unable to import trajectory data from GROMACS energy file {0}".format(file_or_buffer.name))
return None
# Extract relevant columns and return as Pandas DataFrame
header_indexes = [header.index(n) for n in extract_columns]
extract_columns = ['frame', 'time'] + extract_columns[2:]
df = DataFrame(data[:, header_indexes], columns=extract_columns)
# Lowercase headers?
if lowercase:
df.columns = [col.lower() for col in extract_columns]
name = getattr(file_or_buffer, 'name', 'string')
logger.debug("Imported Gromacs MD energy data from {0}, {1} datapoints".format(name, df.shape))
file_or_buffer.close()
return df
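
# Illustrative sketch (not part of the original module): exercise
# read_gromacs_energy_file with a tiny, made-up in-memory energy table.
def _example_read_gromacs_energy():
    buffer = StringIO(
        "# FRAME Time Potential Kinetic\n"
        "0 0.0 -1000.0 250.0\n"
        "1 2.0 -998.5 249.1\n"
    )
    # Returns a DataFrame with columns ['frame', 'time', 'potential']
    return read_gromacs_energy_file(buffer, columns=['Potential'])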
def read_lie_etox_file(file_or_buffer):
# Open the input regardless of its type using open_anything
file_or_buffer = _open_anything(file_or_buffer)
ref_affinity = 1
# Import data from file. Check for pose consistency
data = []
cursor = 4
if ref_affinity is None:
cursor -= 1
for index, line in enumerate(file_or_buffer.readlines()):
line = line.strip()
if len(line) and not line.startswith('#'):
line = [float(n) for n in line.split()]
vdw = line[cursor:len(line):2]
coul = line[cursor + 1:len(line):2]
if len(vdw) != len(coul) or (len(vdw) + len(coul)) % 2 != 0:
logger.error("Number of pose VdW energies not match Coul energies in line {0}".format(index))
else:
for pose in range(len(vdw)):
data.append(line[0:cursor] + [pose + 1, vdw[pose], coul[pose]])
file_or_buffer.close()
df = DataFrame(data,
columns=['case', 'ref_affinity', 'vdw_unbound', 'coul_unbound', 'poses', 'vdw_bound', 'coul_bound'])
logger.info("Imported eTox LIE data from {0}, {1} datapoints".format(file_or_buffer.name, df.shape))
return df
class MOL2Parser(object):
"""
Parse a Tripos MOL2 file format.
"""
def __init__(self, columns):
self.mol_dict = dict([(n, []) for n in columns])
def parse(self, mol_file):
"""
Parse MOL2 atom definition into named columns and return as
dictionary.
Currently parses one model only and expects the order of the
        columns to be respectively: atom number, atom name, x-coor,
y-coor, z-coor, SYBYL atom type, residue number, residue name
and charge.
        MOL2 is a free format (no fixed column width). There should be
at least one empty space between each subsequent value on a line.
The parser will raise an exception if this is not the case.
:param mol_file:
:return:
"""
read = False
model = 0
for line in mol_file.readlines():
if line.startswith('@<TRIPOS>ATOM'):
read = True
model += 1
continue
elif line.startswith('@<TRIPOS>BOND'):
read = False
break
if read:
                l = line.split()
                if len(l) < 9:
raise IOError('FormatError in mol2. Line: {0}'.format(line))
try:
self.mol_dict['atnum'].append(int(l[0]))
self.mol_dict['atname'].append(l[1].upper())
self.mol_dict['xcoor'].append(float(l[2]))
self.mol_dict['ycoor'].append(float(l[3]))
self.mol_dict['zcoor'].append(float(l[4]))
self.mol_dict['attype'].append(l[5])
self.mol_dict['resnum'].append(int(l[6]))
self.mol_dict['resname'].append(re.sub('{0}$'.format(l[6]), '', l[7]))
self.mol_dict['charge'].append(float(l[8]))
except ValueError as e:
raise IOError('FormatError in mol2. Line: {0}, error {1}'.format(line, e))
return self.mol_dict
class PDBParser(object):
def __init__(self, columns):
self.pdb_dict = dict([(n, []) for n in columns])
def parse(self, pdb_file):
atomline = re.compile('(ATOM)')
hetatmline = re.compile('HETATM')
modelline = re.compile('MODEL')
modelcount = 0
for line in pdb_file.readlines():
line = line[:-1]
if modelline.match(line):
modelcount += 1
continue
if atomline.match(line):
atomdict = self.__processatom(line, valuedict={'label': 'atom', 'model': modelcount})
atomdict['attype'] = self.__assign_sybyl_atomtype(atomdict)
for key, value in atomdict.items():
self.pdb_dict[key].append(value)
continue
if hetatmline.match(line):
atomdict = self.__processatom(line, valuedict={'label': 'hetatm', 'model': modelcount})
atomdict['attype'] = self.__assign_sybyl_atomtype(atomdict)
for key, value in atomdict.items():
self.pdb_dict[key].append(value)
return self.pdb_dict
@staticmethod
def __assign_sybyl_atomtype(valuedict):
"""
Add SYBYL atom type information.
Only supports predefined SYBYL types for common amino-acid atoms based
on the AA_SYBYL_TYPES dictionary.
"""
ra_id = '{0}-{1}'.format(valuedict.get('resname', ''), valuedict.get('atname', ''))
return AA_SYBYL_TYPES.get(ra_id, None)
def __processatom(self, line, valuedict=None):
"""Processes the atom line according to RCSB recomendations."""
if not valuedict:
valuedict = {}
valuedict['atnum'] = self.__processatomline(line, 12, minlen=6, vtype='int')
valuedict['atname'] = self.__processatomline(line, 16, minlen=12)
valuedict['atalt'] = self.__processatomline(line, 17, minlen=16)
valuedict['resname'] = self.__processatomline(line, 21, minlen=17)
valuedict['chain'] = self.__processatomline(line, 21)
valuedict['resnum'] = self.__processatomline(line, 26, minlen=22, vtype='int')
valuedict['resext'] = self.__processatomline(line, 27)
valuedict['xcoor'] = self.__processatomline(line, 38, minlen=30, vtype='float')
valuedict['ycoor'] = self.__processatomline(line, 46, minlen=38, vtype='float')
valuedict['zcoor'] = self.__processatomline(line, 54, minlen=46, vtype='float')
valuedict['occ'] = self.__processatomline(line, 60, minlen=54, vtype='float')
valuedict['b'] = self.__processatomline(line, 66, minlen=60, vtype='float')
valuedict['segid'] = self.__processatomline(line, 75, minlen=72)
valuedict['elem'] = self.__processatomline(line, 78, minlen=76)
return valuedict
@staticmethod
def __processatomline(line, maxlen, minlen=None, vtype='string'):
"""Test if a ATOM related parameter is found at its correct location within the ATOM line
(within the 'maxlen', 'minlen' character location identifiers). If it is found it is
converted to the appropriate type using the 'vtype' argument. If the type is a string
the letters are converted to upper case.
"""
if minlen is None:
            if len(line) <= maxlen or len(line[maxlen].strip()) == 0:
return None
else:
if vtype == 'int':
return int(line[maxlen])
elif vtype == 'float':
return float(line[maxlen])
else:
return line[maxlen].upper()
else:
if len(line) < maxlen or len(line[minlen:maxlen].strip()) == 0:
return None
else:
if vtype == 'int':
return int(line[minlen:maxlen])
elif vtype == 'float':
return float(line[minlen:maxlen])
else:
return (line[minlen:maxlen].strip()).upper()
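
# Illustrative usage sketch (not part of the original module): parse a tiny,
# made-up two-atom MOL2 fragment with MOL2Parser.
if __name__ == '__main__':
    example_mol2 = StringIO(
        "@<TRIPOS>ATOM\n"
        "1 N1 0.000 0.000 0.000 N.3 1 ALA1 -0.300\n"
        "2 CA 1.458 0.000 0.000 C.3 1 ALA1 0.100\n"
        "@<TRIPOS>BOND\n"
    )
    columns = ['atnum', 'atname', 'xcoor', 'ycoor', 'zcoor',
               'attype', 'resnum', 'resname', 'charge']
    parsed = MOL2Parser(columns).parse(example_mol2)
    print(parsed['atname'])  # -> ['N1', 'CA']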
|
from setuptools import setup
setup(
name = 'bplist',
packages = ['bplist'],
version = '1.0',
description = 'Apple binary Property List reader/writer for Python',
install_requires=[],
author = 'Vladimir Pouzanov',
author_email = '[email protected]',
url = 'https://github.com/farcaller/bplist-python',
keywords = ['bplist'],
classifiers = [],
license = 'MIT'
)
|
import os
import ctypes
import shutil
import time, datetime
def WriteFile(filename, content):
    with open(filename, 'a', encoding="UTF-8") as fw:
        fw.write(str(content) + '\n')
def countfiles():
path = r"C:\Users\Students\Desktop\批量转换"
ls = os.listdir(path)
count = 0
WriteFile("logs.txt", "文件列表如下:")
for i in ls:
if os.path.isfile(os.path.join(path,i)):
if not ".pdf" in i:
if ".ppt" in i or ".doc" in i:
WriteFile("logs.txt", " "+str(i))
count += 1
WriteFile("logs.txt", "[合计转换"+str(count)+"个文件]")
WriteFile("logs.txt", (datetime.datetime.now()).strftime("[%Y-%m-%d %H:%M:%S]")+" 准备开始转换任务")
countfiles()
|
from __future__ import print_function
from lime import lime_image, lime_text, lime_tabular
from aix360.algorithms.lbbe import LocalBBExplainer
class LimeTextExplainer(LocalBBExplainer):
"""
This class wraps the source class `LimeTextExplainer <https://lime-ml.readthedocs.io/en/latest/lime.html#module-lime.lime_text>`_
available in the `LIME <https://github.com/marcotcr/lime>`_ library.
Additional variables or functions from the source class can also be accessed via the 'explainer'
object variable that is initialized in '__init__' function of this class.
"""
def __init__(self, *argv, **kwargs):
"""
Initialize lime text explainer object.
"""
super(LimeTextExplainer, self).__init__(*argv, **kwargs)
self.explainer = lime_text.LimeTextExplainer(*argv, **kwargs)
def set_params(self, *argv, **kwargs):
"""
Optionally, set parameters for the explainer.
"""
pass
def explain_instance(self, *argv, **kwargs):
"""
Explain one or more input instances.
"""
self.explanation = self.explainer.explain_instance(*argv, **kwargs)
return (self.explanation)
class LimeImageExplainer(LocalBBExplainer):
"""
This class wraps the source class `LimeImageExplainer <https://lime-ml.readthedocs.io/en/latest/lime.html#module-lime.lime_image>`_
available in the `LIME <https://github.com/marcotcr/lime>`_ library.
Additional variables or functions from the source class can also be accessed via the 'explainer'
object variable that is initialized in '__init__' function of this class.
"""
def __init__(self, *argv, **kwargs):
"""
Initialize lime Image explainer object
"""
super(LimeImageExplainer, self).__init__(*argv, **kwargs)
self.explainer = lime_image.LimeImageExplainer(*argv, **kwargs)
def set_params(self, *argv, **kwargs):
"""
Optionally, set parameters for the explainer.
"""
pass
def explain_instance(self, *argv, **kwargs):
"""
Explain one or more input instances.
"""
self.explanation = self.explainer.explain_instance(*argv, **kwargs)
return (self.explanation)
class LimeTabularExplainer(LocalBBExplainer):
"""
This class wraps the source class `LimeTabularExplainer <https://lime-ml.readthedocs.io/en/latest/lime.html#module-lime.lime_tabular>`_
available in the `LIME <https://github.com/marcotcr/lime>`_ library.
Additional variables or functions from the source class can also be accessed via the 'explainer'
object variable that is initialized in '__init__' function of this class.
"""
def __init__(self, *argv, **kwargs):
"""
Initialize lime Tabular Explainer object
"""
super(LimeTabularExplainer, self).__init__(*argv, **kwargs)
self.explainer = lime_tabular.LimeTabularExplainer(*argv, **kwargs)
def set_params(self, verbose=0):
"""
Optionally, set parameters for the explainer.
"""
pass
def explain_instance(self, *argv, **kwargs):
"""
Explain one or more input instances.
"""
self.explanation = self.explainer.explain_instance(*argv, **kwargs)
        return (self.explanation)
|
import processing
layer = iface.activeLayer()
classes = {}
features = processing.features(layer)
for f in features:
attrs = f.attributes()
class_value = f['class']
if class_value in classes:
classes[class_value] += 1
else:
classes[class_value] = 1
print(classes)
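# Equivalent sketch using the standard library (assumes the same 'class' field):
# from collections import Counter
# classes = Counter(f['class'] for f in processing.features(layer))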
|
from flask import Blueprint, request, session, jsonify
from Utils import Config, MongoConn, JSONUtils, PostgreConn
from bson import ObjectId
from Wrapper import AuthWrapper
from datetime import datetime
bp = Blueprint('api_dbtable', __name__, url_prefix='/api/dbtable')
#create table
@bp.route('/', methods=['POST'])
@AuthWrapper.require_api_token
# @AuthWrapper.require_admin_right
def db_table_api_create():
json = request.json
try:
PostgreConn.create_table(json)
msg = {
'Welcome': session['user']['username'],
'Message': "Succesfully create table"
}
    except Exception:
        msg = {
            'Welcome': session['user']['username'],
            'Message': "Table already exists"
}
return jsonify(msg)
@bp.route('/select', methods=['POST'])
@AuthWrapper.require_api_token
def db_select():
json = request.json
rows = PostgreConn.db_table_select(json)
return JSONUtils.JSONEncoder().encode(rows)
@bp.route('/create',methods=['POST'])
@AuthWrapper.require_api_token
def db_create():
json = request.json
PostgreConn.db_table_create(json)
msg = {
'Welcome': session['user']['username'],
'Message': "Succesfully create table"
}
return jsonify(msg)
@bp.route('/drop', methods=['POST'])
@AuthWrapper.require_api_token
def db_drop():
json = request.json
PostgreConn.db_table_drop(json)
msg = {
'Welcome': session['user']['username'],
'Message': "Succesfully drop"
}
return jsonify(msg)
|
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script will run a simple boot exit test.
"""
import m5
from gem5.runtime import (
get_runtime_coherence_protocol,
get_runtime_isa,
)
from gem5.isas import ISA
from gem5.utils.requires import requires
from gem5.resources.resource import Resource
from gem5.coherence_protocol import CoherenceProtocol
from gem5.components.boards.x86_board import X86Board
from gem5.components.processors.cpu_types import CPUTypes
from gem5.components.processors.simple_processor import SimpleProcessor
from gem5.simulate.simulator import Simulator
import argparse
import importlib
parser = argparse.ArgumentParser(
description="A script to run the gem5 boot test. This test boots the "
"linux kernel."
)
parser.add_argument(
"-m",
"--mem-system",
type=str,
choices=("classic", "mi_example", "mesi_two_level"),
required=True,
help="The memory system.",
)
parser.add_argument(
"-n",
"--num-cpus",
type=int,
choices=(1, 2, 4, 8),
required=True,
help="The number of CPUs.",
)
parser.add_argument(
"-c",
"--cpu",
type=str,
choices=("kvm", "atomic", "timing", "o3"),
required=True,
help="The CPU type.",
)
parser.add_argument(
"-d",
"--dram-class",
type=str,
required=False,
default="DualChannelDDR3_1600",
help="The python class for the memory interface to use"
)
parser.add_argument(
"-b",
"--boot-type",
type=str,
choices=("systemd", "init"),
required=True,
help="The boot type.",
)
parser.add_argument(
"-t",
"--tick-exit",
type=int,
required=False,
help="The tick to exit the simulation. Note: using this may make the "
"selected boot-type selection pointless.",
)
parser.add_argument(
"-r",
"--resource-directory",
type=str,
required=False,
help="The directory in which resources will be downloaded or exist.",
)
args = parser.parse_args()
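# Illustrative invocation (the gem5 binary name and this script's filename are
# assumptions):
#
#   gem5.opt x86-boot-exit-run.py --mem-system classic --num-cpus 1 \
#       --cpu atomic --boot-type init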
coherence_protocol_required = None
if args.mem_system == "mi_example":
coherence_protocol_required = CoherenceProtocol.MI_EXAMPLE
elif args.mem_system == "mesi_two_level":
coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL
requires(isa_required=ISA.X86,
coherence_protocol_required=coherence_protocol_required,
kvm_required=(args.cpu == "kvm"))
cache_hierarchy = None
if args.mem_system == "mi_example":
from gem5.components.cachehierarchies.ruby.\
mi_example_cache_hierarchy import (
MIExampleCacheHierarchy,
)
cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8)
elif args.mem_system == "mesi_two_level":
from gem5.components.cachehierarchies.ruby.\
mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="16kB",
l1d_assoc=8,
l1i_size="16kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=1,
)
elif args.mem_system == "classic":
from gem5.components.cachehierarchies.classic.\
private_l1_cache_hierarchy import (
PrivateL1CacheHierarchy,
)
cache_hierarchy = PrivateL1CacheHierarchy(l1d_size="16kB", l1i_size="16kB")
else:
raise NotImplementedError(
"Memory system '{}' is not supported in the boot tests.".format(
args.mem_system
)
)
assert cache_hierarchy is not None
# Setup the system memory.
# Warning: This must be kept at 3GB for now. X86Motherboard does not support
# anything else right now!
python_module = "gem5.components.memory"
memory_class = getattr(
importlib.import_module(python_module), args.dram_class
)
memory = memory_class(size="3GiB")
# Setup a Processor.
cpu_type = None
if args.cpu == "kvm":
cpu_type = CPUTypes.KVM
elif args.cpu == "atomic":
cpu_type = CPUTypes.ATOMIC
elif args.cpu == "timing":
cpu_type = CPUTypes.TIMING
elif args.cpu == "o3":
cpu_type = CPUTypes.O3
else:
raise NotImplementedError(
"CPU type '{}' is not supported in the boot tests.".format(args.cpu)
)
assert cpu_type is not None
processor = SimpleProcessor(cpu_type=cpu_type, num_cores=args.num_cpus)
# Setup the motherboard.
motherboard = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
kernel_args = motherboard.get_default_kernel_args()
if args.boot_type == "init":
    kernel_args.append("init=/root/exit.sh")
# Set the Full System workload.
motherboard.set_kernel_disk_workload(
kernel=Resource(
"x86-linux-kernel-5.4.49",
resource_directory=args.resource_directory,
),
disk_image=Resource(
"x86-ubuntu-18.04-img",
resource_directory=args.resource_directory,
),
    kernel_args=kernel_args,
)
# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()
print("Beginning simulation!")
simulator = Simulator(board=motherboard)
if args.tick_exit:
    simulator.run(max_ticks=args.tick_exit)
else:
simulator.run()
print(
"Exiting @ tick {} because {}.".format(
simulator.get_current_tick(),
simulator.get_last_exit_event_cause(),
)
)
|
import os, sys, inspect, unittest, scalg
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
DATA = [
[2016, 21999, 62000, 181],
[2013, 21540, 89000, 223],
[2015, 18900, 100000, 223],
[2013, 24200, 115527, 223],
[2016, 24990, 47300, 223],
]
WEIGHTS = [1, 0, 0, 1]
class SE(unittest.TestCase):
def test_score_default(self):
expected_result = [
[2016, 21999, 62000, 181, 2.2756757812463335],
[2013, 21540, 89000, 223, 1.9553074815952338],
[2015, 18900, 100000, 223, 2.894245191297678],
[2013, 24200, 115527, 223, 1.1297208538587848],
[2016, 24990, 47300, 223, 3.0]
]
result = scalg.score(DATA, WEIGHTS)
assert result == expected_result
def test_score_scores(self):
expected_result = [
2.2756757812463335,
1.9553074815952338,
2.894245191297678,
1.1297208538587848,
3.0
]
result = scalg.score(DATA, WEIGHTS, get_scores=True)
assert result == expected_result
def test_score_score_lists(self):
expected_result = [
[1.0, 0.0, 0.6666666666666666, 0.0, 1.0],
[0.49113300492610834, 0.5665024630541872, 1.0, 0.12972085385878485, 0.0],
[0.7845427763202251, 0.38880501854104677, 0.22757852463101114, 0.0, 1.0],
[0.0, 1.0, 1.0, 1.0, 1.0]
]
result = scalg.score(DATA, WEIGHTS, get_score_lists=True)
assert result == expected_result
class SEC(unittest.TestCase):
def test_score_columns(self):
expected_result = [
[2016, 21999, 62000, 181, 1.4911330049261085],
[2013, 21540, 89000, 223, 0.5665024630541872],
[2015, 18900, 100000, 223, 1.6666666666666665],
[2013, 24200, 115527, 223, 0.12972085385878485],
[2016, 24990, 47300, 223, 1.0]
]
result = scalg.score_columns(DATA, [0, 1], WEIGHTS)
assert result == expected_result
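
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()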
|
#!/usr/bin/python
number = list(range(1, 21))
n = 1
for i in range(1, 20):
    n = n * number[i]
    for j in range(i):
        if number[i] % number[j] == 0:
            n = n // number[j]  # integer division keeps n an int in Python 3
            number[i] = number[i] // number[j]
print(n)
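
# Independent cross-check sketch (not in the original script): the smallest
# number divisible by every integer in 1..20 is their least common multiple,
# computed here directly with math.gcd.
import math
from functools import reduce

lcm_check = reduce(lambda a, b: a * b // math.gcd(a, b), range(1, 21), 1)
print(lcm_check)  # 232792560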
|
import unittest
import torch
import torch.nn as nn
from utils import getModel, getModelWithOptimized
from msdnet.dataloader import get_dataloaders_alt
from data.ImagenetDataset import get_zipped_dataloaders
import os
from data.utils import getClassToIndexMapping
class TestInferencing(unittest.TestCase):
TEST_DATASET_PATH = os.path.join(os.getcwd(), "data", "imagenet_red")
TEST_DATASET_PATH_ALT = "data/imagenet_images"
def test000_testDenseNet121Output_withLoss_noException(self):
test_batch = 1
test_loader, _, _ = get_zipped_dataloaders(self.TEST_DATASET_PATH, test_batch)
test_criterion = nn.CrossEntropyLoss()
model = getModel('densenet121')
for i, (img, target) in enumerate(test_loader):
output = model(img)
test_criterion(output, target)
if i == 0: break
def test020_testDenseNet169Output_withLoss_noException(self):
test_batch = 1
test_loader, _, _ = get_zipped_dataloaders(self.TEST_DATASET_PATH, test_batch)
test_criterion = nn.CrossEntropyLoss()
model = getModel('densenet169')
for i, (img, target) in enumerate(test_loader):
output = model(img)
test_criterion(output, target)
if i == 0: break
def test030_labelAndIndexMapping(self):
test_batch = 1
test_loader, _, _ = get_zipped_dataloaders(self.TEST_DATASET_PATH, test_batch)
img, target = next(iter(test_loader))
index_path = os.path.join(self.TEST_DATASET_PATH, 'index-train.txt')
class_to_global_index = getClassToIndexMapping(index_path)
label_to_class = list(set(class_to_global_index))
label_to_class.sort()
self.assertEqual(len(label_to_class), 40)
self.assertEqual(len(class_to_global_index), len(test_loader))
index_path = os.path.join(self.TEST_DATASET_PATH, 'index-val.txt')
class_to_global_val_index = getClassToIndexMapping(index_path)
label_to_class_val = list(set(class_to_global_val_index))
label_to_class_val.sort()
self.assertEqual(len(label_to_class_val), len(label_to_class))
self.assertEqual(label_to_class_val, label_to_class)
    def test040_DenseNetWithDenseNetDropLastNPolicy_NoException_OnForwardingWithBatchSize(self):
test_batch_size = 8
test_loader, _ , _ = get_zipped_dataloaders(self.TEST_DATASET_PATH, test_batch_size)
img, _ = next(iter(test_loader))
test_net = getModelWithOptimized('densenet121-skip-last', 3, test_batch_size)
with torch.no_grad():
output = test_net(img)
self.assertIsNotNone(output)
self.assertEqual(img.shape[0], test_batch_size)
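
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()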
|
# %% Imports
# General stuff
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
import os
import pathlib
# Audio processing/tools import
import librosa
import librosa.display
from scipy.io.wavfile import read
from IPython.display import Audio
#REMEMBER you need ffmpeg installed
# Import custom module containing useful functions
import sonicboom
# Define some decorator functions
import time
# %% Read the metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/')
# %% Sample down
# samples down grouping by class - this gives me 2 (or whatever number is passed to x.sample) items from each class.
# as_index=False is important because otherwise Pandas calls the index and the column the same thing, confusing itself
filedata = filedata.groupby('class', as_index=False).apply(lambda x: x.sample(2))
# check that the sample down is working
# as_index=False is important because otherwise Pandas calls the index and the column the same thing, confusing itself
filedata.groupby('class', as_index=False)['slice_file_name'].nunique()
# %% Read one audio file to see what it contains
sonicboom.test_read_audio(filedata.path.iloc[16])
# %% PARALLEL Generate MFCCs and add to dataframe
from joblib import Parallel, delayed
startTime = time.perf_counter()
#non-parallel version
#filedata['mfccs'] = [sonicboom.mfccsEngineering(x) for x in filedata['path']]
# inputVar = input("0. All, \n \
# 1. MFCCS \n \
# 2. Mel-scaled spectrogram \n \
# 3. Short-time Fourier transform (STFT) \n \
# 4. Chromagram (STFT) \n \
# 5. Spectral contrast (STFT) \n \
# 6. Tonal centroid features (tonnetz) from harmonic components \n")
mfccs_exec = True
melSpec_exec = True
stft_exec = True
chroma_stft_exec = True
spectral_contrast_exec = True
tonnetz_exec = True
# if (inputVar == 0):
# mfccs_exec = True
# melSpec_exec = True
# stft_exec = True
# chroma_stft_exec = True
# spectral_contrast_exec = True
# tonnetz_exec = True
# elif (inputVar == 1):
# mfccs_exec = True
# elif (inputVar == 2):
# melSpec_exec = True
# elif (inputVar == 3):
# stft_exec = True
# elif (inputVar == 4):
# chroma_stft_exec = True
# elif (inputVar == 5):
# spectral_contrast_exec = True
# elif (inputVar == 6):
# tonnetz_exec = True
if (mfccs_exec == True):
#generate mfccs features
filedata['mfccs'] = Parallel(n_jobs=-1)(delayed(sonicboom.mfccsEngineering)(x) for x in filedata['path'])
if (melSpec_exec == True):
#generate melSpec features
filedata['melSpec'] = Parallel(n_jobs=-1)(delayed(sonicboom.melSpecEngineering)(x) for x in filedata['path'])
if (stft_exec == True):
#generate stft features
filedata['stft'] = Parallel(n_jobs=-1)(delayed(sonicboom.stftEngineering)(x) for x in filedata['path'])
if (chroma_stft_exec == True):
#generate chroma_stft features
filedata['chroma_stft'] = Parallel(n_jobs=-1)(delayed(sonicboom.chroma_stftEngineering)(x) for x in filedata['path'])
if (spectral_contrast_exec == True):
#generate spectral_contrast features
filedata['spectral_contrast'] = Parallel(n_jobs=-1)(delayed(sonicboom.spectral_contrastEngineering)(x) for x in filedata['path'])
if (tonnetz_exec == True):
#generate tonnetz features
filedata['tonnetz'] = Parallel(n_jobs=-1)(delayed(sonicboom.tonnetzEngineering)(x) for x in filedata['path'])
endTime = time.perf_counter()
runTime = endTime - startTime
print(f'Finished in {runTime:.4f} secs')
filedata.head()
#filedata.to_csv('./mfccsFeature.csv')
# %% Parallel check
#for x in range(len(filedata)):
# print(np.array_equal(filedata['mfccs'].iloc[x], filedata['mfccsParallel'].iloc[x]))
# %% prep features for models
#take mean of transposed mfccs (for some reason?) - this is now done in SonicBoom
#filedata['mfccs'] = [np.mean(x.T,axis=0) for x in filedata['mfccs']]
# %% Initial model generation: all of em
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, \
GradientBoostingClassifier, StackingClassifier, BaggingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import make_scorer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_validate
from mlxtend.plotting import plot_learning_curves
models = [
OneVsRestClassifier(LogisticRegression(random_state=1)),
OneVsRestClassifier(KNeighborsClassifier(n_neighbors=10)),
OneVsRestClassifier(DecisionTreeClassifier()),
OneVsRestClassifier(GaussianNB()),
OneVsRestClassifier(LinearSVC())
]
model_namelist = ['Logistic Regression',
'KNeighbors',
'Decision Tree',
'GaussianNB',
'SVM/Linear SVC'
]
scoring = {'precision': make_scorer(precision_score, average='micro'),
'recall': make_scorer(recall_score, average='micro'),
'f1': make_scorer(f1_score, average='micro'),
'roc_auc': make_scorer(roc_auc_score, average='micro'),
# 'mcc': make_scorer(matthews_corrcoef) <- cannot support multi-label
}
cv_result_entries = []
i = 0
X = pd.DataFrame(filedata['mfccs'].iloc[x] for x in range(len(filedata)))
y = label_binarize(
pd.DataFrame(filedata['classID'].iloc[x] for x in range(len(filedata))),
classes=[0,1,2,3,4,5,6,7,8,9]
)
# ### Loop cross validation through various models and generate results
for mod in models:
metrics = cross_validate(
mod,
X,
y,
cv=5,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
cv_result_entries.append((model_namelist[i], fold_index, key, score))
i += 1
cv_results_df = pd.DataFrame(cv_result_entries)
# %%
# ### Misclassification Errors
i=0
for model in models:
plt.figure()
plot_learning_curves(X, y, X, y, model)
plt.title('Learning Curve for ' + model_namelist[i], fontsize=14)
plt.xlabel('Training Set Size (%)', fontsize=12)
plt.ylabel('Misclassification Error', fontsize=12)
plt.show()
i += 1
# %% [markdown]
# ### Get predictions: prep for Confusion Matrix
y_test_pred = []
for model in models:
    model.fit(X, y)  # cross_validate fits clones, so each model must be fitted here before predicting
    y_test_pred.append(model.predict(X))
# %% [markdown]
# ### Confusion Matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
CLASSES = ['A/C', 'Car Horn', 'Children Play', 'Dog Bark',
'Drilling', 'Engine Idle', 'Gun Shot', 'Jackhammer',
'Siren', 'Street Music']
i=0
for _ in models:
cm = confusion_matrix(np.argmax(y, axis=1),
np.argmax(y_test_pred[i], axis=1))
cm_df = pd.DataFrame(cm, index = CLASSES, columns = CLASSES)
cm_df.index.name = 'Actual'
cm_df.columns.name = 'Predicted'
plt.title('Confusion Matrix for ' + model_namelist[i], fontsize=14)
sns.heatmap(cm_df, annot=True, fmt='.6g', annot_kws={"size": 10}, cmap='Reds')
plt.show()
i += 1
# %%
|
import json
import itertools
import numpy as np
import sys
from data_utils import DataUtils, MyExceptions
from display_colour_image import DisplayColourImage
def solve(input_array, out_array=None, graphics=False):
assert type(input_array) == np.ndarray
column_index = np.nonzero(input_array[0, 0:])[0]
row_index = np.nonzero(input_array[0:, 9])[0]
output_array = np.copy(input_array)
for i, j in itertools.product(row_index.tolist(), column_index.tolist()):
output_array[i, j] = 2
# print(output_array)
if graphics:
DisplayColourImage("Solution for 2281f1f4",
input_array, output_array).display_data()
return output_array
def main(path , graphics):
t = DataUtils(path)
train, test = t.train, t.test
for _t in train + test:
inp, out = _t['input'], _t['output']
inp, out = np.asarray(inp), np.asarray(out)
output_array = solve(inp, out, graphics)
print(output_array)
if __name__ == "__main__":
# main()
if len(sys.argv) < 2:
raise MyExceptions("Please Specify a valid file path")
else:
path = sys.argv[1]
graphics = sys.argv[2] if len(sys.argv) ==3 else False
main(path, graphics)
|
import subprocess
from pathlib import Path
from datajob import logger
class DatajobPackageWheelError(Exception):
"""any exception occuring when constructing wheel in data job context."""
def create(project_root):
"""launch a subprocess to built a wheel.
todo - use the setuptools/disttools api to create a setup.py.
relying on a subprocess feels dangerous.
"""
setup_py_file = Path(project_root, "setup.py")
if setup_py_file.is_file():
logger.debug(f"found a setup.py file in {project_root}")
logger.debug("creating wheel for glue job")
cmd = f"cd {project_root}; python setup.py bdist_wheel"
print(f"wheel command: {cmd}")
# todo - shell=True is not secure
subprocess.call(cmd, shell=True)
else:
raise DatajobPackageWheelError(
f"no setup.py file detected in project root {project_root}. "
f"Hence we cannot create a python wheel for this project"
)
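
# A minimal sketch of the todo above (assumption: the PyPA 'build' package is
# installed); it is not part of the original module.
def create_with_build_api(project_root):
    """Build the wheel through the 'build' API instead of a subprocess."""
    from build import ProjectBuilder  # assumption: pip install build
    builder = ProjectBuilder(str(project_root))
    # Returns the path of the wheel written to <project_root>/dist
    return builder.build("wheel", output_directory=str(Path(project_root, "dist")))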
|
'''
Created on 30 Jan 2019
@author: simon
'''
import click, os, grp, pwd, getpass, shutil
from os import path
from k2 import app_installer
@click.group()
def k2():
pass
@k2.command()
@click.option('--base', help='Identify the location of the k2 base directory. If not set defaults to the current directory')
@click.option('--user', help='Identify the OS user that is the owner of the k2 web apps environment. Defaults to the current owner of the base directory, or to the current user if the base directory does not exist')
@click.option('--group', help='Identify the OS group that is the owner of the k2 web apps environment. Defaults to the group of the base directory, or to the group of the current user if the base directory does not exist')
def config(base, user, group):
'''
Configure the current directory or the given base as the K2 base directory
'''
k2_base, k2_user, k2_group = _get_base_user_group(base, user, group)
_k2_env_summary(k2_base, k2_user, k2_group)
if not path.exists(k2_base):
print('Base directory does not exist. Creating...')
os.mkdir(k2_base, 0o775)
shutil.chown(k2_base, user=k2_user, group=k2_group)
alias, export = _read_profile(k2_user)
profile = _get_profile(k2_user)
    if alias is not None:
if not alias:
            print(f'{profile} does not include the k2 alias. Adding...')
with open(profile, 'a') as fp:
fp.write('\n\n')
fp.write(_alias_k2(k2_base))
    if export is not None:
if not export:
            print(f'{profile} does not include the export of K2_BASE. Adding...')
with open(profile, 'a') as fp:
fp.write('\n\n')
fp.write(_export_k2_base(k2_base))
def _k2_env_summary(base, user, group):
print()
print('K2_BASE: {base}'.format(base=base))
print('K2 Owner: {owner}'.format(owner=user))
print('K2 Group: {group}'.format(group=group))
print()
def _alias_k2(base):
return 'alias k2={base}/venv/bin/k2'.format(base=os.path.abspath(base))
def _export_k2_base(base):
return 'export K2_BASE={base}'.format(base=os.path.abspath(base))
def _get_profile(user):
home = os.path.expanduser('~'+user)
if os.path.exists(home):
if os.path.exists(home+'/.profile'):
return home+'/.profile'
elif os.path.exists(home+'/.bash_profile'):
return home+'/.bash_profile'
elif os.path.exists(home+'/.bash_rc'):
return home+'/.bash_rc'
def _read_profile(user):
profile = _get_profile(user)
if profile:
with open(profile, 'r') as fp:
lines = fp.readlines()
alias = False
export = False
for line in lines:
if line.strip()[0:9] == 'alias k2=':
alias = True
if line.strip()[0:15] == 'export K2_BASE=':
export = True
return alias, export
return None, None
def _get_base_user_group(base=None, user=None, group=None):
k2_base = base if base else os.getcwd()
if path.exists(k2_base):
stat_info = os.stat(k2_base)
k2_base_user = pwd.getpwuid(stat_info.st_uid)[0]
k2_base_group = grp.getgrgid(stat_info.st_gid)[0]
else:
k2_base_user = getpass.getuser()
k2_base_group = grp.getgrgid(pwd.getpwnam(k2_base_user).pw_gid).gr_name
k2_user = user if user else k2_base_user
k2_group = group if group else k2_base_group
return k2_base, k2_user, k2_group
@k2.command()
@click.argument('src')
@click.option('--name')
@click.option('--base')
def install(src, base, name):
'''
Install the application source identified by the src URL as a Flask application directory within the current
K2 base directory identified by the environment variable $K2_BASE
'''
    if not base:
        base = os.environ.get('K2_BASE')
if not base:
raise ValueError('The environment variable $K2_BASE is not set')
app_installer.install(src, base, name)
if __name__ == '__main__':
k2()
|
import json
import requests
class Cards(object):
__module__ = 'trello'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
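    # Illustrative usage sketch (the key, token and card id below are
    # placeholders, not real values):
    #
    #   cards = Cards(apikey='<API_KEY>', token='<OAUTH_TOKEN>')
    #   card = cards.get('<CARD_ID_OR_SHORTLINK>', fields='name,desc')
    #   cards.update_desc('<CARD_ID_OR_SHORTLINK>', value='new description')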
def get(self, card_id_or_shortlink, actions=None, actions_limit=None, action_fields=None, attachments=None, attachment_fields=None, members=None, member_fields=None, checkItemStates=None, checkItemState_fields=None, checklists=None, checklist_fields=None, board=None, board_fields=None, list=None, list_fields=None, fields=None):
resp = requests.get("https://trello.com/1/cards/%s" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, actions=actions, actions_limit=actions_limit, action_fields=action_fields, attachments=attachments, attachment_fields=attachment_fields, members=members, member_fields=member_fields, checkItemStates=checkItemStates, checkItemState_fields=checkItemState_fields, checklists=checklists, checklist_fields=checklist_fields, board=board, board_fields=board_fields, list=list, list_fields=list_fields, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_field(self, field, card_id_or_shortlink):
resp = requests.get("https://trello.com/1/cards/%s/%s" % (card_id_or_shortlink, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_action(self, card_id_or_shortlink, filter=None, fields=None, limit=None, format=None, since=None, page=None, idModels=None):
resp = requests.get("https://trello.com/1/cards/%s/actions" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, filter=filter, fields=fields, limit=limit, format=format, since=since, page=page, idModels=idModels), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_attachment(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/attachments" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/board" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board_field(self, field, card_id_or_shortlink):
resp = requests.get("https://trello.com/1/cards/%s/board/%s" % (card_id_or_shortlink, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_checkItemState(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/checkItemStates" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_checklist(self, card_id_or_shortlink, cards=None, card_fields=None, checkItems=None, checkItem_fields=None, filter=None, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/checklists" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, cards=cards, card_fields=card_fields, checkItems=checkItems, checkItem_fields=checkItem_fields, filter=filter, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_list(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/list" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_list_field(self, field, card_id_or_shortlink):
resp = requests.get("https://trello.com/1/cards/%s/list/%s" % (card_id_or_shortlink, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_member(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/members" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_membersVoted(self, card_id_or_shortlink, fields=None):
resp = requests.get("https://trello.com/1/cards/%s/membersVoted" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def update(self, card_id_or_shortlink, name=None, desc=None, closed=None, idAttachmentCover=None, idList=None, pos=None, due=None, subscribed=None):
resp = requests.put("https://trello.com/1/cards/%s" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(name=name, desc=desc, closed=closed, idAttachmentCover=idAttachmentCover, idList=idList, pos=pos, due=due, subscribed=subscribed))
resp.raise_for_status()
return json.loads(resp.content)
def update_checklist_checkItem_name_idCheckList_idCheckItem(self, idCheckList, idCheckItem, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/checklist/%s/checkItem/%s/name" % (card_id_or_shortlink, idCheckList, idCheckItem), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_checklist_checkItem_po_idCheckList_idCheckItem(self, idCheckList, idCheckItem, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/checklist/%s/checkItem/%s/pos" % (card_id_or_shortlink, idCheckList, idCheckItem), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_checklist_checkItem_state_idCheckList_idCheckItem(self, idCheckList, idCheckItem, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/checklist/%s/checkItem/%s/state" % (card_id_or_shortlink, idCheckList, idCheckItem), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_closed(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/closed" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_desc(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/desc" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_due(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/due" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_idAttachmentCover(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/idAttachmentCover" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_idList(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/idList" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_name(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/name" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_po(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/pos" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_subscribed(self, card_id_or_shortlink, value):
resp = requests.put("https://trello.com/1/cards/%s/subscribed" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def new(self, name, idList, desc=None, pos=None, idCardSource=None, keepFromSource=None):
resp = requests.post("https://trello.com/1/cards" % (), params=dict(key=self._apikey, token=self._token), data=dict(name=name, idList=idList, desc=desc, pos=pos, idCardSource=idCardSource, keepFromSource=keepFromSource))
resp.raise_for_status()
return json.loads(resp.content)
def new_action_comment(self, card_id_or_shortlink, text):
resp = requests.post("https://trello.com/1/cards/%s/actions/comments" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(text=text))
resp.raise_for_status()
return json.loads(resp.content)
def new_attachment(self, card_id_or_shortlink, file=None, url=None, name=None):
resp = requests.post("https://trello.com/1/cards/%s/attachments" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(file=file, url=url, name=name))
resp.raise_for_status()
return json.loads(resp.content)
def new_checklist(self, card_id_or_shortlink, value):
resp = requests.post("https://trello.com/1/cards/%s/checklists" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def new_label(self, card_id_or_shortlink, value):
resp = requests.post("https://trello.com/1/cards/%s/labels" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def new_member(self, card_id_or_shortlink, value):
resp = requests.post("https://trello.com/1/cards/%s/members" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def new_membersVoted(self, card_id_or_shortlink, value):
resp = requests.post("https://trello.com/1/cards/%s/membersVoted" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def delete(self, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s" % (card_id_or_shortlink), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def delete_attachment_idAttachment(self, idAttachment, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s/attachments/%s" % (card_id_or_shortlink, idAttachment), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def delete_checklist_idChecklist(self, idChecklist, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s/checklists/%s" % (card_id_or_shortlink, idChecklist), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def delete_label_color(self, color, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s/labels/%s" % (card_id_or_shortlink, color), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def delete_member_idMember(self, idMember, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s/members/%s" % (card_id_or_shortlink, idMember), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def delete_membersVoted_idMember(self, idMember, card_id_or_shortlink):
resp = requests.delete("https://trello.com/1/cards/%s/membersVoted/%s" % (card_id_or_shortlink, idMember), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
|
import json
import multiprocessing
from multivolumecopy.resolvers import resolver
import multivolumecopy.copyfile
class JobFileResolver(resolver.Resolver):
""" Read copyfile src/dst from a JSON jobfile.
Example:
Sample ``mvcopy-jobdata.json``
.. code-block:: json
[
{"src": "/src/a.txt", "dst": "/dst/a.txt", "relpath": "a.txt", "bytes": 1000, "index": 0},
{"src": "/src/b.txt", "dst": "/dst/b.txt", "relpath": "b.txt", "bytes": 1000, "index": 1},
{"src": "/src/c.txt", "dst": "/dst/c.txt", "relpath": "c.txt", "bytes": 1000, "index": 2},
{"src": "/src/d.txt", "dst": "/dst/d.txt", "relpath": "d.txt", "bytes": 1000, "index": 3},
{"src": "/src/e.txt", "dst": "/dst/e.txt", "relpath": "e.txt", "bytes": 1000, "index": 4}
]
"""
def __init__(self, filepath, options):
super(JobFileResolver, self).__init__(options)
self._filepath = filepath
def get_copyfiles(self, device_start_index=None, start_index=None):
"""
.. code-block:: python
[
CopyFile(src='/src/a.txt', dst='/dst/a.txt', relpath='a.txt', bytes=1024, index=0),
CopyFile(src='/src/b.txt', dst='/dst/b.txt', relpath='b.txt', bytes=1024, index=1),
CopyFile(src='/src/c.txt', dst='/dst/c.txt', relpath='c.txt', bytes=1024, index=2),
...
]
"""
with multiprocessing.Pool(processes=1) as pool:
return pool.apply(_get_copyfiles, (self._filepath, device_start_index, start_index))
def _get_copyfiles(filepath, device_start_index=None, start_index=None):
    # json does not release memory well, so we parse the jobfile in a separate process
    with open(filepath, 'r') as fd:
        raw_copyfiles = json.loads(fd.read())
    return tuple(multivolumecopy.copyfile.CopyFile(**x) for x in raw_copyfiles)
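# A minimal usage sketch (hypothetical paths; `options` is whatever resolver.Resolver expects):
#
#     resolver = JobFileResolver('mvcopy-jobdata.json', options)
#     copyfiles = resolver.get_copyfiles()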
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class BasePage():
"""Base class to initialize the base page that will be called from pages"""
def __init__(self, driver):
self.driver = driver
def find_element(self, *locator):
return self.driver.find_element(*locator)
    def open(self, uri):
        # assumes subclasses (or callers) set `base_url` before calling open()
        url = self.base_url + uri
        self.driver.get(url)
class IssuesPage(BasePage):
"""Issues page action methods come here. in our e.g . Github/issues.com"""
class LoginPage(BasePage):
"""Login page action methods come here. in our e.g . Github/login.com"""
def fill_sign_in(self):
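        # assumes a LoginPageLocators class (e.g. from a separate locators module) that defines SIGN_IN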
self.find_element(LoginPageLocators.SIGN_IN).click()
'''
browser = webdriver.Firefox()
browser.get('https://github.com/ZoranPandovski/al-go-rithms/issues')
sign_in = browser.find_element_by_link_text('Sign in')
sign_in.click()
user_name = browser.find_element_by_id('login_field')
password = browser.find_element_by_id('password')
user_name.send_keys('[email protected]')
password.send_keys('220682zpp')
sign_in1 = browser.find_element_by_name('commit')
sign_in1.click()
link = browser.find_element_by_link_text(
'Add new algorithm, data structure, puzzle, cipher')
link.click()
close_issue = browser.find_elements_by_class_name('js-comment-and-button')
# close_issue.click()
new_issue = browser.find_element_by_link_text('New issue')
new_issue.click()
wait = WebDriverWait(browser, 20)
title_input = wait.until(
EC.presence_of_element_located((By.ID, 'issue_title')))
title_input.send_keys('Test')
description_input = wait.until(
EC.presence_of_element_located((By.ID, 'issue_body')))
description_input.clear()
description_input.send_keys('Test 1')
# labels are second element
labels_btn = browser.find_elements_by_class_name('discussion-sidebar-heading')[
1]
labels_btn.click()
labels_list = wait.until(
EC.element_to_be_clickable((By.CSS_SELECTOR, '[data-label-name="bug"]')))
browser.execute_script("arguments[0].click();", labels_list)
# actions = ActionChains(browser)
# browser.quit()
'''
|
# Adapted from OpenUserBot for Uniborg
"""Download & Upload Images on Telegram\n
Syntax: `.img <number> <Name>` or `.img <number> (replied message)`
\n Upgraded and Google Image Error Fixed by @kirito6969
"""
from userbot.utils.google_images_download import googleimagesdownload
import os
import shutil
from re import findall
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="img ?(\d+)? ?(.*)?"))
async def img_sampler(event):
await event.edit("`Processing Bsdk..`")
reply = await event.get_reply_message()
if event.pattern_match.group(2):
query = event.pattern_match.group(2)
elif reply:
query = reply.message
else:
await event.edit("`What I am Supposed to Search u Dumb Ass(Donkey)`")
return
lim = findall(r"lim=\d+", query)
lim = event.pattern_match.group(1)
try:
lim = lim[0]
lim = lim.replace("lim=", "")
query = query.replace("lim=" + lim[0], "")
except IndexError:
lim = 5
response = googleimagesdownload()
# creating list of arguments
arguments = {
"keywords": query,
"limit": lim,
"format": "jpg",
"no_directory": "no_directory"
}
# passing the arguments to the function
paths = response.download(arguments)
lst = paths[0][query]
await event.client.send_file(await event.client.get_input_entity(event.chat_id), lst)
shutil.rmtree(os.path.dirname(os.path.abspath(lst[0])))
await event.delete()
|
import mongoengine
class User(mongoengine.Document):
    username = mongoengine.EmailField()
# password: mongoengine.ReferenceField |
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import requests
import ftfy
import glob
import argparse
import os
import jsonlines
from tqdm import tqdm
from datetime import datetime
def main(args):
    # Create the new file, overwriting it if it already exists
f = open(args.output_file, "w+")
f.close()
n = 0
with jsonlines.open(args.output_file, 'w') as writer:
with jsonlines.open(args.input_file) as reader:
for wikipost in tqdm(reader):
written = 0
myarticle = {}
myarticle['doc_type'] = args.doc_type
myarticle['id'] = args.doc_type + \
"_" + wikipost['title']+"_"+str(n)
myarticle['language_reported'] = args.language_reported
myarticle['paragraphs'] = []
myarticle['title'] = wikipost['title']
sectiontext = ' '.join(wikipost['section_texts'])
alltext = list(filter(bool, sectiontext.splitlines()))
pid = 0
for p in alltext:
paragraph = {}
text = " ".join(p.split())
text = ftfy.fix_text(text)
paragraph['paragraph_id'] = pid
paragraph['text'] = text
myarticle['paragraphs'].append(paragraph)
pid += 1
writer.write(myarticle)
n += 1
print(f'{n} posts are written to {args.output_file}')
def parse_args():
# Parse commandline
parser = argparse.ArgumentParser(
description="Process the downloaded Wikipedia files. Output is an UTF-8 JSON lines")
parser.add_argument('--language_reported', required=False, default="N/A",
type=str, help='Language reported. Can be nob, nno, no, da, sv, is or N/A')
parser.add_argument('--doc_type', required=True, type=str,
help='For instance wikipedia_download_no')
parser.add_argument('--year', default="", type=str,
help='Selects only one year')
parser.add_argument('-o', '--output_file', required=True,
                        help='Output file name. Will overwrite if it exists')
parser.add_argument('-i', '--input_file',
required=True, help='Input file.')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
import os
import h5py
import sys
import skimage.io
import numpy as np
import tensorflow as tf
from keras.backend import clear_session
import urllib.request
from app import app
from flask import Flask, request, redirect, jsonify, send_from_directory
from flask_restful import Api, Resource
from werkzeug.utils import secure_filename
from database.database import DAO
sys.path.insert(0, "../retrieval")
from detector import Detector
import extractor
api = Api(app)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
# Add Detector
graph = tf.get_default_graph()
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def get_id(query_feature):
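    # Rank the stored CNN feature vectors by dot-product similarity with the
    # query feature and return the ids whose similarity score exceeds 0.5.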
path = "../featureCNN_map.h5"
h5f = h5py.File(path,'r')
feats = h5f['feats'][:]
id = h5f['id'][:]
h5f.close()
    # feats.T = transpose of the feature matrix
    # similarity scores via dot product
scores = np.dot(query_feature, feats.T)
# sort
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
id_rank = id[rank_ID]
# score > 0.5
rank = np.r_[(rank_score>0.5).nonzero()]
id_rank = id_rank[rank]
return id_rank
class Retrieval(Resource):
def post(self):
if 'file' not in request.files:
resp = jsonify({'message' : 'No file part in the request'})
resp.status_code = 400
return resp
file = request.files['file']
# Save Image To Server
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
image_path = 'image/uploads/'+filename
# Add Extractor
# Add Database
database = DAO()
# Object Detection
image = skimage.io.imread(image_path)
clear_session()
image_detector = Detector("../weight/mask_rcnn_fashion.h5")
detection_results = image_detector.detection(image)
# Dominan Object
big_object, big_ix = image_detector.get_biggest_box(detection_results['rois'])
cropped_object = image_detector.crop_object(image, big_object)
# Extract
clear_session()
image_extractor = extractor.Extractor()
query_image_feature = image_extractor.extract_feat(cropped_object)
# similarity
id = get_id(query_image_feature)
data = []
result = database.getProduct(id)
for res in result:
data.append(res.to_dict())
resp = jsonify({'data': data})
resp.status_code = 200
return resp
def get(self):
database = DAO()
result = database.getAll()
data = []
for res in result:
data.append(res.to_dict())
resp = jsonify({'data': data})
resp.status_code = 200
return resp
class ImageServer(Resource):
def get(self, filename):
return send_from_directory(app.static_folder, filename)
api.add_resource(Retrieval, '/retrieval/image', endpoint='image')
api.add_resource(ImageServer, '/image/<string:filename>', endpoint='get')
if __name__ == "__main__":
app.run(host= '0.0.0.0', debug=True) |
#!/usr/bin/python3
"""
Initializes the filestore
"""
from models.engine.file_storage import FileStorage
storage = FileStorage()
storage.reload()
|
"""
Reporters of twistedchecker.
"""
|
from pykeyboard import PyKeyboard
import time
import pyperclip
def fileManager(photopath):
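    # Types a file path into a native "Open" file dialog by simulating
    # Ctrl+L (focus the path field), Ctrl+V (paste the copied path) and Enter.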
pyperclip.copy(photopath)
k = PyKeyboard()
time.sleep(1)
    k.press_key(k.control_key)  # autoit.win_active("Open"); the "Open" dialog title may differ depending on your OS language
    k.tap_key("l")
    k.release_key(k.control_key)
time.sleep(1)
k.press_key(k.control_key) #autoit.control_send("Open", "Edit1", photopath)
k.tap_key("v")
k.release_key(k.control_key)
time.sleep(1)
k.tap_key(k.enter_key) #autoit.control_send("Open", "Edit1", "{ENTER}")
def textToClipboard(text):
pyperclip.copy(text) |
import datetime
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Max
from . import models, forms
from address.biz import geocode
from utils import common
from utils.django_base import BaseAdmin
# Register your models here.
class ParkingPositionInline(admin.TabularInline):
model = models.ParkingPosition
extra = 0
class ParkingLotDocInline(admin.TabularInline):
model = models.ParkingLotDoc
form = forms.ParkingLotDocForm
extra = 0
class ParkingLotImageInline(admin.TabularInline):
model = models.ParkingLotImage
extra = 0
class ParkingLotCommentInline(admin.TabularInline):
model = models.ParkingLotComment
extra = 0
class ParkingLotKeyInline(admin.TabularInline):
model = models.ParkingLotKey
extra = 0
class ParkingLotStaffHistoryInline(admin.TabularInline):
model = models.ParkingLotStaffHistory
extra = 0
def has_add_permission(self, request):
return False
# def has_delete_permission(self, request, obj=None):
# return False
class ParkingPositionKeyInline(admin.TabularInline):
model = models.ParkingPositionKey
extra = 0
class ManagementCompanyStaffInline(admin.TabularInline):
model = models.ManagementCompanyStaff
extra = 0
@admin.register(models.ParkingLotType)
class ParkingLotTypeAdmin(BaseAdmin):
list_display = ('code', 'name')
list_display_links = ('code', 'name')
# @admin.register(models.LeaseManagementCompany)
# class LeaseManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
#
#
# @admin.register(models.BuildingManagementCompany)
# class BuildingManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
@admin.register(models.ManagementCompany)
class ManagementCompanyAdmin(BaseAdmin):
list_display = ('name', 'address', 'tel', 'email')
inlines = (ManagementCompanyStaffInline,)
@admin.register(models.TryPuttingOperator)
class TryPuttingOperatorAdmin(BaseAdmin):
pass
@admin.register(models.ParkingLot)
class ParkingLotAdmin(BaseAdmin):
form = forms.ParkingLotForm
icon = '<i class="material-icons">local_parking</i>'
list_display = ('code', 'name', 'category', 'address', 'subscription_list_send_type')
search_fields = ('code', 'name',)
inlines = (ParkingLotCommentInline, ParkingLotStaffHistoryInline, ParkingLotDocInline, ParkingLotImageInline,
ParkingLotKeyInline)
def save_model(self, request, obj, form, change):
if change is False or (
'pref_name' in form.changed_data or
'city_name' in form.changed_data or
'town_name' in form.changed_data or
'aza_name' in form.changed_data or
'other_name' in form.changed_data
):
            # For new records, or when the address has changed, re-fetch the coordinates.
coordinate = geocode(obj.address)
if coordinate.get('lng', None):
obj.lng = coordinate.get('lng', None)
if coordinate.get('lat', None):
obj.lat = coordinate.get('lat', None)
if coordinate.get('post_code', None):
obj.post_code = coordinate.get('post_code', None)
        # When the staff member changes, add a parking lot staff history record.
if change and 'staff' in form.changed_data:
queryset = models.ParkingLotStaffHistory.objects.public_filter(parking_lot=obj)
try:
last_staff = models.ParkingLot.objects.get(pk=obj.pk).staff
last_start_date = models.ParkingLot.objects.get(pk=obj.pk).staff_start_date
history_end_date = queryset.aggregate(Max('end_date')).get('end_date__max', None)
if (history_end_date is None or history_end_date < obj.staff_start_date) and last_start_date != obj.staff_start_date:
models.ParkingLotStaffHistory.objects.create(
parking_lot=obj,
member=last_staff,
start_date=last_start_date,
end_date=(obj.staff_start_date + datetime.timedelta(days=-1))
)
except ObjectDoesNotExist:
pass
super(ParkingLotAdmin, self).save_model(request, obj, form, change)
@admin.register(models.ParkingPosition)
class ParkingPosition(BaseAdmin):
form = forms.ParkingPositionForm
list_display = ('parking_lot', 'name', 'length', 'width', 'height', 'weight')
list_display_links = ('parking_lot', 'name',)
search_fields = ('parking_lot__code', 'parking_lot__name')
fieldsets = (
(None, {
'fields': (
'parking_lot',
'name', 'category', 'cost',
)
}),
("賃料", {
'classes': ('collapse',),
'fields': (
('price_recruitment_no_tax', 'price_recruitment'),
('price_homepage_no_tax', 'price_homepage'),
('price_handbill_no_tax', 'price_handbill'),
)
}),
("サイズ", {
'classes': ('collapse',),
'fields': (
('length', 'width', 'height', 'weight'),
('tyre_width', 'tyre_width_ap', 'min_height', 'min_height_ap'),
('f_value', 'r_value',),
)
}),
('備考', {
'fields': (
'comment',
)
}),
)
inlines = (ParkingPositionKeyInline,)
save_as = True
def save_model(self, request, obj, form, change):
continued_positions = common.get_continued_positions(obj.name)
if continued_positions:
split_positions = []
else:
split_positions = [s for s in obj.name.split(',') if s]
continued_positions.extend(split_positions)
if not change and continued_positions:
            # When adding multiple parking positions at once
for name in continued_positions:
if models.ParkingPosition.objects.public_filter(parking_lot=obj.parking_lot, name=name).count() == 0:
obj.pk = None
obj.name = name
obj.save()
else:
super(ParkingPosition, self).save_model(request, obj, form, change)
|
import time
import urllib.request
import concurrent.futures
url_list = [
"https://www.baidu.com", "https://www.qq.com", "https://www.sogou.com",
"https://www.cnblogs.com"
]
def get_html(url, timeout=10):
with urllib.request.urlopen(url, timeout=timeout) as conn:
return conn.read()
def main():
start_time = time.time()
with concurrent.futures.ThreadPoolExecutor() as executor:
        # a dict of {future: url} would let us map each returned future back to its url
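        # (e.g. tasks = {executor.submit(get_html, url): url for url in url_list})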
tasks = [executor.submit(get_html, url) for url in url_list]
        # iterate over the futures as they complete
for task in concurrent.futures.as_completed(tasks):
try:
result = task.result()
except Exception as ex:
print(ex)
else:
print(len(result))
print(time.time() - start_time)
if __name__ == "__main__":
main()
|
import os
opta_combined_file = 'opta_combined_ds.csv'
wyscout_dataset = 'soccerlogs_ds.csv'
data_folder_path = os.path.join('gender_analysis', 'data')
|
from pysnow.api_handlers.api_handler import APIHandler
class AnalysisHandler(APIHandler):
def __init__(self, requestor):
super().__init__(requestor)
|
import numpy as np
from matplotlib import pyplot
def dft_2d(im: np.ndarray):
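    # Direct 2-D DFT: F(u, v) = sum_{x, y} f(x, y) * exp(-2j*pi*(u*x/M + v*y/N)).
    # Quadruple loop, O(M^2 * N^2) -- written for clarity, not speed.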
f = np.zeros_like(im, dtype=np.complex128)
m, n = im.shape
c = -2j * np.pi
g = np.zeros_like(im, dtype=np.float64)
for u in range(m):
for v in range(n):
for x in range(m):
for y in range(n):
g[x, y] = u * x / m + v * y / n
f[u, v] = np.sum(im * np.exp(c * g))
return f
def dft_1d(vec: np.ndarray):
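    # 1-D DFT: F(k) = sum_n f(n) * exp(-2j*pi*k*n/N).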
f = np.zeros_like(vec, dtype=np.complex128)
n = vec.size
g = -2j * np.pi / n * np.arange(n, dtype=np.complex128)
for k in range(n):
f[k] = np.sum(vec * np.exp(g * k))
return f
def dft_2d_by_1d(im: np.ndarray):
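    # Separable 2-D DFT: apply the 1-D DFT to every row, then to every column.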
m, n = im.shape
f = np.zeros_like(im, dtype=np.complex128)
g = np.zeros_like(im, dtype=np.complex128)
for u in range(m):
g[u, :] = dft_1d(im[u, :])
for v in range(n):
f[:, v] = dft_1d(g[:, v])
return f
if __name__ == '__main__':
im = pyplot.imread('../images/zebra_line.jpg') / 256
# shift fourier transform
im_s = im.copy()
im_s[1::2, ::2] *= -1
im_s[::2, 1::2] *= -1
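    # multiplying f(x, y) by (-1)^(x+y) moves the zero-frequency term to the centre of the spectrum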
im_f = dft_2d_by_1d(im_s)
# augment output
im_f = np.abs(im_f)
im_f = np.log(im_f)
im_f = im_f / np.max(im_f)
# plotting
fig, ax = pyplot.subplots(1, 2)
fig.set_size_inches(12, 8)
fig.set_tight_layout(True)
for a in ax:
a.axis('off')
ax[0].imshow(im, cmap='gray')
ax[0].set_title('Origin', fontsize=16)
ax[1].imshow(im_f, cmap='gray')
ax[1].set_title('Fourier transform', fontsize=16)
pyplot.show()
|
"""Tests `oasis test`."""
import os.path as osp
from subprocess import PIPE
# pylint: disable=relative-beyond-top-level
from .conftest import SAMPLE_KEY
def test_invoke_npm(oenv, mock_tool):
mock_tool.create_at(osp.join(oenv.bin_dir, 'npm'))
proj_dir = oenv.create_project()
oenv.run('rm yarn.lock', cwd=proj_dir) # prevent auto-detection of yarn
cp = oenv.run('oasis test', cwd=proj_dir, stdout=PIPE)
assert mock_tool.parse_output(cp.stdout)[0]['args'][0] == '--prefix'
assert mock_tool.parse_output(cp.stdout)[0]['args'][2] == 'build'
assert mock_tool.parse_output(cp.stdout)[1]['args'][0] == '--prefix'
assert mock_tool.parse_output(cp.stdout)[1]['args'][2] == 'test'
def test_invoke_yarn(oenv, mock_tool):
mock_tool.create_at(osp.join(oenv.bin_dir, 'yarn'))
proj_dir = oenv.create_project()
oenv.run('oasis test', cwd=proj_dir)
def test_testing_profile_options(oenv, mock_tool):
mock_tool.create_at(osp.join(oenv.bin_dir, 'yarn'))
proj_dir = oenv.create_project()
# note below: 0th invocation of "npm" is `npm install`, which is done without OASIS_PROFILE
cp = oenv.run('oasis test', cwd=proj_dir, stdout=PIPE)
assert mock_tool.parse_output(cp.stdout)[1]['env']['OASIS_PROFILE'] == 'local'
oenv.run(f'oasis config profile.default.credential "{SAMPLE_KEY}"')
cp = oenv.run('oasis test --profile default', cwd=proj_dir, stdout=PIPE)
assert mock_tool.parse_output(cp.stdout)[1]['env']['OASIS_PROFILE'] == 'default'
cp = oenv.run('oasis test --profile oasisbook', cwd=proj_dir, stderr=PIPE, check=False)
assert '`profile.oasisbook` does not exist' in cp.stderr
|
#!/usr/bin/env python
import commands
import json
import requests
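# Looks up the YARN application running '${component_job_name}', stops its Flink job,
# and falls back to 'flink cancel' plus 'yarn application -kill' if the stop fails.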
COMMAND_OUTPUT = commands.getoutput('yarn application -list')
IS_RUNNING = False
for line in COMMAND_OUTPUT.splitlines():
fields = line.split('\t')
    if len(fields) >= 9:  # need at least the State (index 5) and Tracking-URL (index 8) columns
app = fields[1].strip()
state = fields[5].strip()
if app == '${component_job_name}':
IS_RUNNING = True
yarn_app_id = fields[0].strip()
tracking_url = fields[8].strip()
break
if IS_RUNNING:
URL = '%s/jobs' % tracking_url
FLINK_JOB_LIST = requests.get(URL)
FLINK_JOB_LIST = json.loads(FLINK_JOB_LIST.text)
FLINK_JOBID = FLINK_JOB_LIST['jobs-running'][0]
STATUS, OUT = commands.getstatusoutput('flink stop %s -yid %s' % (FLINK_JOBID, yarn_app_id))
if STATUS != 0:
commands.getoutput('flink cancel %s -yid %s' % (FLINK_JOBID, yarn_app_id))
commands.getoutput('yarn application -kill %s' % yarn_app_id)
|
from hamcrest import *
from mc.string import String
def test_string_should_strip_prefix():
assert_that(String("abc").strip_prefix("a"), equal_to("bc"))
assert_that(String("abc").strip_prefix("abc"), equal_to(""))
assert_that(String("abc").strip_prefix(""), equal_to("abc"))
def test_string_should_throw_on_strip_prefix_if_arg_is_no_prefix():
assert_that(calling(lambda: String("abc").strip_prefix("c")),
raises(AssertionError))
def test_string_should_have_str_attr():
assert_that(String("abc").upper(), equal_to("ABC"))
|
import pandas as pd # used for making csv files
from selenium import webdriver # used for getting the web driver (the chrome window)
from selenium.webdriver.support.ui import Select # we will be using a dropdown
from selenium.webdriver.chrome.options import Options # dropdown
from queue import Queue # multi threading
from threading import Thread # multi threading
chrome_options = Options()
#chrome_options.add_argument("--headless") # arguments to improve performance
chrome_options.add_argument("--disable-composited-antialiasing")
chrome_options.add_argument("--disable-font-subpixel-positioning")
prefs = {"profile.managed_default_content_settings.images": 2} # stop images rendering - this reduces network usage
chrome_options.add_experimental_option("prefs", prefs)
PATH1 = "/home/waffleman69/Documents/code/chromedriver" # this is the driver location
driver = webdriver.Chrome(PATH1, options=chrome_options) # use the driver
driver.get("https://www.top500.org/statistics/sublist/") # open the top500 website
lists = driver.find_elements_by_xpath("//select[@name='t500list']/option") # get all of the lists (from 1993)
to_be_selected = []
for i in lists:
to_be_selected.append(i.get_attribute('innerHTML')) # get the date / name of the list (eg. November 2013)
links = {}
for i in to_be_selected:
links[i] = {}
select = Select(driver.find_element_by_name('t500list')) # get the dropdown
select.select_by_visible_text(i) # select the dropdown option
python_button = driver.find_elements_by_xpath("//button[@class='btn btn-primary']")[0]
python_button.click()
print('now')
current_list = driver.find_elements_by_xpath("//table/tbody/tr/td/a") # get links to all of the computers
rank = 1
for j in current_list:
if "system" in j.get_attribute('href'): # there are 2 links in each table division, we want the link to the system
links[i][rank] = j.get_attribute('href') # the link is at the date and rank in the dict
rank+=1
driver.quit() # close initial driver - it will not be needed
threads = 20 # the amount of threads to be used (these are not physical threads, I have an 8 thread cpu). This allows many web pages to load at once
drivers = [webdriver.Chrome(PATH1, options=chrome_options) for i in range(threads)] # create 1 driver per thread
workers = []
done = [False for i in range(threads)] # to check if it is done
def info(start, interval, driver_used): # parses the html
for i in links:
print(i) # print the current list to see how far along we are
for j in range(start,500,interval): # goes over the top 500 computers per list
            driver_used.get(links[i][j]) # submit an HTTP request for this system's page
links[i][j] = {} # get all of the info on a particular system
name = driver_used.find_element_by_xpath("//div[@class='col-sm-12 col-md-9 col-lg-9']/h1")
links[i][j]["Name"] = name.get_attribute('innerHTML')
rows = driver_used.find_elements_by_xpath("//table[@class='table table-condensed']/tbody/tr")
for row in rows:
if len(row.find_elements_by_xpath('.//*')) == 2: # parse through the table
stuff = row.find_elements_by_xpath('.//*')
category = stuff[0].get_attribute('innerHTML').strip()
value = stuff[1].get_attribute('innerHTML').strip()
links[i][j][category] = value # add info to a dictionary
driver_used.quit() # after all of the info is collected we do not need the browser window open
done[start-1] = True # update the done list so the next part can run
queue = Queue()
class DownloadWorker(Thread): # create a class to create a worker
def __init__(self, queue, starting, interval, driver_used):
Thread.__init__(self)
self.queue = queue # create variables for the info function
self.starting = starting
self.interval = interval
self.driver_used = driver_used
def run(self):
info(self.starting, self.interval, self.driver_used) # call the parsing function
for x in range(threads):
worker = DownloadWorker(queue, x+1, threads, drivers[x]) # initiate a worker
worker.start() # run the worker
uniques = []
while True:
if done == [True for i in range(threads)]:
for i in links: # this loop will get all of the unique categories
for j in links[i]:
for k in links[i][j]:
if k not in uniques and len(k) !=1: # only if new
uniques.append(k)
for i in links:
yearlist = {} # everything from the current list
for j in uniques:
temp = []
for k in links[i]:
try:
temp.append(links[i][k][j].split("\n")[0]) # add the current category and computer to the list
                    except (KeyError, AttributeError):
                        temp.append(False)  # if the category doesn't exist for this system, add False
yearlist[j] = temp # add this category to the dict
df = pd.DataFrame(yearlist) # turn the dict to a pandas dataframe
print(df.head()) # head - this is a sort of sanity check
df.to_csv(f"~/Documents/code/top500/{i}.csv") # export to a CSV file
break # end the infinite loop and subsequently the program |
import binascii
import logging.handlers
import mimetypes
import os
import base58
import requests
import urllib
import json
import textwrap
import random
from twisted.web import server
from twisted.internet import defer, threads, error, reactor
from twisted.internet.task import LoopingCall
from twisted.python.failure import Failure
from lbryschema.claim import ClaimDict
from lbryschema.uri import parse_lbry_uri
from lbryschema.error import URIParseError
# TODO: importing this when internet is disabled raises a socket.gaierror
from lbrynet.core.system_info import get_lbrynet_version
from lbrynet import conf, analytics
from lbrynet.conf import LBRYCRD_WALLET, LBRYUM_WALLET, PTC_WALLET
from lbrynet.reflector import reupload
from lbrynet.reflector import ServerFactory as reflector_server_factory
from lbrynet.core.log_support import configure_loggly_handler
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaverFactory
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileOpenerFactory
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.daemon.Downloader import GetStream
from lbrynet.daemon.Publisher import Publisher
from lbrynet.daemon.ExchangeRateManager import ExchangeRateManager
from lbrynet.daemon.auth.server import AuthJSONRPCServer
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.core import utils, system_info
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, download_sd_blob
from lbrynet.core.Session import Session
from lbrynet.core.Wallet import LBRYumWallet, SqliteStorage, ClaimOutpoint
from lbrynet.core.looping_call_manager import LoopingCallManager
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
from lbrynet.core.Error import InsufficientFundsError, UnknownNameError, NoSuchSDHash
from lbrynet.core.Error import NoSuchStreamHash, UnknownClaimID, UnknownURI
from lbrynet.core.Error import NullFundsError, NegativeFundsError
log = logging.getLogger(__name__)
INITIALIZING_CODE = 'initializing'
LOADING_DB_CODE = 'loading_db'
LOADING_WALLET_CODE = 'loading_wallet'
LOADING_FILE_MANAGER_CODE = 'loading_file_manager'
LOADING_SERVER_CODE = 'loading_server'
STARTED_CODE = 'started'
WAITING_FOR_FIRST_RUN_CREDITS = 'waiting_for_credits'
STARTUP_STAGES = [
(INITIALIZING_CODE, 'Initializing'),
(LOADING_DB_CODE, 'Loading databases'),
(LOADING_WALLET_CODE, 'Catching up with the blockchain'),
(LOADING_FILE_MANAGER_CODE, 'Setting up file manager'),
(LOADING_SERVER_CODE, 'Starting lbrynet'),
(STARTED_CODE, 'Started lbrynet'),
(WAITING_FOR_FIRST_RUN_CREDITS, 'Waiting for first run credits'),
]
# TODO: make this consistent with the stages in Downloader.py
DOWNLOAD_METADATA_CODE = 'downloading_metadata'
DOWNLOAD_TIMEOUT_CODE = 'timeout'
DOWNLOAD_RUNNING_CODE = 'running'
DOWNLOAD_STOPPED_CODE = 'stopped'
STREAM_STAGES = [
(INITIALIZING_CODE, 'Initializing'),
(DOWNLOAD_METADATA_CODE, 'Downloading metadata'),
(DOWNLOAD_RUNNING_CODE, 'Started %s, got %s/%s blobs, stream status: %s'),
(DOWNLOAD_STOPPED_CODE, 'Paused stream'),
(DOWNLOAD_TIMEOUT_CODE, 'Stream timed out')
]
CONNECTION_STATUS_CONNECTED = 'connected'
CONNECTION_STATUS_NETWORK = 'network_connection'
CONNECTION_MESSAGES = {
CONNECTION_STATUS_CONNECTED: 'No connection problems detected',
CONNECTION_STATUS_NETWORK: "Your internet connection appears to have been interrupted",
}
SHORT_ID_LEN = 20
class IterableContainer(object):
def __iter__(self):
for attr in dir(self):
if not attr.startswith("_"):
yield getattr(self, attr)
def __contains__(self, item):
for attr in self:
if item == attr:
return True
return False
class Checker(object):
"""The looping calls the daemon runs"""
INTERNET_CONNECTION = 'internet_connection_checker'
CONNECTION_STATUS = 'connection_status_checker'
class _FileID(IterableContainer):
"""The different ways a file can be identified"""
NAME = 'name'
SD_HASH = 'sd_hash'
FILE_NAME = 'file_name'
STREAM_HASH = 'stream_hash'
CLAIM_ID = "claim_id"
OUTPOINT = "outpoint"
ROWID = "rowid"
FileID = _FileID()
# TODO add login credentials in a conf file
# TODO alert if your copy of a lbry file is out of date with the name record
class NoValidSearch(Exception):
pass
class CheckInternetConnection(object):
def __init__(self, daemon):
self.daemon = daemon
def __call__(self):
self.daemon.connected_to_internet = utils.check_connection()
class AlwaysSend(object):
def __init__(self, value_generator, *args, **kwargs):
self.value_generator = value_generator
self.args = args
self.kwargs = kwargs
def __call__(self):
d = defer.maybeDeferred(self.value_generator, *self.args, **self.kwargs)
d.addCallback(lambda v: (True, v))
return d
# If an instance has a lot of blobs, this call might get very expensive.
# For reflector, with 50k blobs, it definitely has an impact on the first run
# But doesn't seem to impact performance after that.
@defer.inlineCallbacks
def calculate_available_blob_size(blob_manager):
blob_hashes = yield blob_manager.get_all_verified_blobs()
blobs = yield defer.DeferredList([blob_manager.get_blob(b) for b in blob_hashes])
defer.returnValue(sum(b.length for success, b in blobs if success and b.length))
class Daemon(AuthJSONRPCServer):
"""
LBRYnet daemon, a jsonrpc interface to lbry functions
"""
def __init__(self, root, analytics_manager):
AuthJSONRPCServer.__init__(self, conf.settings['use_auth_http'])
self.allowed_during_startup = [
'stop', 'status', 'version',
]
self.db_dir = conf.settings['data_dir']
self.download_directory = conf.settings['download_directory']
if conf.settings['BLOBFILES_DIR'] == "blobfiles":
self.blobfile_dir = os.path.join(self.db_dir, "blobfiles")
else:
log.info("Using non-default blobfiles directory: %s", conf.settings['BLOBFILES_DIR'])
self.blobfile_dir = conf.settings['BLOBFILES_DIR']
self.data_rate = conf.settings['data_rate']
self.max_key_fee = conf.settings['max_key_fee']
self.disable_max_key_fee = conf.settings['disable_max_key_fee']
self.download_timeout = conf.settings['download_timeout']
self.run_reflector_server = conf.settings['run_reflector_server']
self.wallet_type = conf.settings['wallet']
self.delete_blobs_on_remove = conf.settings['delete_blobs_on_remove']
self.peer_port = conf.settings['peer_port']
self.reflector_port = conf.settings['reflector_port']
self.dht_node_port = conf.settings['dht_node_port']
self.use_upnp = conf.settings['use_upnp']
self.startup_status = STARTUP_STAGES[0]
self.connected_to_internet = True
self.connection_status_code = None
self.platform = None
self.current_db_revision = 3
self.db_revision_file = conf.settings.get_db_revision_filename()
self.session = None
self.uploaded_temp_files = []
self._session_id = conf.settings.get_session_id()
# TODO: this should probably be passed into the daemon, or
# possibly have the entire log upload functionality taken out
# of the daemon, but I don't want to deal with that now
self.analytics_manager = analytics_manager
self.lbryid = utils.generate_id()
self.wallet_user = None
self.wallet_password = None
self.query_handlers = {}
self.waiting_on = {}
self.streams = {}
self.name_cache = {}
self.exchange_rate_manager = ExchangeRateManager()
calls = {
Checker.INTERNET_CONNECTION: LoopingCall(CheckInternetConnection(self)),
Checker.CONNECTION_STATUS: LoopingCall(self._update_connection_status),
}
self.looping_call_manager = LoopingCallManager(calls)
self.sd_identifier = StreamDescriptorIdentifier()
self.stream_info_manager = None
self.lbry_file_manager = None
@defer.inlineCallbacks
def setup(self):
reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
configure_loggly_handler()
@defer.inlineCallbacks
def _announce_startup():
def _announce():
self.announced_startup = True
self.startup_status = STARTUP_STAGES[5]
log.info("Started lbrynet-daemon")
log.info("%i blobs in manager", len(self.session.blob_manager.blobs))
yield self.session.blob_manager.get_all_verified_blobs()
yield _announce()
log.info("Starting lbrynet-daemon")
self.looping_call_manager.start(Checker.INTERNET_CONNECTION, 3600)
self.looping_call_manager.start(Checker.CONNECTION_STATUS, 30)
self.exchange_rate_manager.start()
yield self._initial_setup()
yield threads.deferToThread(self._setup_data_directory)
yield self._check_db_migration()
yield self._load_caches()
yield self._get_session()
yield self._get_analytics()
yield add_lbry_file_to_sd_identifier(self.sd_identifier)
yield self._setup_stream_identifier()
yield self._setup_lbry_file_manager()
yield self._setup_query_handlers()
yield self._setup_server()
log.info("Starting balance: " + str(self.session.wallet.get_balance()))
yield _announce_startup()
def _get_platform(self):
if self.platform is None:
self.platform = system_info.get_platform()
return self.platform
def _initial_setup(self):
def _log_platform():
log.info("Platform: %s", json.dumps(self._get_platform()))
return defer.succeed(None)
d = _log_platform()
return d
def _load_caches(self):
name_cache_filename = os.path.join(self.db_dir, "stream_info_cache.json")
if os.path.isfile(name_cache_filename):
with open(name_cache_filename, "r") as name_cache_file:
name_cache = name_cache_file.read()
try:
self.name_cache = json.loads(name_cache)
log.info("Loaded claim info cache")
except ValueError:
log.warning("Unable to load claim info cache")
def _check_network_connection(self):
self.connected_to_internet = utils.check_connection()
def _check_lbrynet_connection(self):
def _log_success():
log.info("lbrynet connectivity test passed")
def _log_failure():
log.info("lbrynet connectivity test failed")
wonderfullife_sh = ("6f3af0fa3924be98a54766aa2715d22c6c1509c3f7fa32566df4899"
"a41f3530a9f97b2ecb817fa1dcbf1b30553aefaa7")
d = download_sd_blob(self.session, wonderfullife_sh, self.session.base_payment_rate_manager)
        d.addCallbacks(lambda _: _log_success(), lambda _: _log_failure())
def _update_connection_status(self):
self.connection_status_code = CONNECTION_STATUS_CONNECTED
if not self.connected_to_internet:
self.connection_status_code = CONNECTION_STATUS_NETWORK
def _start_server(self):
if self.peer_port is not None:
server_factory = ServerProtocolFactory(self.session.rate_limiter,
self.query_handlers,
self.session.peer_manager)
try:
log.info("Daemon bound to port: %d", self.peer_port)
self.lbry_server_port = reactor.listenTCP(self.peer_port, server_factory)
except error.CannotListenError as e:
import traceback
log.error("Couldn't bind to port %d. Visit lbry.io/faq/how-to-change-port for"
" more details.", self.peer_port)
log.error("%s", traceback.format_exc())
                raise ValueError("{} lbrynet may already be running on your computer.".format(e))
return defer.succeed(True)
def _start_reflector(self):
if self.run_reflector_server:
log.info("Starting reflector server")
if self.reflector_port is not None:
reflector_factory = reflector_server_factory(
self.session.peer_manager,
self.session.blob_manager
)
try:
self.reflector_server_port = reactor.listenTCP(self.reflector_port,
reflector_factory)
log.info('Started reflector on port %s', self.reflector_port)
except error.CannotListenError as e:
log.exception("Couldn't bind reflector to port %d", self.reflector_port)
raise ValueError(
"{} lbrynet may already be running on your computer.".format(e))
return defer.succeed(True)
def _stop_reflector(self):
if self.run_reflector_server:
log.info("Stopping reflector server")
try:
if self.reflector_server_port is not None:
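                    # clear the attribute first so a second shutdown attempt is a no-op,
                    # then stop the old listening port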
self.reflector_server_port, p = None, self.reflector_server_port
return defer.maybeDeferred(p.stopListening)
except AttributeError:
return defer.succeed(True)
return defer.succeed(True)
def _stop_file_manager(self):
if self.lbry_file_manager:
self.lbry_file_manager.stop()
return defer.succeed(True)
def _stop_server(self):
try:
if self.lbry_server_port is not None:
self.lbry_server_port, old_port = None, self.lbry_server_port
log.info('Stop listening to %s', old_port)
return defer.maybeDeferred(old_port.stopListening)
else:
return defer.succeed(True)
except AttributeError:
return defer.succeed(True)
def _setup_server(self):
self.startup_status = STARTUP_STAGES[4]
d = self._start_server()
d.addCallback(lambda _: self._start_reflector())
return d
def _setup_query_handlers(self):
handlers = [
BlobRequestHandlerFactory(
self.session.blob_manager,
self.session.wallet,
self.session.payment_rate_manager,
self.analytics_manager
),
self.session.wallet.get_wallet_info_query_handler_factory(),
]
return self._add_query_handlers(handlers)
def _add_query_handlers(self, query_handlers):
for handler in query_handlers:
query_id = handler.get_primary_query_identifier()
self.query_handlers[query_id] = handler
return defer.succeed(None)
def _clean_up_temp_files(self):
for path in self.uploaded_temp_files:
try:
log.debug('Removing tmp file: %s', path)
os.remove(path)
except OSError:
pass
def _shutdown(self):
log.info("Closing lbrynet session")
log.info("Status at time of shutdown: " + self.startup_status[0])
self.looping_call_manager.shutdown()
if self.analytics_manager:
self.analytics_manager.shutdown()
self._clean_up_temp_files()
d = self._stop_server()
d.addErrback(log.fail(), 'Failure while shutting down')
d.addCallback(lambda _: self._stop_reflector())
d.addErrback(log.fail(), 'Failure while shutting down')
d.addCallback(lambda _: self._stop_file_manager())
d.addErrback(log.fail(), 'Failure while shutting down')
if self.session is not None:
d.addCallback(lambda _: self.session.shut_down())
d.addErrback(log.fail(), 'Failure while shutting down')
return d
def _update_settings(self, settings):
setting_types = {
'download_directory': str,
'data_rate': float,
'download_timeout': int,
'peer_port': int,
'max_key_fee': dict,
'use_upnp': bool,
'run_reflector_server': bool,
'cache_time': int,
'reflect_uploads': bool,
'share_usage_data': bool,
'disable_max_key_fee': bool,
'peer_search_timeout': int,
'sd_download_timeout': int,
}
for key, setting_type in setting_types.iteritems():
if key in settings:
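                # values may already be the right type, may be a dict passed as a JSON
                # string, or may need coercion to the expected type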
if isinstance(settings[key], setting_type):
conf.settings.update({key: settings[key]},
data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
elif setting_type is dict and isinstance(settings[key], (unicode, str)):
decoded = json.loads(str(settings[key]))
conf.settings.update({key: decoded},
data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
else:
try:
converted = setting_type(settings[key])
conf.settings.update({key: converted},
data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
except Exception as err:
log.warning(err.message)
log.warning("error converting setting '%s' to type %s from type %s", key,
setting_type, str(type(settings[key])))
conf.settings.save_conf_file_settings()
self.data_rate = conf.settings['data_rate']
self.max_key_fee = conf.settings['max_key_fee']
self.disable_max_key_fee = conf.settings['disable_max_key_fee']
self.download_directory = conf.settings['download_directory']
self.download_timeout = conf.settings['download_timeout']
return defer.succeed(True)
def _write_db_revision_file(self, version_num):
with open(self.db_revision_file, mode='w') as db_revision:
db_revision.write(str(version_num))
def _setup_data_directory(self):
old_revision = 1
self.startup_status = STARTUP_STAGES[1]
log.info("Loading databases")
if not os.path.exists(self.download_directory):
os.mkdir(self.download_directory)
if not os.path.exists(self.db_dir):
os.mkdir(self.db_dir)
self._write_db_revision_file(self.current_db_revision)
log.debug("Created the db revision file: %s", self.db_revision_file)
if not os.path.exists(self.blobfile_dir):
os.mkdir(self.blobfile_dir)
log.debug("Created the blobfile directory: %s", str(self.blobfile_dir))
if not os.path.exists(self.db_revision_file):
log.warning("db_revision file not found. Creating it")
self._write_db_revision_file(old_revision)
def _check_db_migration(self):
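        # databases that predate the revision file are assumed to be at revision 1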
old_revision = 1
if os.path.exists(self.db_revision_file):
old_revision = int(open(self.db_revision_file).read().strip())
if old_revision > self.current_db_revision:
raise Exception('This version of lbrynet is not compatible with the database')
def update_version_file_and_print_success():
self._write_db_revision_file(self.current_db_revision)
log.info("Finished upgrading the databases.")
if old_revision < self.current_db_revision:
from lbrynet.db_migrator import dbmigrator
log.info("Upgrading your databases")
d = threads.deferToThread(
dbmigrator.migrate_db, self.db_dir, old_revision, self.current_db_revision)
d.addCallback(lambda _: update_version_file_and_print_success())
return d
return defer.succeed(True)
@defer.inlineCallbacks
def _setup_lbry_file_manager(self):
        log.info('Starting to set up the file manager')
self.startup_status = STARTUP_STAGES[3]
self.stream_info_manager = DBEncryptedFileMetadataManager(self.db_dir)
yield self.stream_info_manager.setup()
self.lbry_file_manager = EncryptedFileManager(
self.session,
self.stream_info_manager,
self.sd_identifier,
download_directory=self.download_directory
)
yield self.lbry_file_manager.setup()
log.info('Done setting up file manager')
def _get_analytics(self):
if not self.analytics_manager.is_started:
self.analytics_manager.start()
self.analytics_manager.register_repeating_metric(
analytics.BLOB_BYTES_AVAILABLE,
AlwaysSend(calculate_available_blob_size, self.session.blob_manager),
frequency=300
)
def _get_session(self):
def get_wallet():
if self.wallet_type == LBRYCRD_WALLET:
raise ValueError('LBRYcrd Wallet is no longer supported')
elif self.wallet_type == LBRYUM_WALLET:
log.info("Using lbryum wallet")
config = {'auto_connect': True}
if conf.settings['lbryum_wallet_dir']:
config['lbryum_path'] = conf.settings['lbryum_wallet_dir']
storage = SqliteStorage(self.db_dir)
wallet = LBRYumWallet(storage, config)
return defer.succeed(wallet)
elif self.wallet_type == PTC_WALLET:
log.info("Using PTC wallet")
from lbrynet.core.PTCWallet import PTCWallet
return defer.succeed(PTCWallet(self.db_dir))
else:
raise ValueError('Wallet Type {} is not valid'.format(self.wallet_type))
d = get_wallet()
def create_session(wallet):
self.session = Session(
conf.settings['data_rate'],
db_dir=self.db_dir,
lbryid=self.lbryid,
blob_dir=self.blobfile_dir,
dht_node_port=self.dht_node_port,
known_dht_nodes=conf.settings['known_dht_nodes'],
peer_port=self.peer_port,
use_upnp=self.use_upnp,
wallet=wallet,
is_generous=conf.settings['is_generous_host']
)
self.startup_status = STARTUP_STAGES[2]
d.addCallback(create_session)
d.addCallback(lambda _: self.session.setup())
return d
def _setup_stream_identifier(self):
file_saver_factory = EncryptedFileSaverFactory(
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self.session.wallet,
self.download_directory
)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, file_saver_factory)
file_opener_factory = EncryptedFileOpenerFactory(
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self.session.wallet
)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, file_opener_factory)
return defer.succeed(None)
def _download_blob(self, blob_hash, rate_manager=None, timeout=None):
"""
Download a blob
:param blob_hash (str): blob hash
:param rate_manager (PaymentRateManager), optional: the payment rate manager to use,
defaults to session.payment_rate_manager
:param timeout (int): blob timeout
:return: BlobFile
"""
if not blob_hash:
raise Exception("Nothing to download")
rate_manager = rate_manager or self.session.payment_rate_manager
timeout = timeout or 30
return download_sd_blob(self.session, blob_hash, rate_manager, timeout)
@defer.inlineCallbacks
def _download_name(self, name, claim_dict, claim_id, timeout=None, file_name=None):
"""
Add a lbry file to the file manager, start the download, and return the new lbry file.
If it already exists in the file manager, return the existing lbry file
"""
if claim_id in self.streams:
downloader = self.streams[claim_id]
result = yield downloader.finished_deferred
defer.returnValue(result)
else:
download_id = utils.random_string()
self.analytics_manager.send_download_started(download_id, name, claim_dict)
self.streams[claim_id] = GetStream(self.sd_identifier, self.session,
self.exchange_rate_manager, self.max_key_fee,
self.disable_max_key_fee,
conf.settings['data_rate'], timeout,
file_name)
try:
lbry_file, finished_deferred = yield self.streams[claim_id].start(claim_dict, name)
finished_deferred.addCallback(
lambda _: self.analytics_manager.send_download_finished(download_id,
name,
claim_dict))
result = yield self._get_lbry_file_dict(lbry_file, full_status=True)
del self.streams[claim_id]
except Exception as err:
log.warning('Failed to get %s: %s', name, err)
self.analytics_manager.send_download_errored(download_id, name, claim_dict)
del self.streams[claim_id]
result = {'error': err.message}
defer.returnValue(result)
@defer.inlineCallbacks
def _publish_stream(self, name, bid, claim_dict, file_path=None, certificate_id=None,
claim_address=None, change_address=None):
publisher = Publisher(self.session, self.lbry_file_manager, self.session.wallet,
certificate_id)
parse_lbry_uri(name)
if bid <= 0.0:
raise Exception("Invalid bid")
if not file_path:
claim_out = yield publisher.publish_stream(name, bid, claim_dict, claim_address,
change_address)
else:
claim_out = yield publisher.create_and_publish_stream(name, bid, claim_dict, file_path,
claim_address, change_address)
if conf.settings['reflect_uploads']:
d = reupload.reflect_stream(publisher.lbry_file)
d.addCallbacks(lambda _: log.info("Reflected new publication to lbry://%s", name),
log.exception)
self.analytics_manager.send_claim_action('publish')
log.info("Success! Published to lbry://%s txid: %s nout: %d", name, claim_out['txid'],
claim_out['nout'])
defer.returnValue(claim_out)
def _get_long_count_timestamp(self):
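        # number of whole seconds since a fixed reference date (2012-12-21)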
dt = utils.utcnow() - utils.datetime_obj(year=2012, month=12, day=21)
return int(dt.total_seconds())
def _update_claim_cache(self):
f = open(os.path.join(self.db_dir, "stream_info_cache.json"), "w")
f.write(json.dumps(self.name_cache))
f.close()
return defer.succeed(True)
@defer.inlineCallbacks
def _resolve_name(self, name, force_refresh=False):
"""Resolves a name. Checks the cache first before going out to the blockchain.
Args:
name: the lbry://<name> to resolve
force_refresh: if True, always go out to the blockchain to resolve.
"""
parsed = parse_lbry_uri(name)
resolution = yield self.session.wallet.resolve(parsed.name, check_cache=not force_refresh)
if parsed.name in resolution:
result = resolution[parsed.name]
defer.returnValue(result)
def _get_or_download_sd_blob(self, blob, sd_hash):
if blob:
return self.session.blob_manager.get_blob(blob[0])
def _check_est(downloader):
if downloader.result is not None:
downloader.cancel()
d = defer.succeed(None)
reactor.callLater(self.search_timeout, _check_est, d)
d.addCallback(
lambda _: download_sd_blob(
self.session, sd_hash, self.session.payment_rate_manager))
return d
def get_or_download_sd_blob(self, sd_hash):
"""Return previously downloaded sd blob if already in the blob
manager, otherwise download and return it
"""
d = self.session.blob_manager.completed_blobs([sd_hash])
d.addCallback(self._get_or_download_sd_blob, sd_hash)
return d
def get_size_from_sd_blob(self, sd_blob):
"""
Get total stream size in bytes from a sd blob
"""
d = self.sd_identifier.get_metadata_for_sd_blob(sd_blob)
d.addCallback(lambda metadata: metadata.validator.info_to_show())
d.addCallback(lambda info: int(dict(info)['stream_size']))
return d
def _get_est_cost_from_stream_size(self, size):
"""
Calculate estimated LBC cost for a stream given its size in bytes
"""
if self.session.payment_rate_manager.generous:
return 0.0
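        # the configured data_rate is applied per million bytes of stream size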
return size / (10 ** 6) * conf.settings['data_rate']
@defer.inlineCallbacks
def get_est_cost_using_known_size(self, uri, size):
"""
Calculate estimated LBC cost for a stream given its size in bytes
"""
cost = self._get_est_cost_from_stream_size(size)
resolved = yield self.session.wallet.resolve(uri)
if uri in resolved and 'claim' in resolved[uri]:
claim = ClaimDict.load_dict(resolved[uri]['claim']['value'])
final_fee = self._add_key_fee_to_est_data_cost(claim.source_fee, cost)
result = yield self._render_response(final_fee)
defer.returnValue(result)
else:
defer.returnValue(None)
def get_est_cost_from_sd_hash(self, sd_hash):
"""
Get estimated cost from a sd hash
"""
d = self.get_or_download_sd_blob(sd_hash)
d.addCallback(self.get_size_from_sd_blob)
d.addCallback(self._get_est_cost_from_stream_size)
return d
def _get_est_cost_from_metadata(self, metadata, name):
d = self.get_est_cost_from_sd_hash(metadata.source_hash)
def _handle_err(err):
if isinstance(err, Failure):
log.warning(
"Timeout getting blob for cost est for lbry://%s, using only key fee", name)
return 0.0
raise err
d.addErrback(_handle_err)
d.addCallback(lambda data_cost: self._add_key_fee_to_est_data_cost(metadata.source_fee,
data_cost))
return d
def _add_key_fee_to_est_data_cost(self, fee, data_cost):
fee_amount = 0.0 if not fee else self.exchange_rate_manager.convert_currency(fee.currency,
"LBC",
fee.amount)
return data_cost + fee_amount
@defer.inlineCallbacks
def get_est_cost_from_uri(self, uri):
"""
Resolve a name and return the estimated stream cost
"""
resolved = yield self.session.wallet.resolve(uri)
if resolved:
claim_response = resolved[uri]
else:
claim_response = None
result = None
if claim_response and 'claim' in claim_response:
if 'value' in claim_response['claim'] and claim_response['claim']['value'] is not None:
claim_value = ClaimDict.load_dict(claim_response['claim']['value'])
cost = yield self._get_est_cost_from_metadata(claim_value, uri)
result = round(cost, 5)
else:
log.warning("Failed to estimate cost for %s", uri)
defer.returnValue(result)
def get_est_cost(self, uri, size=None):
"""Get a cost estimate for a lbry stream, if size is not provided the
sd blob will be downloaded to determine the stream size
"""
if size is not None:
return self.get_est_cost_using_known_size(uri, size)
return self.get_est_cost_from_uri(uri)
@defer.inlineCallbacks
def _get_lbry_file_dict(self, lbry_file, full_status=False):
key = binascii.b2a_hex(lbry_file.key) if lbry_file.key else None
full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
mime_type = mimetypes.guess_type(full_path)[0]
if os.path.isfile(full_path):
with open(full_path) as written_file:
written_file.seek(0, os.SEEK_END)
written_bytes = written_file.tell()
else:
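            # the file has not been written to disk yet, so report False instead of a byte count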
written_bytes = False
if full_status:
size = yield lbry_file.get_total_bytes()
file_status = yield lbry_file.status()
message = STREAM_STAGES[2][1] % (file_status.name, file_status.num_completed,
file_status.num_known, file_status.running_status)
else:
size = None
message = None
claim = yield self.session.wallet.get_claim(lbry_file.claim_id, check_expire=False)
if claim and 'value' in claim:
metadata = claim['value']
else:
metadata = None
if claim and 'channel_name' in claim:
channel_name = claim['channel_name']
else:
channel_name = None
if lbry_file.txid and lbry_file.nout is not None:
outpoint = repr(ClaimOutpoint(lbry_file.txid, lbry_file.nout))
else:
outpoint = None
if claim and 'has_signature' in claim:
has_signature = claim['has_signature']
else:
has_signature = None
if claim and 'signature_is_valid' in claim:
signature_is_valid = claim['signature_is_valid']
else:
signature_is_valid = None
result = {
'completed': lbry_file.completed,
'file_name': lbry_file.file_name,
'download_directory': lbry_file.download_directory,
'points_paid': lbry_file.points_paid,
'stopped': lbry_file.stopped,
'stream_hash': lbry_file.stream_hash,
'stream_name': lbry_file.stream_name,
'suggested_file_name': lbry_file.suggested_file_name,
'sd_hash': lbry_file.sd_hash,
'name': lbry_file.name,
'outpoint': outpoint,
'claim_id': lbry_file.claim_id,
'download_path': full_path,
'mime_type': mime_type,
'key': key,
'total_bytes': size,
'written_bytes': written_bytes,
'message': message,
'metadata': metadata
}
if channel_name is not None:
result['channel_name'] = channel_name
if has_signature is not None:
result['has_signature'] = has_signature
if signature_is_valid is not None:
result['signature_is_valid'] = signature_is_valid
defer.returnValue(result)
@defer.inlineCallbacks
def _get_lbry_file(self, search_by, val, return_json=False, full_status=False):
lbry_file = None
if search_by in FileID:
for l_f in self.lbry_file_manager.lbry_files:
if l_f.__dict__.get(search_by) == val:
lbry_file = l_f
break
else:
raise NoValidSearch('{} is not a valid search operation'.format(search_by))
if return_json and lbry_file:
lbry_file = yield self._get_lbry_file_dict(lbry_file, full_status=full_status)
defer.returnValue(lbry_file)
@defer.inlineCallbacks
def _get_lbry_files(self, return_json=False, full_status=False, **kwargs):
lbry_files = list(self.lbry_file_manager.lbry_files)
if kwargs:
for search_type, value in iter_lbry_file_search_values(kwargs):
lbry_files = [l_f for l_f in lbry_files if l_f.__dict__[search_type] == value]
if return_json:
file_dicts = []
for lbry_file in lbry_files:
lbry_file_dict = yield self._get_lbry_file_dict(lbry_file, full_status=full_status)
file_dicts.append(lbry_file_dict)
lbry_files = file_dicts
log.debug("Collected %i lbry files", len(lbry_files))
defer.returnValue(lbry_files)
# TODO: do this and get_blobs_for_sd_hash in the stream info manager
def get_blobs_for_stream_hash(self, stream_hash):
def _iter_blobs(blob_hashes):
for blob_hash, blob_num, blob_iv, blob_length in blob_hashes:
if blob_hash:
yield self.session.blob_manager.get_blob(blob_hash, length=blob_length)
def _get_blobs(blob_hashes):
dl = defer.DeferredList(list(_iter_blobs(blob_hashes)), consumeErrors=True)
dl.addCallback(lambda blobs: [blob[1] for blob in blobs if blob[0]])
return dl
d = self.stream_info_manager.get_blobs_for_stream(stream_hash)
d.addCallback(_get_blobs)
return d
def get_blobs_for_sd_hash(self, sd_hash):
d = self.stream_info_manager.get_stream_hash_for_sd_hash(sd_hash)
d.addCallback(self.get_blobs_for_stream_hash)
return d
############################################################################
# #
# JSON-RPC API methods start here #
# #
############################################################################
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(session_status="-s", dht_status="-d")
def jsonrpc_status(self, session_status=False, dht_status=False):
"""
Get daemon status
Usage:
status [-s] [-d]
Options:
-s : include session status in results
-d : include dht network and peer status
Returns:
(dict) lbrynet-daemon status
{
'lbry_id': lbry peer id, base58
'installation_id': installation id, base58
'is_running': bool
'is_first_run': bool
'startup_status': {
'code': status code
'message': status message
},
'connection_status': {
'code': connection status code
'message': connection status message
},
'blockchain_status': {
'blocks': local blockchain height,
'blocks_behind': remote_height - local_height,
'best_blockhash': block hash of most recent block,
},
If given the session status option:
'session_status': {
'managed_blobs': count of blobs in the blob manager,
'managed_streams': count of streams in the file manager
}
If given the dht status option:
'dht_status': {
'kbps_received': current kbps receiving,
                    'kbps_sent': current kbps being sent,
'total_bytes_sent': total bytes sent
'total_bytes_received': total bytes received
'queries_received': number of queries received per second
'queries_sent': number of queries sent per second
'recent_contacts': count of recently contacted peers
'unique_contacts': count of unique peers
}
}
"""
# on startup, the wallet or network won't be available but we still need this call to work
has_wallet = self.session and self.session.wallet and self.session.wallet.network
local_height = self.session.wallet.network.get_local_height() if has_wallet else 0
remote_height = self.session.wallet.network.get_server_height() if has_wallet else 0
best_hash = (yield self.session.wallet.get_best_blockhash()) if has_wallet else None
response = {
'lbry_id': base58.b58encode(self.lbryid),
'installation_id': conf.settings.installation_id,
'is_running': self.announced_startup,
'is_first_run': self.session.wallet.is_first_run if has_wallet else None,
'startup_status': {
'code': self.startup_status[0],
'message': self.startup_status[1],
},
'connection_status': {
'code': self.connection_status_code,
'message': (
CONNECTION_MESSAGES[self.connection_status_code]
if self.connection_status_code is not None
else ''
),
},
'blocks_behind': remote_height - local_height, # deprecated. remove from UI, then here
'blockchain_status': {
'blocks': local_height,
'blocks_behind': remote_height - local_height,
'best_blockhash': best_hash,
}
}
if session_status:
blobs = yield self.session.blob_manager.get_all_verified_blobs()
response['session_status'] = {
'managed_blobs': len(blobs),
'managed_streams': len(self.lbry_file_manager.lbry_files),
}
if dht_status:
response['dht_status'] = self.session.dht_node.get_bandwidth_stats()
defer.returnValue(response)
def jsonrpc_version(self):
"""
Get lbry version information
Usage:
version
Returns:
(dict) Dictionary of lbry version information
{
'build': (str) build type (e.g. "dev", "rc", "release"),
'ip': (str) remote ip, if available,
'lbrynet_version': (str) lbrynet_version,
'lbryum_version': (str) lbryum_version,
'lbryschema_version': (str) lbryschema_version,
'os_release': (str) os release string
'os_system': (str) os name
'platform': (str) platform string
'processor': (str) processor type,
'python_version': (str) python version,
}
"""
platform_info = self._get_platform()
log.info("Get version info: " + json.dumps(platform_info))
return self._render_response(platform_info)
def jsonrpc_report_bug(self, message=None):
"""
Report a bug to slack
Usage:
report_bug (<message> | --message=<message>)
Returns:
(bool) true if successful
"""
platform_name = self._get_platform()['platform']
report_bug_to_slack(
message,
conf.settings.installation_id,
platform_name,
get_lbrynet_version()
)
return self._render_response(True)
def jsonrpc_settings_get(self):
"""
Get daemon settings
Usage:
settings_get
Returns:
(dict) Dictionary of daemon settings
See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
"""
return self._render_response(conf.settings.get_adjustable_settings_dict())
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_settings_set(self, **kwargs):
"""
Set daemon settings
Usage:
settings_set [<download_directory> | --download_directory=<download_directory>]
[<data_rate> | --data_rate=<data_rate>]
[<download_timeout> | --download_timeout=<download_timeout>]
[<peer_port> | --peer_port=<peer_port>]
[<max_key_fee> | --max_key_fee=<max_key_fee>]
[<disable_max_key_fee> | --disable_max_key_fee=<disable_max_key_fee>]
[<use_upnp> | --use_upnp=<use_upnp>]
[<run_reflector_server> | --run_reflector_server=<run_reflector_server>]
[<cache_time> | --cache_time=<cache_time>]
[<reflect_uploads> | --reflect_uploads=<reflect_uploads>]
[<share_usage_data> | --share_usage_data=<share_usage_data>]
[<peer_search_timeout> | --peer_search_timeout=<peer_search_timeout>]
[<sd_download_timeout> | --sd_download_timeout=<sd_download_timeout>]
Options:
<download_directory>, --download_directory=<download_directory> : (str)
<data_rate>, --data_rate=<data_rate> : (float), 0.0001
<download_timeout>, --download_timeout=<download_timeout> : (int), 180
<peer_port>, --peer_port=<peer_port> : (int), 3333
<max_key_fee>, --max_key_fee=<max_key_fee> : (dict) maximum key fee for downloads,
in the format: {
"currency": <currency_symbol>,
"amount": <amount>
}. In the CLI, it must be an escaped
JSON string
Supported currency symbols:
LBC
BTC
USD
<disable_max_key_fee>, --disable_max_key_fee=<disable_max_key_fee> : (bool), False
<use_upnp>, --use_upnp=<use_upnp> : (bool), True
<run_reflector_server>, --run_reflector_server=<run_reflector_server> : (bool), False
<cache_time>, --cache_time=<cache_time> : (int), 150
<reflect_uploads>, --reflect_uploads=<reflect_uploads> : (bool), True
<share_usage_data>, --share_usage_data=<share_usage_data> : (bool), True
<peer_search_timeout>, --peer_search_timeout=<peer_search_timeout> : (int), 3
<sd_download_timeout>, --sd_download_timeout=<sd_download_timeout> : (int), 3
Returns:
(dict) Updated dictionary of daemon settings
"""
yield self._update_settings(kwargs)
defer.returnValue(conf.settings.get_adjustable_settings_dict())
def jsonrpc_help(self, command=None):
"""
Return a useful message for an API command
Usage:
help [<command> | --command=<command>]
Options:
<command>, --command=<command> : command to retrieve documentation for
"""
if command is None:
return self._render_response({
'about': 'This is the LBRY JSON-RPC API',
'command_help': 'Pass a `command` parameter to this method to see ' +
'help for that command (e.g. `help command=resolve_name`)',
'command_list': 'Get a full list of commands using the `commands` method',
'more_info': 'Visit https://lbry.io/api for more info',
})
fn = self.callable_methods.get(command)
if fn is None:
raise Exception(
"No help available for '{}'. It is not a valid command.".format(command)
)
return self._render_response({
'help': textwrap.dedent(fn.__doc__)
})
def jsonrpc_commands(self):
"""
Return a list of available commands
Usage:
commands
Returns:
(list) list of available commands
"""
return self._render_response(sorted([command for command in self.callable_methods.keys()]))
@AuthJSONRPCServer.flags(include_unconfirmed='-u')
def jsonrpc_wallet_balance(self, address=None, include_unconfirmed=False):
"""
Return the balance of the wallet
Usage:
wallet_balance [<address> | --address=<address>] [-u]
Options:
<address> : If provided only the balance for this address will be given
-u : Include unconfirmed
Returns:
(float) amount of lbry credits in wallet
"""
if address is None:
return self._render_response(float(self.session.wallet.get_balance()))
else:
return self._render_response(float(
self.session.wallet.get_address_balance(address, include_unconfirmed)))
@defer.inlineCallbacks
def jsonrpc_daemon_stop(self):
"""
Stop lbrynet-daemon
Usage:
daemon_stop
Returns:
(string) Shutdown message
"""
log.info("Shutting down lbrynet daemon")
response = yield self._render_response("Shutting down")
reactor.callLater(0.1, reactor.fireSystemEvent, "shutdown")
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(full_status='-f')
def jsonrpc_file_list(self, **kwargs):
"""
List files limited by optional filters
Usage:
file_list [--sd_hash=<sd_hash>] [--file_name=<file_name>] [--stream_hash=<stream_hash>]
[--claim_id=<claim_id>] [--outpoint=<outpoint>] [--rowid=<rowid>]
[--name=<name>]
[-f]
Options:
--sd_hash=<sd_hash> : get file with matching sd hash
--file_name=<file_name> : get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : get file with matching stream hash
--claim_id=<claim_id> : get file with matching claim id
--outpoint=<outpoint> : get file with matching claim outpoint
--rowid=<rowid> : get file with matching row id
--name=<name> : get file with matching associated name claim
-f : full status, populate the 'message' and 'size' fields
Returns:
(list) List of files
[
{
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
                    'stream_name': (str) stream name,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'name': (str) name claim attached to file
'outpoint': (str) claim outpoint attached to file
'claim_id': (str) claim ID attached to file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false
'written_bytes': (int) written size in bytes
'message': (str), None if full_status is false
'metadata': (dict) Metadata dictionary
},
]
"""
result = yield self._get_lbry_files(return_json=True, **kwargs)
response = yield self._render_response(result)
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(force='-f')
def jsonrpc_resolve_name(self, name, force=False):
"""
Resolve stream info from a LBRY name
Usage:
resolve_name <name> [-f]
Options:
-f : force refresh and do not check cache
Returns:
(dict) Metadata dictionary from name claim, None if the name is not
resolvable
"""
try:
metadata = yield self._resolve_name(name, force_refresh=force)
except UnknownNameError:
log.info('Name %s is not known', name)
defer.returnValue(None)
else:
defer.returnValue(metadata)
@defer.inlineCallbacks
def jsonrpc_claim_show(self, name=None, txid=None, nout=None, claim_id=None):
"""
Resolve claim info from a LBRY name
Usage:
claim_show <name> [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
[<claim_id> | --claim_id=<claim_id>]
Options:
<txid>, --txid=<txid> : look for claim with this txid
<nout>, --nout=<nout> : look for claim with this nout
<claim_id>, --claim_id=<claim_id> : look for claim with this claim id
Returns:
            (dict) Dictionary containing claim info, (bool) false if claim is not
resolvable
{
'txid': (str) txid of claim
'nout': (int) nout of claim
'amount': (float) amount of claim
'value': (str) value of claim
'height' : (int) height of claim takeover
'claim_id': (str) claim ID of claim
'supports': (list) list of supports associated with claim
}
"""
try:
if claim_id:
claim_results = yield self.session.wallet.get_claim(claim_id)
elif txid and nout is not None:
outpoint = ClaimOutpoint(txid, nout)
claim_results = yield self.session.wallet.get_claim_by_outpoint(outpoint)
else:
claim_results = yield self.session.wallet.resolve(name)
if claim_results:
claim_results = claim_results[name]
result = format_json_out_amount_as_float(claim_results)
except (TypeError, UnknownNameError, UnknownClaimID, UnknownURI):
result = False
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(force='-f')
def jsonrpc_resolve(self, force=False, uri=None, uris=[]):
"""
Resolve given LBRY URIs
Usage:
resolve [-f] (<uri> | --uri=<uri>) [<uris>...]
Options:
-f : force refresh and ignore cache
Returns:
Dictionary of results, keyed by uri
'<uri>': {
If a resolution error occurs:
'error': Error message
If the uri resolves to a channel or a claim in a channel:
'certificate': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
                    'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
If the uri resolves to a claim:
'claim': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'channel_name': (str) channel name if claim is in a channel
                    'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}]
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
}
"""
uris = tuple(uris)
if uri is not None:
uris += (uri,)
results = {}
valid_uris = tuple()
for u in uris:
try:
parse_lbry_uri(u)
valid_uris += (u, )
except URIParseError:
results[u] = {"error": "%s is not a valid uri" % u}
resolved = yield self.session.wallet.resolve(*valid_uris, check_cache=not force)
for resolved_uri in resolved:
results[resolved_uri] = resolved[resolved_uri]
response = yield self._render_response(results)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_get(self, uri, file_name=None, timeout=None):
"""
Download stream from a LBRY name.
Usage:
get <uri> [<file_name> | --file_name=<file_name>] [<timeout> | --timeout=<timeout>]
Options:
<file_name> : specified name for the downloaded file
<timeout> : download timeout in number of seconds
Returns:
            (dict) Dictionary containing information about the stream
{
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
'stream_name': (str) stream name,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'name': (str) name claim attached to file
'outpoint': (str) claim outpoint attached to file
'claim_id': (str) claim ID attached to file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false
'written_bytes': (int) written size in bytes
'message': (str), None if full_status is false
'metadata': (dict) Metadata dictionary
}
"""
timeout = timeout if timeout is not None else self.download_timeout
resolved_result = yield self.session.wallet.resolve(uri)
if resolved_result and uri in resolved_result:
resolved = resolved_result[uri]
else:
resolved = None
        if not resolved or 'value' not in resolved:
            if not resolved or 'claim' not in resolved:
                raise Exception(
                    "Failed to resolve stream at lbry://{}".format(uri.replace("lbry://", "")))
            else:
                resolved = resolved['claim']
name = resolved['name']
claim_id = resolved['claim_id']
claim_dict = ClaimDict.load_dict(resolved['value'])
if claim_id in self.streams:
log.info("Already waiting on lbry://%s to start downloading", name)
yield self.streams[claim_id].data_downloading_deferred
lbry_file = yield self._get_lbry_file(FileID.CLAIM_ID, claim_id, return_json=False)
if lbry_file:
if not os.path.isfile(os.path.join(lbry_file.download_directory, lbry_file.file_name)):
log.info("Already have lbry file but missing file in %s, rebuilding it",
lbry_file.download_directory)
yield lbry_file.start()
else:
log.info('Already have a file for %s', name)
result = yield self._get_lbry_file_dict(lbry_file, full_status=True)
else:
result = yield self._download_name(name, claim_dict, claim_id, timeout=timeout,
file_name=file_name)
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_file_set_status(self, status, **kwargs):
"""
Start or stop downloading a file
Usage:
file_set_status <status> [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--claim_id=<claim_id>]
[--outpoint=<outpoint>] [--rowid=<rowid>]
[--name=<name>]
Options:
--sd_hash=<sd_hash> : set status of file with matching sd hash
--file_name=<file_name> : set status of file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : set status of file with matching stream hash
--claim_id=<claim_id> : set status of file with matching claim id
--outpoint=<outpoint> : set status of file with matching claim outpoint
--rowid=<rowid> : set status of file with matching row id
--name=<name> : set status of file with matching associated name claim
Returns:
(str) Confirmation message
"""
if status not in ['start', 'stop']:
raise Exception('Status must be "start" or "stop".')
search_type, value = get_lbry_file_search_value(kwargs)
lbry_file = yield self._get_lbry_file(search_type, value, return_json=False)
if not lbry_file:
raise Exception('Unable to find a file for {}:{}'.format(search_type, value))
        if (status == 'start' and lbry_file.stopped) or (status == 'stop' and not lbry_file.stopped):
yield self.lbry_file_manager.toggle_lbry_file_running(lbry_file)
msg = "Started downloading file" if status == 'start' else "Stopped downloading file"
else:
msg = (
"File was already being downloaded" if status == 'start'
else "File was already stopped"
)
response = yield self._render_response(msg)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(delete_from_download_dir='-f', delete_all='--delete_all')
def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
"""
Delete a LBRY file
Usage:
file_delete [-f] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--claim_id=<claim_id>]
[--outpoint=<outpoint>] [--rowid=<rowid>]
[--name=<name>]
Options:
-f, --delete_from_download_dir : delete file from download directory,
instead of just deleting blobs
--delete_all : if there are multiple matching files,
allow the deletion of multiple files.
Otherwise do not delete anything.
--sd_hash=<sd_hash> : delete by file sd hash
--file_name<file_name> : delete by file name in downloads folder
--stream_hash=<stream_hash> : delete by file stream hash
--claim_id=<claim_id> : delete by file claim id
--outpoint=<outpoint> : delete by file claim outpoint
--rowid=<rowid> : delete by file row id
--name=<name> : delete by associated name claim of file
Returns:
(bool) true if deletion was successful
"""
lbry_files = yield self._get_lbry_files(return_json=False, **kwargs)
if len(lbry_files) > 1:
if not delete_all:
log.warning("There are %i files to delete, use narrower filters to select one",
len(lbry_files))
response = yield self._render_response(False)
defer.returnValue(response)
else:
log.warning("Deleting %i files",
len(lbry_files))
if not lbry_files:
log.warning("There is no file to delete")
result = False
else:
for lbry_file in lbry_files:
file_name, stream_hash = lbry_file.file_name, lbry_file.stream_hash
if lbry_file.claim_id in self.streams:
del self.streams[lbry_file.claim_id]
yield self.lbry_file_manager.delete_lbry_file(lbry_file,
delete_file=delete_from_download_dir)
log.info("Deleted file: %s", file_name)
result = True
response = yield self._render_response(result)
defer.returnValue(response)
@defer.inlineCallbacks
def jsonrpc_stream_cost_estimate(self, uri, size=None):
"""
Get estimated cost for a lbry stream
Usage:
stream_cost_estimate <uri> [<size> | --size=<size>]
Options:
<size>, --size=<size> : stream size in bytes. if provided an sd blob won't be
downloaded.
Returns:
(float) Estimated cost in lbry credits, returns None if uri is not
                resolvable
"""
cost = yield self.get_est_cost(uri, size)
defer.returnValue(cost)
@AuthJSONRPCServer.auth_required
@AuthJSONRPCServer.queued
@defer.inlineCallbacks
def jsonrpc_channel_new(self, channel_name, amount):
"""
Generate a publisher key and create a new '@' prefixed certificate claim
Usage:
channel_new (<channel_name> | --channel_name=<channel_name>)
(<amount> | --amount=<amount>)
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
"""
try:
parsed = parse_lbry_uri(channel_name)
if not parsed.is_channel:
raise Exception("Cannot make a new channel for a non channel name")
if parsed.path:
raise Exception("Invalid channel uri")
except (TypeError, URIParseError):
raise Exception("Invalid channel name")
if amount <= 0:
raise Exception("Invalid amount")
if amount > self.session.wallet.get_balance():
raise InsufficientFundsError()
result = yield self.session.wallet.claim_new_channel(channel_name, amount)
self.analytics_manager.send_new_channel()
log.info("Claimed a new channel! Result: %s", result)
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_channel_list_mine(self):
"""
Get my channels
Usage:
channel_list_mine
Returns:
(list) ClaimDict
"""
result = yield self.session.wallet.channel_list()
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@AuthJSONRPCServer.queued
@defer.inlineCallbacks
def jsonrpc_publish(self, name, bid, metadata=None, file_path=None, fee=None, title=None,
description=None, author=None, language=None, license=None,
license_url=None, thumbnail=None, preview=None, nsfw=None, sources=None,
channel_name=None, channel_id=None,
claim_address=None, change_address=None):
"""
Make a new name claim and publish associated data to lbrynet,
update over existing claim if user already has a claim for name.
Fields required in the final Metadata are:
'title'
'description'
'author'
'language'
'license'
'nsfw'
Metadata can be set by either using the metadata argument or by setting individual arguments
fee, title, description, author, language, license, license_url, thumbnail, preview, nsfw,
or sources. Individual arguments will overwrite the fields specified in metadata argument.
Usage:
publish (<name> | --name=<name>) (<bid> | --bid=<bid>) [--metadata=<metadata>]
[--file_path=<file_path>] [--fee=<fee>] [--title=<title>]
[--description=<description>] [--author=<author>] [--language=<language>]
[--license=<license>] [--license_url=<license_url>] [--thumbnail=<thumbnail>]
[--preview=<preview>] [--nsfw=<nsfw>] [--sources=<sources>]
[--channel_name=<channel_name>] [--channel_id=<channel_id>]
[--claim_address=<claim_address>] [--change_address=<change_address>]
Options:
--metadata=<metadata> : ClaimDict to associate with the claim.
--file_path=<file_path> : path to file to be associated with name. If provided,
a lbry stream of this file will be used in 'sources'.
If no path is given but a metadata dict is provided,
the source from the given metadata will be used.
--fee=<fee> : Dictionary representing key fee to download content:
{
'currency': currency_symbol,
'amount': float,
'address': str, optional
}
supported currencies: LBC, USD, BTC
If an address is not provided a new one will be
automatically generated. Default fee is zero.
--title=<title> : title of the publication
--description=<description> : description of the publication
--author=<author> : author of the publication
--language=<language> : language of the publication
--license=<license> : publication license
--license_url=<license_url> : publication license url
--thumbnail=<thumbnail> : thumbnail url
--preview=<preview> : preview url
            --nsfw=<nsfw> : whether the content is nsfw
--sources=<sources> : {'lbry_sd_hash':sd_hash} specifies sd hash of file
--channel_name=<channel_name> : name of the publisher channel name in the wallet
--channel_id=<channel_id> : claim id of the publisher channel, does not check
for channel claim being in the wallet. This allows
publishing to a channel where only the certificate
private key is in the wallet.
--claim_address=<claim_address> : address where the claim is sent to, if not specified
                                              a new address will automatically be created
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
"""
try:
parse_lbry_uri(name)
except (TypeError, URIParseError):
raise Exception("Invalid name given to publish")
if bid <= 0.0:
raise Exception("Invalid bid")
if bid >= self.session.wallet.get_balance():
raise InsufficientFundsError('Insufficient funds. ' \
'Make sure you have enough LBC to deposit')
metadata = metadata or {}
if fee is not None:
metadata['fee'] = fee
if title is not None:
metadata['title'] = title
if description is not None:
metadata['description'] = description
if author is not None:
metadata['author'] = author
if language is not None:
metadata['language'] = language
if license is not None:
metadata['license'] = license
if license_url is not None:
metadata['licenseUrl'] = license_url
if thumbnail is not None:
metadata['thumbnail'] = thumbnail
if preview is not None:
metadata['preview'] = preview
if nsfw is not None:
metadata['nsfw'] = bool(nsfw)
metadata['version'] = '_0_1_0'
# check for original deprecated format {'currency':{'address','amount'}}
# add address, version to fee if unspecified
if 'fee' in metadata:
if len(metadata['fee'].keys()) == 1 and isinstance(metadata['fee'].values()[0], dict):
raise Exception('Old format for fee no longer supported. ' \
'Fee must be specified as {"currency":,"address":,"amount":}')
if 'amount' in metadata['fee'] and 'currency' in metadata['fee']:
if not metadata['fee']['amount']:
log.warning("Stripping empty fee from published metadata")
del metadata['fee']
elif 'address' not in metadata['fee']:
address = yield self.session.wallet.get_unused_address()
metadata['fee']['address'] = address
if 'fee' in metadata and 'version' not in metadata['fee']:
metadata['fee']['version'] = '_0_0_1'
claim_dict = {
'version': '_0_0_1',
'claimType': 'streamType',
'stream': {
'metadata': metadata,
'version': '_0_0_1'
}
}
if sources is not None:
claim_dict['stream']['source'] = sources
log.info("Publish: %s", {
'name': name,
'file_path': file_path,
'bid': bid,
'claim_address': claim_address,
'change_address': change_address,
'claim_dict': claim_dict,
})
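        # prefer an explicit channel_id; otherwise look up the certificate claim id
        # by channel name in the wallet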
if channel_id:
certificate_id = channel_id
elif channel_name:
certificate_id = None
my_certificates = yield self.session.wallet.channel_list()
for certificate in my_certificates:
if channel_name == certificate['name']:
certificate_id = certificate['claim_id']
break
if not certificate_id:
raise Exception("Cannot publish using channel %s" % channel_name)
else:
certificate_id = None
result = yield self._publish_stream(name, bid, claim_dict, file_path, certificate_id,
claim_address, change_address)
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_claim_abandon(self, claim_id):
"""
Abandon a name and reclaim credits from the claim
Usage:
claim_abandon (<claim_id> | --claim_id=<claim_id>)
Return:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting transaction
fee : (float) fee paid for the transaction
}
"""
try:
abandon_claim_tx = yield self.session.wallet.abandon_claim(claim_id)
self.analytics_manager.send_claim_action('abandon')
response = yield self._render_response(abandon_claim_tx)
except BaseException as err:
log.warning(err)
# pylint: disable=unsubscriptable-object
if len(err.args) and err.args[0] == "txid was not found in wallet":
raise Exception("This transaction was not found in your wallet")
else:
response = yield self._render_response(err)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_claim_new_support(self, name, claim_id, amount):
"""
Support a name claim
Usage:
claim_new_support (<name> | --name=<name>) (<claim_id> | --claim_id=<claim_id>)
(<amount> | --amount=<amount>)
Return:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting support claim
nout : (int) nout of the resulting support claim
fee : (float) fee paid for the transaction
}
"""
result = yield self.session.wallet.support_claim(name, claim_id, amount)
self.analytics_manager.send_claim_action('new_support')
defer.returnValue(result)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_claim_send_to_address(self, claim_id, address, amount=None):
"""
Send a name claim to an address
Usage:
claim_send_to_address (<claim_id> | --claim_id=<claim_id>)
(<address> | --address=<address>)
[<amount> | --amount=<amount>]
Options:
<amount> : Amount of credits to claim name for, defaults to the current amount
on the claim
"""
result = yield self.session.wallet.send_claim_to_address(claim_id, address, amount)
response = yield self._render_response(result)
defer.returnValue(response)
# TODO: claim_list_mine should be merged into claim_list, but idk how to authenticate it -Grin
@AuthJSONRPCServer.auth_required
def jsonrpc_claim_list_mine(self):
"""
List my name claims
Usage:
claim_list_mine
Returns
(list) List of name claims owned by user
[
{
'address': (str) address that owns the claim
'amount': (float) amount assigned to the claim
'blocks_to_expiration': (int) number of blocks until it expires
                    'category': (str) "claim", "update", or "support"
'claim_id': (str) claim ID of the claim
'confirmations': (int) number of blocks of confirmations for the claim
'expiration_height': (int) the block height which the claim will expire
'expired': (bool) true if expired, false otherwise
'height': (int) height of the block containing the claim
'is_spent': (bool) true if claim is abandoned, false otherwise
'name': (str) name of the claim
                    'txid': (str) txid of the claim
'nout': (int) nout of the claim
'value': (str) value of the claim
},
]
"""
d = self.session.wallet.get_name_claims()
d.addCallback(format_json_out_amount_as_float)
d.addCallback(lambda claims: self._render_response(claims))
return d
@defer.inlineCallbacks
def jsonrpc_claim_list(self, name):
"""
List current claims and information about them for a given name
Usage:
claim_list (<name> | --name=<name>)
Returns
(dict) State of claims assigned for the name
{
'claims': (list) list of claims for the name
[
{
'amount': (float) amount assigned to the claim
'effective_amount': (float) total amount assigned to the claim,
including supports
'claim_id': (str) claim ID of the claim
'height': (int) height of block containing the claim
'txid': (str) txid of the claim
'nout': (int) nout of the claim
'supports': (list) a list of supports attached to the claim
'value': (str) the value of the claim
},
]
'supports_without_claims': (list) supports without any claims attached to them
'last_takeover_height': (int) the height of last takeover for the name
}
"""
claims = yield self.session.wallet.get_claims_for_name(name)
defer.returnValue(claims)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_claim_list_by_channel(self, page=0, page_size=10, uri=None, uris=[]):
"""
Get paginated claims in a channel specified by a channel uri
Usage:
claim_list_by_channel (<uri> | --uri=<uri>) [<uris>...] [--page=<page>]
[--page_size=<page_size>]
Options:
--page=<page> : which page of results to return where page 1 is the first
                                       page; defaults to 0, which returns only the page count
--page_size=<page_size> : number of results in a page, default of 10
Returns:
{
resolved channel uri: {
If there was an error:
'error': (str) error message
'claims_in_channel_pages': total number of pages with <page_size> results,
If a page of results was requested:
'returned_page': page number returned,
'claims_in_channel': [
{
'absolute_channel_position': (int) claim index number in sorted list of
claims which assert to be part of the
channel
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
                    'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
],
}
}
"""
uris = tuple(uris)
if uri is not None:
uris += (uri, )
results = {}
valid_uris = tuple()
for chan_uri in uris:
try:
parsed = parse_lbry_uri(chan_uri)
if not parsed.is_channel:
results[chan_uri] = {"error": "%s is not a channel uri" % parsed.name}
elif parsed.path:
results[chan_uri] = {"error": "%s is a claim in a channel" % parsed.path}
else:
valid_uris += (chan_uri, )
except URIParseError:
results[chan_uri] = {"error": "%s is not a valid uri" % chan_uri}
resolved = yield self.session.wallet.resolve(*valid_uris, check_cache=False, page=page,
page_size=page_size)
for u in resolved:
if 'error' in resolved[u]:
results[u] = resolved[u]
else:
results[u] = {
'claims_in_channel_pages': resolved[u]['claims_in_channel_pages']
}
if page:
results[u]['returned_page'] = page
results[u]['claims_in_channel'] = resolved[u].get('claims_in_channel', [])
response = yield self._render_response(results)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
def jsonrpc_transaction_list(self):
"""
List transactions belonging to wallet
Usage:
transaction_list
Returns:
(list) List of transactions
"""
d = self.session.wallet.get_history()
d.addCallback(lambda r: self._render_response(r))
return d
def jsonrpc_transaction_show(self, txid):
"""
Get a decoded transaction from a txid
Usage:
transaction_show (<txid> | --txid=<txid>)
Returns:
(dict) JSON formatted transaction
"""
d = self.session.wallet.get_transaction(txid)
d.addCallback(lambda r: self._render_response(r))
return d
@AuthJSONRPCServer.auth_required
def jsonrpc_wallet_is_address_mine(self, address):
"""
Checks if an address is associated with the current wallet.
Usage:
wallet_is_address_mine (<address> | --address=<address>)
Returns:
(bool) true, if address is associated with current wallet
"""
d = self.session.wallet.address_is_mine(address)
d.addCallback(lambda is_mine: self._render_response(is_mine))
return d
@AuthJSONRPCServer.auth_required
def jsonrpc_wallet_public_key(self, address):
"""
Get public key from wallet address
Usage:
wallet_public_key (<address> | --address=<address>)
Returns:
(list) list of public keys associated with address.
Could contain more than one public key if multisig.
"""
d = self.session.wallet.get_pub_keys(address)
d.addCallback(lambda r: self._render_response(r))
return d
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_wallet_list(self):
"""
List wallet addresses
Usage:
wallet_list
Returns:
List of wallet addresses
"""
addresses = yield self.session.wallet.list_addresses()
response = yield self._render_response(addresses)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
def jsonrpc_wallet_new_address(self):
"""
Generate a new wallet address
Usage:
wallet_new_address
Returns:
(str) New wallet address in base58
"""
def _disp(address):
log.info("Got new wallet address: " + address)
return defer.succeed(address)
d = self.session.wallet.get_new_address()
d.addCallback(_disp)
d.addCallback(lambda address: self._render_response(address))
return d
@AuthJSONRPCServer.auth_required
def jsonrpc_wallet_unused_address(self):
"""
        Return an address that has no balance; a new address will be
        created if none exists.
Usage:
wallet_unused_address
Returns:
(str) Unused wallet address in base58
"""
def _disp(address):
log.info("Got unused wallet address: " + address)
return defer.succeed(address)
d = self.session.wallet.get_unused_address()
d.addCallback(_disp)
d.addCallback(lambda address: self._render_response(address))
return d
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_send_amount_to_address(self, amount, address):
"""
Queue a payment of credits to an address
Usage:
send_amount_to_address (<amount> | --amount=<amount>) (<address> | --address=<address>)
Returns:
(bool) true if payment successfully scheduled
"""
if amount < 0:
raise NegativeFundsError()
elif not amount:
raise NullFundsError()
reserved_points = self.session.wallet.reserve_points(address, amount)
if reserved_points is None:
raise InsufficientFundsError()
yield self.session.wallet.send_points_to_address(reserved_points, amount)
self.analytics_manager.send_credits_sent()
defer.returnValue(True)
def jsonrpc_block_show(self, blockhash=None, height=None):
"""
Get contents of a block
Usage:
block_show (<blockhash> | --blockhash=<blockhash>) | (<height> | --height=<height>)
Options:
<blockhash>, --blockhash=<blockhash> : hash of the block to look up
<height>, --height=<height> : height of the block to look up
Returns:
(dict) Requested block
"""
if blockhash is not None:
d = self.session.wallet.get_block(blockhash)
elif height is not None:
d = self.session.wallet.get_block_info(height)
d.addCallback(lambda b: self.session.wallet.get_block(b))
else:
# TODO: return a useful error message
return server.failure
d.addCallback(lambda r: self._render_response(r))
return d
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_blob_get(self, blob_hash, timeout=None, encoding=None, payment_rate_manager=None):
"""
Download and return a blob
Usage:
blob_get (<blob_hash> | --blob_hash=<blob_hash>) [--timeout=<timeout>]
[--encoding=<encoding>] [--payment_rate_manager=<payment_rate_manager>]
Options:
--timeout=<timeout> : timeout in number of seconds
--encoding=<encoding> : by default no attempt at decoding is made,
can be set to one of the
following decoders:
'json'
--payment_rate_manager=<payment_rate_manager> : if not given the default payment rate
manager will be used.
supported alternative rate managers:
'only-free'
Returns
(str) Success/Fail message or (dict) decoded data
"""
decoders = {
'json': json.loads
}
timeout = timeout or 30
payment_rate_manager = get_blob_payment_rate_manager(self.session, payment_rate_manager)
blob = yield self._download_blob(blob_hash, rate_manager=payment_rate_manager,
timeout=timeout)
if encoding and encoding in decoders:
blob_file = blob.open_for_reading()
result = decoders[encoding](blob_file.read())
blob.close_read_handle(blob_file)
else:
result = "Downloaded blob %s" % blob_hash
response = yield self._render_response(result)
defer.returnValue(response)
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
def jsonrpc_blob_delete(self, blob_hash):
"""
Delete a blob
Usage:
            blob_delete (<blob_hash> | --blob_hash=<blob_hash>)
Returns:
(str) Success/fail message
"""
if blob_hash not in self.session.blob_manager.blobs:
response = yield self._render_response("Don't have that blob")
defer.returnValue(response)
try:
stream_hash = yield self.stream_info_manager.get_stream_hash_for_sd_hash(blob_hash)
yield self.stream_info_manager.delete_stream(stream_hash)
        except Exception:
            # this blob may not be the sd blob of a stream; nothing extra to clean up
            pass
yield self.session.blob_manager.delete_blobs([blob_hash])
response = yield self._render_response("Deleted %s" % blob_hash)
defer.returnValue(response)
def jsonrpc_peer_list(self, blob_hash, timeout=None):
"""
Get peers for blob hash
Usage:
peer_list (<blob_hash> | --blob_hash=<blob_hash>) [<timeout> | --timeout=<timeout>]
Options:
<timeout>, --timeout=<timeout> : peer search timeout in seconds
Returns:
(list) List of contacts
"""
timeout = timeout or conf.settings['peer_search_timeout']
d = self.session.peer_finder.find_peers_for_blob(blob_hash, timeout=timeout)
d.addCallback(lambda r: [[c.host, c.port, c.is_available()] for c in r])
d.addCallback(lambda r: self._render_response(r))
return d
def jsonrpc_blob_announce_all(self):
"""
Announce all blobs to the DHT
Usage:
blob_announce_all
Returns:
(str) Success/fail message
"""
d = self.session.blob_manager.immediate_announce_all_blobs()
d.addCallback(lambda _: self._render_response("Announced"))
return d
@defer.inlineCallbacks
def jsonrpc_reflect(self, sd_hash):
"""
Reflect a stream
Usage:
reflect (<sd_hash> | --sd_hash=<sd_hash>)
Returns:
(bool) true if successful
"""
lbry_file = yield self._get_lbry_file(FileID.SD_HASH, sd_hash, return_json=False)
if lbry_file is None:
            raise Exception('No file found for given sd hash')
yield reupload.reflect_stream(lbry_file)
defer.returnValue("Reflect success")
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(needed="-n", finished="-f")
def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
finished=None, page_size=None, page=None):
"""
        Returns blob hashes. If no filters are given, returns all blobs known by the blob manager
Usage:
blob_list [-n] [-f] [<uri> | --uri=<uri>] [<stream_hash> | --stream_hash=<stream_hash>]
[<sd_hash> | --sd_hash=<sd_hash>] [<page_size> | --page_size=<page_size>]
[<page> | --page=<page>]
Options:
-n : only return needed blobs
-f : only return finished blobs
<uri>, --uri=<uri> : filter blobs by stream in a uri
<stream_hash>, --stream_hash=<stream_hash> : filter blobs by stream hash
<sd_hash>, --sd_hash=<sd_hash> : filter blobs by sd hash
<page_size>, --page_size=<page_size> : results page size
<page>, --page=<page> : page of results to return
Returns:
(list) List of blob hashes
"""
if uri:
metadata = yield self._resolve_name(uri)
sd_hash = utils.get_sd_hash(metadata)
blobs = yield self.get_blobs_for_sd_hash(sd_hash)
elif stream_hash:
try:
blobs = yield self.get_blobs_for_stream_hash(stream_hash)
except NoSuchStreamHash:
blobs = []
elif sd_hash:
try:
blobs = yield self.get_blobs_for_sd_hash(sd_hash)
except NoSuchSDHash:
blobs = []
else:
blobs = self.session.blob_manager.blobs.itervalues()
if needed:
blobs = [blob for blob in blobs if not blob.is_validated()]
if finished:
blobs = [blob for blob in blobs if blob.is_validated()]
blob_hashes = [blob.blob_hash for blob in blobs]
page_size = page_size or len(blob_hashes)
page = page or 0
start_index = page * page_size
stop_index = start_index + page_size
blob_hashes_for_return = blob_hashes[start_index:stop_index]
response = yield self._render_response(blob_hashes_for_return)
defer.returnValue(response)
def jsonrpc_blob_reflect_all(self):
"""
Reflects all saved blobs
Usage:
blob_reflect_all
Returns:
(bool) true if successful
"""
d = self.session.blob_manager.get_all_verified_blobs()
d.addCallback(reupload.reflect_blob_hashes, self.session.blob_manager)
d.addCallback(lambda r: self._render_response(r))
return d
@defer.inlineCallbacks
def jsonrpc_get_availability(self, uri, sd_timeout=None, peer_timeout=None):
"""
Get stream availability for lbry uri
Usage:
get_availability (<uri> | --uri=<uri>) [<sd_timeout> | --sd_timeout=<sd_timeout>]
[<peer_timeout> | --peer_timeout=<peer_timeout>]
Options:
<sd_timeout>, --sd_timeout=<sd_timeout> : sd blob download timeout
<peer_timeout>, --peer_timeout=<peer_timeout> : how long to look for peers
Returns:
(float) Peers per blob / total blobs
"""
def _get_mean(blob_availabilities):
peer_counts = []
for blob_availability in blob_availabilities:
for blob, peers in blob_availability.iteritems():
peer_counts.append(peers)
if peer_counts:
return round(1.0 * sum(peer_counts) / len(peer_counts), 2)
else:
return 0.0
def read_sd_blob(sd_blob):
sd_blob_file = sd_blob.open_for_reading()
decoded_sd_blob = json.loads(sd_blob_file.read())
sd_blob.close_read_handle(sd_blob_file)
return decoded_sd_blob
resolved_result = yield self.session.wallet.resolve(uri)
if resolved_result and uri in resolved_result:
resolved = resolved_result[uri]
else:
defer.returnValue(None)
if 'claim' in resolved:
metadata = resolved['claim']['value']
else:
defer.returnValue(None)
sd_hash = utils.get_sd_hash(metadata)
sd_timeout = sd_timeout or conf.settings['sd_download_timeout']
peer_timeout = peer_timeout or conf.settings['peer_search_timeout']
blobs = []
try:
blobs = yield self.get_blobs_for_sd_hash(sd_hash)
need_sd_blob = False
log.info("Already have sd blob")
except NoSuchSDHash:
need_sd_blob = True
log.info("Need sd blob")
blob_hashes = [blob.blob_hash for blob in blobs]
if need_sd_blob:
# we don't want to use self._download_descriptor here because it would create a stream
try:
sd_blob = yield self._download_blob(sd_hash, timeout=sd_timeout)
except Exception as err:
response = yield self._render_response(0.0)
log.warning(err)
defer.returnValue(response)
decoded = read_sd_blob(sd_blob)
blob_hashes = [blob.get("blob_hash") for blob in decoded['blobs']
if blob.get("blob_hash")]
sample = random.sample(blob_hashes, min(len(blob_hashes), 5))
log.info("check peers for %i of %i blobs in stream", len(sample), len(blob_hashes))
availabilities = yield self.session.blob_tracker.get_availability_for_blobs(sample,
peer_timeout)
mean_availability = _get_mean(availabilities)
response = yield self._render_response(mean_availability)
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(a_arg='-a', b_arg='-b')
def jsonrpc_cli_test_command(self, pos_arg, pos_args=[], pos_arg2=None, pos_arg3=None,
a_arg=False, b_arg=False):
"""
This command is only for testing the CLI argument parsing
Usage:
cli_test_command [-a] [-b] (<pos_arg> | --pos_arg=<pos_arg>)
[<pos_args>...] [--pos_arg2=<pos_arg2>]
[--pos_arg3=<pos_arg3>]
Options:
-a, --a_arg : a arg
-b, --b_arg : b arg
<pos_arg2>, --pos_arg2=<pos_arg2> : pos arg 2
<pos_arg3>, --pos_arg3=<pos_arg3> : pos arg 3
Returns:
pos args
"""
out = (pos_arg, pos_args, pos_arg2, pos_arg3, a_arg, b_arg)
response = yield self._render_response(out)
defer.returnValue(response)
def loggly_time_string(dt):
formatted_dt = dt.strftime("%Y-%m-%dT%H:%M:%S")
milliseconds = str(round(dt.microsecond * (10.0 ** -5), 3))
return urllib.quote_plus(formatted_dt + milliseconds + "Z")
def get_loggly_query_string(installation_id):
base_loggly_search_url = "https://lbry.loggly.com/search#"
now = utils.now()
yesterday = now - utils.timedelta(days=1)
params = {
'terms': 'json.installation_id:{}*'.format(installation_id[:SHORT_ID_LEN]),
'from': loggly_time_string(yesterday),
'to': loggly_time_string(now)
}
data = urllib.urlencode(params)
return base_loggly_search_url + data
def report_bug_to_slack(message, installation_id, platform_name, app_version):
webhook = utils.deobfuscate(conf.settings['SLACK_WEBHOOK'])
payload_template = "os: %s\n version: %s\n<%s|loggly>\n%s"
payload_params = (
platform_name,
app_version,
get_loggly_query_string(installation_id),
message
)
payload = {
"text": payload_template % payload_params
}
requests.post(webhook, json.dumps(payload))
def get_lbry_file_search_value(search_fields):
for searchtype in FileID:
value = search_fields.get(searchtype, None)
if value is not None:
return searchtype, value
raise NoValidSearch('{} is missing a valid search type'.format(search_fields))
def iter_lbry_file_search_values(search_fields):
for searchtype in FileID:
value = search_fields.get(searchtype, None)
if value is not None:
yield searchtype, value
def get_blob_payment_rate_manager(session, payment_rate_manager=None):
if payment_rate_manager:
rate_managers = {
'only-free': OnlyFreePaymentsManager()
}
if payment_rate_manager in rate_managers:
payment_rate_manager = rate_managers[payment_rate_manager]
log.info("Downloading blob with rate manager: %s", payment_rate_manager)
return payment_rate_manager or session.payment_rate_manager
# lbryum returns a json-loadable object with amounts as decimal-encoded strings,
# convert them into floats for the daemon
# TODO: daemon should also use decimal encoded string
def format_json_out_amount_as_float(obj):
if isinstance(obj, dict):
for k, v in obj.iteritems():
if k == 'amount' or k == 'effective_amount':
obj[k] = float(obj[k])
if isinstance(v, (dict, list)):
obj[k] = format_json_out_amount_as_float(v)
elif isinstance(obj, list):
obj = [format_json_out_amount_as_float(o) for o in obj]
return obj
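# Illustrative example of the conversion above (hypothetical input):
#   format_json_out_amount_as_float({'amount': '1.5', 'supports': [{'amount': '0.25'}]})
#   -> {'amount': 1.5, 'supports': [{'amount': 0.25}]}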
|
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
dates = ['3/25/2020', '3/26/2020', '3/27/2020', '3/28/2020', '3/29/2020', '3/30/2020', '3/31/2020', '4/1/2020', '4/2/2020', '4/3/2020', '4/4/2020', '4/5/2020', '4/6/2020', '4/7/2020', '4/8/2020', '4/9/2020', '4/10/2020', '4/11/2020', '4/12/2020', '4/13/2020', '4/14/2020', '4/15/2020', '4/16/2020', '4/17/2020', '4/18/2020', '4/19/2020', '4/20/2020', '4/21/2020', '4/22/2020', '4/23/2020', '4/24/2020', '4/25/2020', '4/26/2020', '4/27/2020', '4/28/2020', '4/29/2020', '4/30/2020', '5/1/2020', '5/2/2020', '5/3/2020', '5/4/2020', '5/5/2020', '5/6/2020', '5/7/2020', '5/8/2020', '5/9/2020', '5/10/2020', '5/11/2020', '5/12/2020', '5/13/2020', '5/14/2020', '5/15/2020', '5/16/2020', '5/17/2020', '5/18/2020', '5/19/2020', '5/20/2020']
# format dates
x_values = [datetime.datetime.strptime(d, "%m/%d/%Y").date() for d in dates]
ax = plt.gca()
formatter = mdates.DateFormatter("%m/%d")
ax.xaxis.set_major_formatter(formatter)
# create x-axis
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR, SA, SU), interval=4))
# minor tick = daily
ax.xaxis.set_minor_locator(mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR, SA, SU)))
# format y-axis
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, pos: format(int(x), ',')))
# us cases (minus NY state cases)
us_cases = [26.2, 31.09, 26.1, 22.01, 17.33, 16.7, 17.09, 17.06, 15.6, 14.54, 13.59, 8.96, 9.79, 9.73, 8.46, 8.55, 7.67, 6.79, 5.75, 5.03, 4.91, 4.45, 5.61, 5.31, 4.54, 4.15, 3.91, 4, 4.36, 4.48, 4.69, 3.89, 3.31, 2.6, 3.09, 2.99, 3.41, 3.83, 3.29, 2.7, 2.25, 2.36, 2.47, 2.72, 2.67, 2.38, 1.91, 1.64, 2, 1.8, 2.08, 2.16, 2.09, 1.67, 1.74, 1.58, 1.74]
# ny state cases
ny_cases = [20.05, 20.92, 19.8, 17.21, 13.75, 11.74, 13.98, 10.45, 10.36, 11.35, 10.54, 7.32, 7.09, 6.23, 7.55, 7.11, 6.61, 5.83, 4.56, 3.36, 3.68, 5.72, 3.98, 3.31, 3.09, 2.56, 1.95, 1.69, 2.2, 2.43, 3.09, 3.89, 2.09, 1.37, 1.07, 1.55, 1.56, 1.3, 1.51, 1.1, .8, .7, .87, 1.08, .9, .82, .68, .49, .42, .64, 1.51, .7, .54, .36, .42, .43, .59]
# create the graph
plt.plot(x_values, us_cases, color='#12cfdf', linewidth=2)
plt.plot(x_values, ny_cases, color='#1b42f1', linewidth=2)
# text labels
plt.title('Covid-19 in the United States: New Diagnoses in US as a Whole (Minus NY State) vs NY State')
plt.xlabel('Date')
plt.ylabel('Percent Increase')
plt.legend(['US Confirmed (Except for NY State)', 'NY State Confirmed'], loc='upper right')
plt.show() |
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from src.utils import args, get_shape
from src.modules import *
# ----- NN Model Selection -----
if args.model == 'VAE':
if args.network == 'densenet32':
from .image_networks.densenet32 import *
else:
raise NotImplementedError("Please use 'densenet32' as 'network' argument.")
# ----- Variational AutoEncoder -----
class VAE(nn.Module):
"""
Variational AutoEncoder.
Author:
Ioannis Gatopoulos.
"""
def __init__(self, x_shape, prior=args.prior):
super().__init__()
self.x_shape = x_shape
self.z_dim = args.z_dim
self.z_shape = get_shape(self.z_dim)
# p(z)
self.p_z = globals()[prior](self.z_shape)
# q(z | x)
self.q_z = q_z(self.z_shape, self.x_shape)
# p(x | z)
self.p_x = p_x(self.x_shape, self.z_shape)
# likelihood distribution
self.recon_loss = partial(dmol_loss, nc=self.x_shape[0])
self.sample_distribution = partial(sample_from_dmol, nc=self.x_shape[0])
def initialize(self, dataloader):
""" Data dependent init for weight normalization
(Automatically done during the first forward pass).
"""
with torch.no_grad():
x, _ = next(iter(dataloader))
x = x.to(args.device)
output = self.forward(x)
self.calculate_elbo(x, output)
return
@staticmethod
def reparameterize(z_mu, z_logvar):
""" z ~ N(z| z_mu, z_logvar)
"""
epsilon = torch.randn_like(z_mu)
return z_mu + torch.exp(0.5 * z_logvar) * epsilon
@torch.no_grad()
def generate(self, n_samples=args.n_samples):
# u ~ p(u)
z = self.p_z.sample(z_shape=self.z_shape, n_samples=n_samples, device=args.device).to(args.device)
# x ~ p(x| z)
x_logits = self.p_x(z)
x_hat = self.sample_distribution(x_logits, random_sample=False)
return x_hat
@torch.no_grad()
def reconstruct(self, x, **kwargs):
x_logits = self.forward(x).get('x_logits')
x_hat = self.sample_distribution(x_logits, random_sample=False)
return x_hat
def calculate_elbo(self, input, outputs):
# unpack variables
x, x_logits = input, outputs.get('x_logits')
z_q, z_q_mean, z_q_logvar = outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar')
        # Reconstruction loss
# N E_q [ ln p(x|z) ]
RE = - self.recon_loss(x, x_logits).mean()
# Regularization loss
# N E_q [ ln q(z) - ln p(z) ]
log_p_z = self.p_z.log_p(z_q)
log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar)
KL = (log_q_z - log_p_z).mean()
# Total negative lower bound loss
nelbo = RE + KL
diagnostics = {
"bpd" : (nelbo.item()) / (np.prod(x.shape[1:]) * np.log(2.)),
"nelbo" : nelbo.item(),
"RE" : RE.mean(dim=0).item(),
"KL" : KL.mean(dim=0).item(),
}
return nelbo, diagnostics
def forward(self, x, **kwargs):
""" Forward pass through the inference and the generative model.
"""
# z ~ q(z| x)
z_q_mean, z_q_logvar = self.q_z(x)
z_q = self.reparameterize(z_q_mean, z_q_logvar)
# x ~ p(x| z)
x_logits = self.p_x(z_q)
return {
"z_q" : z_q,
"z_q_mean" : z_q_mean,
"z_q_logvar" : z_q_logvar,
"x_logits" : x_logits
}
def encode(self, x):
""" Performs encoding and returns z_q"""
z_q_mean, z_q_logvar = self.q_z(x)
z_q = self.reparameterize(z_q_mean, z_q_logvar)
return z_q
def get_mean(self, x):
z_q_mean, _ = self.q_z(x)
return z_q_mean
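# Minimal self-contained sketch (not part of the model above): the reparameterization
# trick and the closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
# The tensors are illustrative placeholders.
def _reparam_and_kl_sketch():
    z_mu, z_logvar = torch.zeros(4, 8), torch.zeros(4, 8)
    z = z_mu + torch.exp(0.5 * z_logvar) * torch.randn_like(z_mu)  # z ~ N(z_mu, exp(z_logvar))
    kl = 0.5 * torch.sum(torch.exp(z_logvar) + z_mu ** 2 - 1.0 - z_logvar, dim=1)  # per-sample KL
    return z, kl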
if __name__ == "__main__":
pass
|
import warnings
from .sparse_lut import SparseLUT
from typing import Any |
#!/usr/bin/env python
from redis import Redis
import os
import tqdm
redis = Redis.from_url(
"redis://" + os.environ.get("EXECUTOR_CONSTR", "127.0.0.1:6379/0")
)
from job_registry import base
import sys
current_resultfile = sys.argv[1]
# read current resultfile
with open(current_resultfile) as fh:
lines = fh.readlines()
# re-queue jobs from the result file that previously errored in redis
results = []
jobids = []
for line in tqdm.tqdm(lines):
line = line.rstrip()
if line.startswith("JOB: "):
jobid = line.strip().split()[1]
jobkey = "job:" + jobid
if redis.hget(jobkey, "error") is not None:
redis.hdel(jobkey, "error")
redis.lpush("queue", jobid)
|
from cluster_vcf_records import variant_tracking
def run(options):
with open(options.vcf_fofn) as f:
vcf_files = [line.rstrip() for line in f]
tracker = variant_tracking.VariantTracker(options.outdir, options.ref_fasta)
tracker.merge_vcf_files(
vcf_files,
temp_dir=options.temp_dir,
cpus=options.cpus,
mem_limit=options.mem_limit,
force=options.force,
sample_limit=options.sample_limit,
)
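# Hypothetical invocation (attribute names mirror the options object used above):
#   import argparse
#   run(argparse.Namespace(vcf_fofn="vcfs.fofn", outdir="out", ref_fasta="ref.fa",
#                          temp_dir="tmp", cpus=4, mem_limit=2, force=False, sample_limit=None))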
|
from pygame.locals import *
from random import randint
import pygame
import time
from operator import *
class Player:
x = [0]
y = [0]
size = 44
direction = 0
length = 3
MaxAllowedMove = 2
updateMove = 0
def __init__(self, length):
self.length = length
for i in range(0, 2000):
self.x.append(-100)
self.y.append(-100)
# initial positions, no collision.
self.x[0] = 1 * 44
        self.y[0] = 2 * 44
def update(self):
self.updateMove = self.updateMove + 1
if gt(self.updateMove, self.MaxAllowedMove):
# update previous positions
for i in range(self.length - 1, 0, -1):
self.x[i] = self.x[i - 1]
self.y[i] = self.y[i - 1]
# update position of head of snake
if self.direction == 0:
self.x[0] = self.x[0] + self.size
if self.direction == 1:
self.x[0] = self.x[0] - self.size
if self.direction == 2:
self.y[0] = self.y[0] - self.size
if self.direction == 3:
self.y[0] = self.y[0] + self.size
self.updateMove = 0
def moveRight(self):
self.direction = 0
def moveLeft(self):
self.direction = 1
def moveUp(self):
self.direction = 2
def moveDown(self):
self.direction = 3
def draw(self, surface, image):
for i in range(0, self.length):
surface.blit(image, (self.x[i], self.y[i]))
class Computer:
x = [0]
y = [0]
size = 44
direction = 0
length = 3
MaxAllowedMove = 2
updateMove = 0
def __init__(self, length):
self.length = length
for i in range(0, 2000):
self.x.append(-100)
self.y.append(-100)
# initial positions, no collision.
self.x[0] = 1 * 44
self.y[0] = 4 * 44
def update(self):
self.updateMove = self.updateMove + 1
if gt(self.updateMove, self.MaxAllowedMove):
# update previous positions
for i in range(self.length - 1, 0, -1):
self.x[i] = self.x[i - 1]
self.y[i] = self.y[i - 1]
# update position of head of snake
if self.direction == 0:
self.x[0] = self.x[0] + self.size
if self.direction == 1:
self.x[0] = self.x[0] - self.size
if self.direction == 2:
self.y[0] = self.y[0] - self.size
if self.direction == 3:
self.y[0] = self.y[0] + self.size
self.updateMove = 0
def moveRight(self):
self.direction = 0
def moveLeft(self):
self.direction = 1
def moveUp(self):
self.direction = 2
def moveDown(self):
self.direction = 3
def target(self, dx, dy):
if gt(self.x[0] , dx):
self.moveLeft()
if lt(self.x[0] , dx):
self.moveRight()
if self.x[0] == dx:
if lt(self.y[0] , dy):
self.moveDown()
if gt(self.y[0] , dy):
self.moveUp()
def draw(self, surface, image):
for i in range(0, self.length):
surface.blit(image, (self.x[i], self.y[i]))
class Game:
def checkCollision(self, x1, y1, x2, y2, blockSize):
if ge(x1 , x2) and le(x1 , x2 + blockSize):
if ge(y1 , y2) and le(y1, y2 + blockSize):
return True
return False
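    # e.g. checkCollision(50, 50, 40, 40, 44) -> True: the point (50, 50) lies inside
    # the 44x44 block whose top-left corner is at (40, 40).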
class Frog:
x = 0
y = 0
size = 44
def __init__(self, x, y):
self.x = x * self.size
self.y = y * self.size
def draw(self, surface, image):
surface.blit(image, (self.x, self.y))
class App:
Width = 800
Height = 600
player = 0
Frog = 0
def __init__(self):
self._running = True
self.surface = None
self._image_surf = None
self._Frog_surf = None
self.game = Game()
self.player = Player(5)
self.Frog = Frog(8, 5)
self.computer = Computer(5)
def loader(self):
pygame.init()
self.surface = pygame.display.set_mode((self.Width, self.Height), pygame.HWSURFACE)
self._running = True
self._image_surf = pygame.image.load("snake.png").convert()
self._Frog_surf = pygame.image.load("frog-main.png").convert()
def on_event(self, event):
if event.type == QUIT:
self._running = False
def main(self):
self.computer.target(self.Frog.x, self.Frog.y)
self.player.update()
self.computer.update()
# does snake eat Frog?
for i in range(0, self.player.length):
if self.game.checkCollision(self.Frog.x, self.Frog.y, self.player.x[i], self.player.y[i], 44):
self.Frog.x = randint(2, 9) * 44
self.Frog.y = randint(2, 9) * 44
self.player.length = self.player.length + 1
# does computer eat Frog?
for i in range(0, self.player.length):
if self.game.checkCollision(self.Frog.x, self.Frog.y, self.computer.x[i], self.computer.y[i], 44):
self.Frog.x = randint(2, 9) * 44
self.Frog.y = randint(2, 9) * 44
#to increase length
# self.computer.length = self.computer.length + 1
# does snake collide with itself?
for i in range(2, self.player.length):
if self.game.checkCollision(self.player.x[0], self.player.y[0], self.player.x[i], self.player.y[i], 40):
print( "You lose! ")
exit(0)
pass
def renderer(self):
self.surface.fill((0, 0, 0))
self.player.draw(self.surface, self._image_surf)
self.Frog.draw(self.surface, self._Frog_surf)
self.computer.draw(self.surface, self._image_surf)
pygame.display.flip()
def on_cleanup(self):
pygame.quit()
def handler(self):
if self.loader() == False:
self._running = False
while (self._running):
pygame.event.pump()
keys = pygame.key.get_pressed()
if (keys[K_RIGHT]):
self.player.moveRight()
if (keys[K_LEFT]):
self.player.moveLeft()
if (keys[K_UP]):
self.player.moveUp()
if (keys[K_DOWN]):
self.player.moveDown()
if (keys[K_ESCAPE]):
self._running = False
self.main()
self.renderer()
            time.sleep(50.0 / 1000.0)
self.on_cleanup()
if __name__ == "__main__":
main = App()
main.handler()
|
from etherscan.contracts import Contract
import json
import pandas as pd
with open('C:/Yiru Xiong-Professional/实习/CryptoAlgoWheel/S1/task3/api_key.json', mode='r') as key_file:
key = json.loads(key_file.read())['key']
address = '0xfb6916095ca1df60bb79ce92ce3ea74c37c5d359'
api = Contract(address=address, api_key=key)
def get_abi():
abi = api.get_abi()
text_file = open("S1_task3_contracts_abi_text.txt", "w")
n = text_file.write(abi)
text_file.close()
#get_abi()
def get_sourcecode():
sourcecode = api.get_sourcecode()
text_file = open("S1_tsak3_contracts_sourcecode_text.txt", "w")
n = text_file.write(sourcecode[0]['SourceCode'])
text_file.close()
#get_sourcecode()
|
################################################################################
# Author: Fanyang Cheng
# Date: 27/03/2021
# Description: This file asks the user to enter a "letter phone number" and
#              converts it into a standard numeric phone number.
################################################################################
#define convert_number function
def convert_number(n):
n = n.lower() #lower case all of the letters
nl = []
for i in n:
        nl.append(i)  # change it into a list
#build a list include all alphabetical options
value = [["a","b","c"],["d","e","f"],["g","h","i"],["j","k","l"],["m","n","o"],["p","q","r","s"],["t","u","v"],["w","x","y","z"]]
for i in range(len(nl)):
for j in range(len(value)):
if nl[i] in value[j]:
nl[i] = str(j+2)
return("".join(nl))
def main():
n = input("Enter a telephone number: ")
o = convert_number(n)
print("The phone number is",o)
if __name__ == '__main__':
main()
|
from tamr_unify_client.base_resource import BaseResource
from tamr_unify_client.categorization.category.collection import CategoryCollection
class Taxonomy(BaseResource):
"""A project's taxonomy"""
@classmethod
def from_json(cls, client, data, api_path):
return super().from_data(client, data, api_path)
@property
def name(self):
""":type: str"""
return self._data.get("name")
def categories(self):
"""Retrieves the categories of this taxonomy.
:returns: A collection of the taxonomy categories.
:rtype: :class:`~tamr_unify_client.categorization.category.collection.CategoryCollection`
"""
alias = self.api_path + "/categories"
return CategoryCollection(self.client, alias)
def __repr__(self):
return (
f"{self.__class__.__module__}."
f"{self.__class__.__qualname__}("
f"relative_id={self.relative_id!r}, "
f"name={self.name!r})"
)
|
"""
Converts various NB (non-bonded) forms to other equivalent representations. In addition,
implements combining rules.
"""
## LJ Conversions
def _LJ_ab_to_ab(coeffs):
"""
Convert AB representation to AB representation of the LJ potential
"""
return {'A': coeffs['A'], 'B': coeffs['B']}
def _LJ_epsilonsigma_to_ab(coeffs):
"""
Convert epsilon/sigma representation to AB representation of the LJ
potential
"""
A = 4.0 * coeffs['epsilon'] * coeffs['sigma']**12.0
B = 4.0 * coeffs['epsilon'] * coeffs['sigma']**6.0
return {"A": A, "B": B}
def _LJ_ab_to_epsilonsigma(coeffs):
"""
Convert AB representation to epsilon/sigma representation of the LJ
potential
"""
if (coeffs['A'] == 0.0 and coeffs['B'] == 0.0):
return {"sigma": 0.0, "epsilon": 0.0}
try:
sigma = (coeffs['A'] / coeffs['B'])**(1.0 / 6.0)
epsilon = coeffs['B']**2.0 / (4.0 * coeffs['A'])
except ZeroDivisionError:
raise ZeroDivisionError("Lennard Jones functional form conversion not possible, division by zero found.")
return {"sigma": sigma, "epsilon": epsilon}
def _LJ_rminepsilon_to_ab(coeffs):
"""
Convert rmin/epsilon representation to AB representation of the LJ
potential
"""
A = coeffs['epsilon'] * coeffs['Rmin']**12.0
B = 2 * coeffs['epsilon'] * coeffs['Rmin']**6.0
return {"A": A, "B": B}
def _LJ_ab_to_rminepsilon(coeffs):
"""
Convert AB representation to Rmin/epsilon representation of the LJ potential
"""
if (coeffs['A'] == 0.0 and coeffs['B'] == 0.0):
return {"sigma": 0.0, "epsilon": 0.0}
try:
Rmin = (2.0 * coeffs['A'] / coeffs['B'])**(1.0 / 6.0)
Eps = coeffs['B']**2.0 / (4.0 * coeffs['A'])
except ZeroDivisionError:
raise ZeroDivisionError("Lennard Jones functional form conversion not possible, division by zero found.")
return {"Rmin": Rmin, "epsilon": Eps}
_LJ_conversion_matrix = {
'AB': (['A', 'B'], _LJ_ab_to_ab, _LJ_ab_to_ab),
'epsilon/sigma': (['epsilon', 'sigma'], _LJ_epsilonsigma_to_ab, _LJ_ab_to_epsilonsigma),
'epsilon/Rmin': (['epsilon', 'Rmin'], _LJ_rminepsilon_to_ab, _LJ_ab_to_rminepsilon),
}
def convert_LJ_coeffs(coeffs, origin, final):
difference = set([origin, final]) - set(_LJ_conversion_matrix.keys())
if (difference):
raise KeyError("Conversion cannot be made since %s is not in conversion matrix %s" %
(difference, _LJ_conversion_matrix.keys()))
difference = set(coeffs.keys()) - set(_LJ_conversion_matrix[origin][0])
if (difference):
raise KeyError("The key %s in the coefficient dictionary is not in the list of allowed keys %s" %
(difference, _LJ_conversion_matrix[origin][0]))
internal = _LJ_conversion_matrix[origin][1](coeffs)
external = _LJ_conversion_matrix[final][2](internal)
return external
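# Usage sketch (hypothetical coefficients): convert epsilon/sigma to AB and back.
#   ab = convert_LJ_coeffs({'epsilon': 0.25, 'sigma': 3.0}, origin='epsilon/sigma', final='AB')
#   # ab == {'A': 4 * 0.25 * 3.0**12, 'B': 4 * 0.25 * 3.0**6}
#   convert_LJ_coeffs(ab, origin='AB', final='epsilon/sigma')  # -> {'sigma': 3.0, 'epsilon': 0.25}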
## LJ combining rules
def _lorentz_berthelot(sigma_epsilon_i, sigma_epsilon_j):
new_params = {}
new_params['sigma'] = (sigma_epsilon_i['sigma'] + sigma_epsilon_j['sigma']) * 0.5
new_params['epsilon'] = (sigma_epsilon_i['epsilon'] * sigma_epsilon_j['epsilon'])**(1. / 2.)
return new_params
def _geometric(sigma_epsilon_i, sigma_epsilon_j):
new_params = {}
new_params['sigma'] = (sigma_epsilon_i['sigma'] * sigma_epsilon_j['sigma'])**(1. / 2.)
new_params['epsilon'] = (sigma_epsilon_i['epsilon'] * sigma_epsilon_j['epsilon'])**(1. / 2.)
return new_params
def _sixthpower(sigma_epsilon_i, sigma_epsilon_j):
new_params = {}
    # sixth-power mixing: sums of sigma**6 appear in both the sigma and epsilon terms
    new_params['sigma'] = ((sigma_epsilon_i['sigma']**6. + sigma_epsilon_j['sigma'] ** 6.) / 2.) ** (1./6.)
    new_params['epsilon'] = (2 * (sigma_epsilon_i['epsilon'] * sigma_epsilon_j['epsilon']) ** (1./2.) * sigma_epsilon_i['sigma'] ** 3.
                             * sigma_epsilon_j['sigma'] ** 3.) / (sigma_epsilon_i['sigma'] ** 6. + sigma_epsilon_j['sigma'] ** 6.)
return new_params
def mix_LJ(coeff_i, coeff_j, mixing_rule, origin="AB", final="AB"):
# Calculate interactions between two atom types based on specified mixing rules
# First convert from input form to internal AB representation
internal_coeff_i = convert_LJ_coeffs(coeff_i, origin=origin, final="AB")
internal_coeff_j = convert_LJ_coeffs(coeff_j, origin=origin, final="AB")
# Convert from internal AB representation to epsilon/sigma
sigma_epsilon_i = convert_LJ_coeffs(internal_coeff_i, origin="AB", final="epsilon/sigma")
sigma_epsilon_j = convert_LJ_coeffs(internal_coeff_j, origin="AB", final="epsilon/sigma")
# Calculate new parameters based on mixing rules
mixing_rule = mixing_rule.lower()
new_params = LJ_mixing_functions[mixing_rule](sigma_epsilon_i, sigma_epsilon_j)
# Convert from epsilon-sigma to AB, then to final specified form. Double conversion is necessary because of
# form of conversion matrix.
convert_params_temp = convert_LJ_coeffs(new_params, origin="epsilon/sigma", final="AB")
convert_params = convert_LJ_coeffs(convert_params_temp, origin="AB", final=final)
return convert_params
LJ_mixing_functions = {
"lorentz-berthelot" : _lorentz_berthelot,
"arithmetic": _lorentz_berthelot,
"geometric" : _geometric,
"sixthpower" : _sixthpower,
#"kong": _kong,
} |
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
import os
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestSizeExpr(WatchmanTestCase.WatchmanTestCase):
def test_size_expr(self):
root = self.mkdtemp()
self.touchRelative(root, "empty")
with open(os.path.join(root, "notempty"), "w") as f:
f.write("foo")
with open(os.path.join(root, "1k"), "w") as f:
f.truncate(1024)
self.watchmanCommand("watch", root)
tests = [
["eq", 0, ["empty"]],
["ne", 0, ["1k", "notempty"]],
["gt", 0, ["1k", "notempty"]],
["gt", 2, ["1k", "notempty"]],
["ge", 3, ["1k", "notempty"]],
["gt", 3, ["1k"]],
["le", 3, ["empty", "notempty"]],
["lt", 3, ["empty"]],
]
for (op, operand, expect) in tests:
res = self.watchmanCommand(
"query", root, {"expression": ["size", op, operand], "fields": ["name"]}
)
self.assertFileListsEqual(
res["files"], expect, message=repr((op, operand, expect))
)
self.removeRelative(root, "1k")
self.assertFileListsEqual(
self.watchmanCommand(
"query", root, {"expression": ["size", "gt", 100], "fields": ["name"]}
)["files"],
[],
message="removed file is not matched",
)
|
d,p,q=map(int,input().split())
p,q=max(p,q),min(p,q)
prod=0
if d//p <= 1000000:
ans=10**10
while True:
qprod = 0
dremain = d - p*prod
if dremain > 0:
qprod = dremain // q
if dremain % q > 0:
qprod += 1
ans=min(ans, prod*p + qprod*q)
prod+=1
if qprod == 0: break
print(ans)
exit(0)
prod = d // p
dremain = d - p*prod
start_i = prod + 1 if dremain else prod
ans=10**10
for i in range(start_i, start_i - 1000000, -1):
if i<0: break
prod = i
qprod = 0
dremain = d - p*prod
if dremain > 0:
qprod = dremain // q
if dremain % q > 0:
qprod += 1
ans=min(ans, prod*p + qprod*q)
print(ans) |
c.Exchange.course_id="meinkurs"
|
import bpy
from bpy.types import WindowManager, AddonPreferences
from bpy.props import StringProperty, EnumProperty
try:
import sverchok
from sverchok.utils.sv_IO_panel_tools import import_tree
SVERCHOK_AVAILABLE=True
except ImportError:
SVERCHOK_AVAILABLE=False
from os.path import join, basename, dirname, exists
import json
import webbrowser
from assethubclient import client
class Asset(client.Asset):
def __init__(self, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], client.Asset) and not kwargs:
self.dict = args[0].dict
else:
super(Asset, self).__init__(*args, **kwargs)
def is_text(self):
ctype = self.get_data_content_type()
return ctype.startswith('text/') or (ctype == 'application/json')
def is_json(self):
ctype = self.get_data_content_type()
return ctype == 'application/json'
def get_json_data(self):
result = self.get_data(content_type='application/json')
return json.loads(result.decode('utf-8'))
def store_to_text_block(self, name=None):
if not self.is_text():
raise TypeError("Asset content type is not text")
if name is None:
name = basename(self.data)
if not name in bpy.data.texts:
bpy.data.texts.new(name)
content = self.get_data()
bpy.data.texts[name].clear()
bpy.data.texts[name].write(content.decode('utf-8'))
def import_sverchok_tree(self, name=None):
if not SVERCHOK_AVAILABLE:
raise ImportError("Sverchok is not available")
if not name:
name = basename(self.data)
json = self.get_json_data()
ng = bpy.data.node_groups.new(name=name, type='SverchCustomTreeType')
import_tree(ng, nodes_json=json)
class AssetHubClient(client.AssetHubClient):
def __init__(self, *args, **kwargs):
super(AssetHubClient, self).__init__(*args, **kwargs)
self.asset_constructor = Asset
if not self.application:
self.application = 'blender'
def post_text_block(self, name, title=None, license=None, content_type="text/plain"):
if license is None:
license = self.license
if license is None:
license = "CC0"
if title is None:
title = name
content = bpy.data.texts[name].as_string()
asset = Asset(dict(title=title, application=self.application, component=self.component, license=license))
return self.post(asset, content, file_name=name, content_type=content_type)
assethub_client = None
assethub_components = []
assethub_licenses = []
assethub_tags = []
preview_collections = {}
known_assets = {}
def get_preferences():
return bpy.context.user_preferences.addons.get("assethubclient").preferences
def get_assethub_client(context):
global assethub_client
if assethub_client is not None:
return assethub_client
addon = get_preferences()
assethub_client = AssetHubClient(addon.assethub_url, application="blender")
return assethub_client
def previews_from_assethub(self, context):
enum_items = []
if context is None:
return enum_items
wm = context.window_manager
component = wm.assethub_component
tag = wm.assethub_tag
if not component:
return enum_items
directory = join(dirname(__file__), "thumbs", component)
pcoll = preview_collections.get(component, None)
if pcoll is None:
pcoll = bpy.utils.previews.new()
pcoll.previews = {}
preview_collections[component] = pcoll
if tag in pcoll.previews:
return pcoll.previews[tag]
c = get_assethub_client(context)
c.component = component
if tag != "__all__":
c.tag = tag
else:
c.tag = None
component = client.Component.get("blender", wm.assethub_component)
if component:
c.appversion = component.get_current_version()
for idx, asset in enumerate(c.list()):
id = str(asset.id)
if not asset.image:
icon = 'NONE'
else:
thumbnail_path = join(directory, asset.get_thumbnail_name())
if not exists(thumbnail_path):
asset.download_thumbnail(thumbnail_path)
if id not in pcoll:
thumb = pcoll.load(id, thumbnail_path, 'IMAGE')
else:
thumb = pcoll[id]
icon = thumb.icon_id
description = asset.description()
known_assets[id] = asset
enum_items.append((id, asset.title, description, icon, idx))
pcoll.previews[tag] = enum_items
return enum_items
def get_asset(self, context, id):
if not known_assets:
previews_from_assethub(self, context)
return known_assets.get(id, None)
def get_asset_description(self, context, id):
asset = get_asset(self, context, id)
if not asset:
return None
return asset.description()
def components_from_assethub(self, context):
global assethub_components
if len(assethub_components) > 0 or context is None:
return assethub_components
c = get_assethub_client(context)
for idx, comp in enumerate(c.get_components()):
notes = comp.notes_en
if not notes:
notes = comp.title_en
assethub_components.append((comp.slug, comp.title_en, notes))
return assethub_components
def tags_from_assethub(self, context):
global assethub_tags
if len(assethub_tags) > 0 or context is None:
return assethub_tags
assethub_tags.append(("__all__", "Any", "Any tag"))
c = get_assethub_client(context)
for idx, tag in enumerate(c.get_tags()):
name = tag.name
if not name:
name = tag.slug
assethub_tags.append((tag.slug, name, name))
return assethub_tags
def licenses_from_assethub(self, context):
global assethub_licenses
if len(assethub_licenses) > 0 or context is None:
return assethub_licenses
c = get_assethub_client(context)
for idx, license in enumerate(c.get_licenses()):
assethub_licenses.append((license.slug, license.title, license.title))
return assethub_licenses
class ImportPanel(bpy.types.Panel):
bl_label = "Import from AssetHub"
bl_idname = "import.assethub.panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
wm = context.window_manager
layout.prop(wm, "assethub_component")
layout.prop(wm, "assethub_tag")
layout.template_icon_view(wm, "assethub_asset")
if wm.assethub_asset:
asset = get_asset(self, context, wm.assethub_asset)
layout.label(asset.title)
layout.label("License: {}".format(asset.license))
# description = get_asset_description(self, context, wm.assethub_asset)
# if description:
# for line in description.split("\n"):
# layout.label(line)
wm.assethub_asset_url = asset.url
layout.operator("browse.assethub_asset")
layout.operator("import.assethub")
class BrowseAssetOperator(bpy.types.Operator):
bl_label = "Open in browser"
bl_idname = "browse.assethub_asset"
def execute(self, context):
wm = context.window_manager
url = wm.assethub_asset_url
webbrowser.open(url)
return {'FINISHED'}
class ImportOperator(bpy.types.Operator):
bl_label = "Import from AssetHub"
bl_idname = "import.assethub"
def execute(self, context):
wm = context.window_manager
#print("Params: comp={}, preview={}".format(wm.assethub_component, wm.assethub_asset))
c = get_assethub_client(context)
asset = c.get(wm.assethub_asset)
component = client.Component.get("blender", wm.assethub_component)
if component is None:
print("Dont know how to import objects for component " + wm.assethub_component)
else:
component.import_asset(asset)
return {'FINISHED'}
class PostScriptPanel(bpy.types.Panel):
bl_label = "Post to AssetHub"
bl_idname = "export.assethub.sverchok-sn1.panel"
bl_space_type = "TEXT_EDITOR"
bl_region_type = "UI"
def draw(self, context):
layout = self.layout
wm = context.window_manager
op = layout.operator("export.assethub_sverchok_sn1")
space = bpy.context.space_data
if op and isinstance(space, bpy.types.SpaceTextEditor) and space.text:
op.name = space.text.name
class PostScriptOperator(bpy.types.Operator):
bl_label = "Post to AssetHub"
bl_idname = "export.assethub_sverchok_sn1"
name = StringProperty(name="Text block name")
title = StringProperty(name="Title")
license = EnumProperty(name="License", items = licenses_from_assethub)
def draw(self, context):
layout = self.layout
layout.prop(self, "name")
layout.prop(self, "title")
layout.prop(self, "license")
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
def execute(self, context):
#space = bpy.context.space_data
#if not isinstance(space, bpy.types.SpaceTextEditor):
c = get_assethub_client(context)
addon = get_preferences()
c.component = "sverchok-sn1"
c.username = addon.username
c.password = addon.password
#print(c.username, c.password)
#print(self.license)
c.post_text_block(self.name, title=self.title, license=self.license, content_type="text/x-python")
return {'FINISHED'}
class SettingsPanel(bpy.types.AddonPreferences):
bl_label = "AssetHub settings"
bl_idname = __package__
assethub_url = StringProperty(
name = "AssetHub URL",
default = "http://assethub.iportnov.tech/")
username = StringProperty(name="AssetHub user name")
password = StringProperty(name="AssetHub password", subtype="PASSWORD")
def draw(self, context):
layout = self.layout
layout.prop(self, "assethub_url")
layout.prop(self, "username")
layout.prop(self, "password")
class SverchokSn1(client.Component):
def import_asset(self, asset):
asset.store_to_text_block()
def get_current_version(self):
if SVERCHOK_AVAILABLE:
vs = sverchok.bl_info['version']
return ".".join(str(v) for v in vs)
def get_min_compatible_version(self):
return "0.5.0.0"
class SverchokLayout(client.Component):
def import_asset(self, asset):
asset.import_sverchok_tree()
def get_current_version(self):
if SVERCHOK_AVAILABLE:
vs = sverchok.bl_info['version']
return ".".join(str(v) for v in vs)
def get_min_compatible_version(self):
return "0.5.0.0"
def menu_func(self, context):
self.layout.operator("import.assethub", text="Import from AssetHub")
def register():
WindowManager.assethub_component = EnumProperty(name="Component", items = components_from_assethub)
WindowManager.assethub_tag = EnumProperty(name="Tag", default=None, items = tags_from_assethub)
WindowManager.assethub_asset = EnumProperty(name="Asset", items = previews_from_assethub)
WindowManager.assethub_asset_url = StringProperty(name="Asset URL")
client.Component.register("blender", "sverchok-sn1", SverchokSn1)
client.Component.register("blender", "sverchok-layout", SverchokLayout)
bpy.utils.register_class(ImportOperator)
bpy.utils.register_class(SettingsPanel)
bpy.utils.register_class(ImportPanel)
bpy.utils.register_class(PostScriptPanel)
bpy.utils.register_class(PostScriptOperator)
bpy.utils.register_class(BrowseAssetOperator)
bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
from bpy.types import WindowManager
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
del WindowManager.assethub_asset
del WindowManager.assethub_asset_url
del WindowManager.assethub_component
del WindowManager.assethub_tag
bpy.utils.unregister_class(ImportOperator)
bpy.utils.unregister_class(SettingsPanel)
bpy.utils.unregister_class(ImportPanel)
bpy.utils.unregister_class(PostScriptPanel)
bpy.utils.unregister_class(PostScriptOperator)
bpy.utils.unregister_class(BrowseAssetOperator)
bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
register()
|
import numpy as np
import matplotlib.pyplot as plt
import math
## Integrand function
def f(x): return x**5+x**4
## Integration interval
a = -1.0
b = 1.0
## Number of random number generations
n = 10000
## Standard MC implementation
h=[]
for k in range(1,1500):
x=np.random.uniform(a,b,n) # [a,b]=[-1.0,1.0]
eval_funct=f(x)
h.append((b-a)*np.sum(eval_funct)/(n))
S=(b-a)*(np.sum(eval_funct))/n
n=10000.0
mu_camp=(np.sum(eval_funct))/n
var_camp=1/(n-1)*np.sum((eval_funct-mu_camp)**2)
var=(b-a)**2*(1/n)*var_camp
print (S,'Integral Mean with Standard MC')
print (var,'Variance with Standard MC')
print(math.sqrt(var), 'Standard Deviation with Standard MC')
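## For reference, the analytic value is integral_{-1}^{1} (x^5 + x^4) dx = 2/5 = 0.4,
## so S and the histogram below should be centred near 0.4 for large n.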
## Plotting a histogram of the generated gaussian
hist,bin_edges=np.histogram(h,bins=100)
plt.figure()
plt.hist(h,bin_edges)
plt.xlabel('Bins')
plt.ylabel('Counts')
plt.title('MC evaluation of Integral value [Standard Sampling]')
axes=plt.gca()
plt.show() |
import argparse
from pathlib import Path
def main():
parser = argparse.ArgumentParser(
description='Unused Image Remover: Unused image removal tool for image annotation work')
parser.add_argument('image_dir', help='Input the image directory. (e.g. ./images)')
parser.add_argument('-d', '--delete', action='store_true', help='Delete the images.')
args = parser.parse_args()
image_dir = args.image_dir
delete_flag = args.delete
unused_images = find_unused_images(image_dir)
for img in unused_images:
print(img)
if delete_flag:
p = Path(image_dir) / img
p.unlink()
if delete_flag:
print(f'\nRemoved {len(unused_images)} images.')
else:
print(f'\n{len(unused_images)} unused images were found.')
print('To actually delete it, specify "-d" as an argument.')
def find_unused_images(image_dir: str) -> list:
p = Path(image_dir)
texts = []
for txt in p.glob('*.txt'):
texts.append(txt.name.split('.')[0])
texts.sort()
unused_images = []
for img in p.glob('*.jpeg'):
if img.name.split('.')[0] not in texts:
unused_images.append(img.name)
unused_images.sort()
return unused_images
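# Example (hypothetical directory): with images/0001.jpeg, images/0002.jpeg and only
# images/0001.txt present, find_unused_images('images') returns ['0002.jpeg'].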
if __name__ == '__main__':
main()
|
from py_v_sdk.api import *
from py_v_sdk.chain import *
from py_v_sdk.account import *
from py_v_sdk.tx_req import *
from py_v_sdk.model import *
from py_v_sdk.contract import *
from py_v_sdk.contract.nft_ctrt import *
from py_v_sdk.contract.atomic_swap_ctrt import *
import py_v_sdk.log
|
import numpy as np
import tensorflow as tf
from collections import OrderedDict, defaultdict
from itertools import chain, combinations
from libspn.graph.scope import Scope
from libspn.graph.node import OpNode, Input
from libspn.inference.type import InferenceType
from libspn import utils
from libspn import conf
from libspn.exceptions import StructureError
from libspn.log import get_logger
from libspn.utils.serialization import register_serializable
@register_serializable
class ProductsLayer(OpNode):
"""A node representing all products in a layer in an SPN.
Args:
*values (input_like): Inputs providing input values to this node.
See :meth:`~libspn.Input.as_input` for possible values.
num_or_size_prods (int or list of int):
Int: Number of product ops modelled by this node. In which case, all
the products modelled will have a common size.
List: Size of each product op modelled by this node. Number of
products modelled would be the length of the list.
name (str): Name of the node.
"""
logger = get_logger()
info = logger.info
def __init__(self, *values, num_or_size_prods=1, name="ProductsLayer"):
self._values = []
super().__init__(inference_type=InferenceType.MARGINAL, name=name)
self.set_values(*values)
self.set_prod_sizes(num_or_size_prods)
def set_prod_sizes(self, num_or_size_prods):
# Total size of value input_size
total_values_size = sum(
len(v.indices) if v and v.indices else v.node.get_out_size() if v else 0
for v in self._values)
        if isinstance(num_or_size_prods, int):  # Total number of product ops to be modelled
if not num_or_size_prods > 0:
raise StructureError("In %s 'num_or_size_prods': %s need to be > 0"
% self, num_or_size_prods)
self._num_prods = num_or_size_prods
self._prod_input_sizes = [total_values_size // self._num_prods] * self._num_prods
elif isinstance(num_or_size_prods, list): # Size of each modelled product op
if not len(num_or_size_prods) > 0:
raise StructureError("In %s 'num_or_size_prods': %s cannot be an empty list"
% self, num_or_size_prods)
self._prod_input_sizes = num_or_size_prods
self._num_prods = len(num_or_size_prods)
self._num_or_size_prods = num_or_size_prods
def serialize(self):
data = super().serialize()
data['values'] = [(i.node.name, i.indices) for i in self._values]
data['num_prods'] = self._num_prods
data['prod_input_sizes'] = self._prod_input_sizes
data['num_or_size_prods'] = self._num_or_size_prods
return data
def deserialize(self, data):
super().deserialize(data)
self.set_values()
self._num_prods = data['num_prods']
self._prod_input_sizes = data['prod_input_sizes']
self._num_or_size_prods = data['num_or_size_prods']
def deserialize_inputs(self, data, nodes_by_name):
super().deserialize_inputs(data, nodes_by_name)
self._values = tuple(Input(nodes_by_name[nn], i)
for nn, i in data['values'])
@property
@utils.docinherit(OpNode)
def inputs(self):
return self._values
@property
def num_prods(self):
"""int: Number of Product ops modelled by this node."""
return self._num_prods
def set_num_prods(self, num_prods=1):
"""Set the number of Product ops modelled by this node.
Args:
num_prods (int): Number of Product ops modelled by this node.
"""
self._num_prods = num_prods
@property
def num_or_size_prods(self):
"""int: Number of Product ops modelled by this node."""
return self._num_or_size_prods
def set_num_or_size_prods(self, num_or_size_prods=1):
"""Set the number of Product ops modelled by this node.
Args:
num_prods (int): Number of Product ops modelled by this node.
"""
self._num_or_size_prods = num_or_size_prods
@property
def values(self):
"""list of Input: List of value inputs."""
return self._values
def set_values(self, *values):
"""Set the inputs providing input values to this node. If no arguments
are given, all existing value inputs get disconnected.
Args:
*values (input_like): Inputs providing input values to this node.
See :meth:`~libspn.Input.as_input` for possible values.
"""
self._values = self._parse_inputs(*values)
def add_values(self, *values):
"""Add more inputs providing input values to this node.
Args:
*values (input_like): Inputs providing input values to this node.
See :meth:`~libspn.Input.as_input` for possible values.
"""
self._values = self._values + self._parse_inputs(*values)
@property
def _const_out_size(self):
return True
@utils.lru_cache
def _compute_out_size(self, *input_out_sizes):
return self._num_prods
def _compute_scope(self, *value_scopes):
if not self._values:
raise StructureError("%s is missing input values." % self)
# Gather and flatten value scopes
flat_value_scopes = list(chain.from_iterable(self._gather_input_scopes(
*value_scopes)))
# Divide gathered and flattened value scopes into sublists, one per
# modeled product op.
prod_input_sizes = np.cumsum(np.array(self._prod_input_sizes)).tolist()
prod_input_sizes.insert(0, 0)
value_scopes_lists = [flat_value_scopes[start:stop] for start, stop in
zip(prod_input_sizes[:-1], prod_input_sizes[1:])]
return [Scope.merge_scopes(vsl) for vsl in value_scopes_lists]
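    # e.g. with _prod_input_sizes = [2, 3], five flattened input scopes [s0, s1, s2, s3, s4]
    # are split into [s0, s1] and [s2, s3, s4], and each sublist is merged into one scope.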
def _compute_valid(self, *value_scopes):
if not self._values:
raise StructureError("%s is missing input values." % self)
value_scopes_ = self._gather_input_scopes(*value_scopes)
# If already invalid, return None
if any(s is None for s in value_scopes_):
return None
# Check product decomposability
flat_value_scopes = list(chain.from_iterable(value_scopes_))
# Divide gathered and flattened value scopes into sublists, one per
# modeled product op.
prod_input_sizes = np.cumsum(np.array(self._prod_input_sizes)).tolist()
prod_input_sizes.insert(0, 0)
value_scopes_lists = [flat_value_scopes[start:stop] for start, stop in
zip(prod_input_sizes[:-1], prod_input_sizes[1:])]
for scopes in value_scopes_lists:
for s1, s2 in combinations(scopes, 2):
if s1 & s2:
ProductsLayer.info("%s is not decomposable", self)
return None
return self._compute_scope(*value_scopes)
def _combine_values_and_indices(self, value_tensors):
"""
Concatenates input tensors and returns the nested indices that are
required for gathering all product inputs to a reducible set of columns.
"""
        # Use a list instead of a dict to preserve insertion order
unique_tensors = []
unique_offsets = []
combined_indices = []
flat_col_indices = []
flat_tensor_offsets = []
tensor_offset = 0
for value_inp, value_tensor in zip(self._values, value_tensors):
# Get indices. If not there, will be [0, 1, ... , len-1]
indices = value_inp.indices if value_inp.indices else \
np.arange(value_inp.node.get_out_size()).tolist()
flat_col_indices.append(indices)
if value_tensor not in unique_tensors:
                # Add the tensor and its offset to the unique lists
unique_tensors.append(value_tensor)
unique_offsets.append(tensor_offset)
# Add offsets
flat_tensor_offsets.append([tensor_offset for _ in indices])
tensor_offset += value_tensor.shape[1].value
else:
# Find offset from list
offset = unique_offsets[unique_tensors.index(value_tensor)]
                # No need to update tensor_offset here, since the current value_tensor
                # wasn't added to the unique list
flat_tensor_offsets.append([offset for _ in indices])
# Flatten the tensor offsets and column indices
flat_tensor_offsets = np.asarray(list(chain(*flat_tensor_offsets)))
flat_col_indices = np.asarray(list(chain(*flat_col_indices)))
# Offset in flattened arrays
offset = 0
for size in self._prod_input_sizes:
# Now indices can be found by adding up column indices and tensor offsets
indices = flat_col_indices[offset:offset + size] + \
flat_tensor_offsets[offset:offset + size]
# Combined indices contains an array for each reducible set of columns
combined_indices.append(indices)
offset += size
return combined_indices, tf.concat(unique_tensors, 1)
@utils.lru_cache
def _compute_value_common(self, *value_tensors, padding_value=0.0):
"""Common actions when computing value."""
# Check inputs
if not self._values:
raise StructureError("%s is missing input values." % self)
# Prepare values
if self._num_prods > 1:
indices, value_tensor = self._combine_values_and_indices(value_tensors)
# Create a 3D tensor with dimensions [batch, num-prods, max-prod-input-sizes]
# The last axis will have zeros or ones (for log or non-log) when the
# prod-input-size < max-prod-input-sizes
reducible_values = utils.gather_cols_3d(value_tensor, indices,
pad_elem=padding_value)
return reducible_values
else:
# Gather input tensors
value_tensors = self._gather_input_tensors(*value_tensors)
return tf.concat(value_tensors, 1)
@utils.lru_cache
def _compute_log_value(self, *value_tensors):
values = self._compute_value_common(*value_tensors, padding_value=0.0)
# Wrap the log value with its custom gradient
@tf.custom_gradient
def log_value(*unique_tensors):
# Defines gradient for the log value
def gradient(gradients):
scattered_grads = self._compute_log_mpe_path(gradients, *value_tensors)
return [sg for sg in scattered_grads if sg is not None]
return tf.reduce_sum(values, axis=-1, keepdims=(False if self._num_prods > 1
else True)), gradient
unique_tensors = self._get_differentiable_inputs(*value_tensors)
if conf.custom_gradient:
return log_value(*unique_tensors)
else:
return tf.reduce_sum(
values, axis=-1, keep_dims=(False if self._num_prods > 1 else True))
@utils.lru_cache
def _get_differentiable_inputs(self, *value_tensors):
unique_tensors = list(OrderedDict.fromkeys(value_tensors))
return unique_tensors
@utils.lru_cache
def _compute_log_mpe_value(self, *value_tensors):
return self._compute_log_value(*value_tensors)
def _collect_count_indices_per_input(self):
"""
For every unique (input, index) pair in the node's values list, collects
and returns all column-indices of the counts tensor, for which the unique
pair is a child of.
"""
        # Create a list of each input, paired with all the indices associated
# with it
# Eg: self._values = [(A, [0, 2, 3]),
# (B, 1),
# (A, None),
# (B, [1, 2])]
# expanded_inputs_list = [(A, 0), (A, 2), (A, 3),
# (B, 1),
# (A, 0), (A, 1), (A, 2), (A, 3),
# (B, 1), (B, 2)]
expanded_inputs_list = []
for inp in self._values:
if inp.indices is None:
for i in range(inp.node.get_out_size()):
expanded_inputs_list.append((inp.node, i))
elif isinstance(inp.indices, list):
for i in inp.indices:
expanded_inputs_list.append((inp.node, i))
elif isinstance(inp.indices, int):
expanded_inputs_list.append((inp.node, inp.indices))
# Create a list grouping together all inputs to each product modelled
# Eg: self._prod_input_sizes = [2, 3, 2, 1, 2]
# prod_inputs_lists = [[(A, 0), (A, 2)], # Prod-0
# [(A, 3), (B, 1),(A, 0)], # Prod-1
# [(A, 1), (A, 2)], # Prod-2
# [(A, 3)], # Prod-3
# [(B, 1), (B, 2)]] # Prod-4
prod_input_sizes = np.cumsum(np.array(self._prod_input_sizes)).tolist()
prod_input_sizes.insert(0, 0)
prod_inputs_lists = [expanded_inputs_list[start:stop] for start, stop in
zip(prod_input_sizes[:-1], prod_input_sizes[1:])]
        # Create a dictionary with each unique (input, index) pair as its key,
        # and a list of product indices as the corresponding value
# Eg: unique_inps_inds_dict = {(A, 0): [0, 1], # Prod-0 and Prod-1
# (A, 1): [2], # Prod-2
# (A, 2): [0, 2], # Prod-0 and Prod-2
# (A, 3): [1], # Prod-1
# (B, 1): [1, 4], # Prod-1 and Prod-4
# (B, 2): [4]} # Prod-4
unique_inps_inds = defaultdict(list)
for idx, inps in enumerate(prod_inputs_lists):
for inp in inps:
unique_inps_inds[inp] += [idx]
        # Sort the dictionary by key - sorting avoids a scatter op when
        # the original input is passed without indices
unique_inps_inds = OrderedDict(sorted(unique_inps_inds.items()))
# Collect all product indices as a nested list of indices to gather from
# counts tensor
# Eg: gather_counts_indices = [[0, 1],
# [2],
# [0, 2],
# [1],
# [1, 4],
# [4]]
gather_counts_indices = [v for v in unique_inps_inds.values()]
# Create an ordered dictionary of unique inputs to this node as key,
# and a list of unique indices per input as the corresponding value
# Eg: unique_inps = {A: [0, 1, 2, 3]
# B: [1, 2]}
unique_inps = OrderedDict()
for inp, ind in unique_inps_inds.keys():
unique_inps[inp] = []
for inp, ind in unique_inps_inds.keys():
unique_inps[inp] += [ind]
return gather_counts_indices, unique_inps
@utils.lru_cache
def _compute_log_mpe_path(self, counts, *value_values,
use_unweighted=False, sample=False, sample_prob=None):
# Check inputs
if not self._values:
raise StructureError("%s is missing input values." % self)
        # For each unique (input, index) pair in the values list, collect the
        # indices of all counts for which the pair is a child
gather_counts_indices, unique_inputs = self._collect_count_indices_per_input()
if self._num_prods > 1:
# Gather columns from the counts tensor, per unique (input, index) pair
reducible_values = utils.gather_cols_3d(counts, gather_counts_indices)
# Sum gathered counts together per unique (input, index) pair
summed_counts = tf.reduce_sum(reducible_values, axis=-1)
else:
# Calculate total inputs size
inputs_size = sum([v_input.get_size(v_value) for v_input, v_value in
zip(self._values, value_values)])
# Tile counts only if input is larger than 1
summed_counts = (tf.tile(counts, [1, inputs_size]) if inputs_size > 1
else counts)
# For each unique input in the values list, calculate the number of
# unique indices
unique_inp_sizes = [len(v) for v in unique_inputs.values()]
# Split the summed-counts tensor per unique input, based on input-sizes
unique_input_counts = tf.split(summed_counts, unique_inp_sizes, axis=-1) \
if len(unique_inp_sizes) > 1 else [summed_counts]
# Scatter each unique-counts tensor to the respective input, only once
# per unique input in the values list
scattered_counts = [None] * len(self._values)
for (node, inds), cnts in zip(unique_inputs.items(), unique_input_counts):
for i, (inp, val) in enumerate(zip(self._values, value_values)):
if inp.node == node:
scattered_counts[i] = utils.scatter_cols(
cnts, inds, int(val.get_shape()[0 if val.get_shape().ndims
== 1 else 1]))
break
return scattered_counts
def _compute_log_gradient(self, gradients, *value_values):
return self._compute_log_mpe_path(gradients, *value_values)
def disconnect_inputs(self):
self._values = None
@property
def is_layer(self):
        return True
|
"""empty message
Revision ID: 12f833559028
Revises: e7607c2bc445
Create Date: 2017-10-13 18:08:41.824664
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = "12f833559028"
down_revision = "e7607c2bc445"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"event",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uid", sa.String(length=255), nullable=False, unique=True),
sa.Column("summary", sa.Text(), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("start", sa.TIMESTAMP(), nullable=True),
sa.Column("end", sa.TIMESTAMP(), nullable=True),
sa.Column("timestamp", sa.TIMESTAMP(), nullable=True),
sa.Column("url", sqlalchemy_utils.types.url.URLType(), nullable=True),
sa.Column("location", sa.Text(), nullable=True),
sa.Column("source_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["source_id"], ["calendar_source.id"], ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("event")
# ### end Alembic commands ###
|
from pyeasyga import pyeasyga
from . import sga, cga, charts
def mono_binary(function, goal_function, size, popsz, noe, maximise=True):
# Define data
data = [0] * size
#
# Save the fitness of each execution
results_sga = []
hmdatas_sga = []
# Execute the sGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set sGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.create_first_generation = sga.create_first_generation
ga.create_next_generation = sga.create_next_generation
ga.best_individual = sga.best_individual
ga.rank_population = sga.rank_population
ga.calculate_population_fitness = sga.bn_calculate_population_fitness
ga.run = sga.bn_run
# Run binary sGA
ga.run(ga, hmdata)
# Get best individual
fitness, _ = ga.best_individual(ga)
# Update extraction variables
results_sga.append(fitness)
hmdatas_sga.append(hmdata)
#
# Save the fitness of each execution
results_cga = []
hmdatas_cga = []
# Execute the cGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set cGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.best_individual = cga.best_individual
ga.create_individual = cga.bn_create_individual
ga.run = cga.bn_run
# Run binary cGA
ga.run(ga, hmdata)
# Get best individual
fitness, _ = ga.best_individual(ga)
# Update extraction variables
results_cga.append(fitness)
hmdatas_cga.append(hmdata)
#
# Get goal of the fitness function
goal = goal_function(data)
#
# Plot result charts
filename = function.__name__ + '_' + str(size) + '_' + str(popsz)
charts.results(results_sga, results_cga, popsz, goal, noe, filename)
# Plot heat map charts
for i, _ in enumerate(hmdatas_sga):
charts.heat_map(hmdatas_sga[i], hmdatas_cga[i], filename, i + 1)
def mono_real(function, goal_function, size, popsz, noe, maximise=False):
# Define data
data = [0] * size
#
# Save the fitness of each execution
results_sga = []
hmdatas_sga = []
# Execute the sGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set sGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.create_first_generation = sga.create_first_generation
ga.create_next_generation = sga.create_next_generation
ga.best_individual = sga.best_individual
ga.rank_population = sga.rank_population
ga.create_individual = sga.rn_create_individual
ga.mutate_function = sga.rn_mutate_function
ga.calculate_population_fitness = sga.rn_calculate_population_fitness
ga.run = sga.rn_run
# Run real sGA
ga.run(ga, hmdata)
# Get best individual
fitness, _ = ga.best_individual(ga)
# Update extraction variables
results_sga.append(fitness)
hmdatas_sga.append(hmdata)
#
# Save the fitness of each execution
results_cga = []
hmdatas_cga = []
# Execute the cGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set cGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.best_individual = cga.best_individual
ga.create_individual = cga.rn_create_individual
ga.run = cga.rn_run
# Run real cGA
ga.run(ga, hmdata)
# Get best individual
fitness, _ = ga.best_individual(ga)
# Update extraction variables
results_cga.append(fitness)
hmdatas_cga.append(hmdata)
#
# Get goal of the fitness function
goal = goal_function(data)
#
# Plot result charts
filename = function.__name__ + '_' + str(size) + '_' + str(popsz)
charts.results(results_sga, results_cga, popsz, goal, noe, filename)
# Plot heat map charts
for i, _ in enumerate(hmdatas_sga):
charts.heat_map(hmdatas_sga[i], hmdatas_cga[i], filename, i + 1)
def multi_binary(function, goal_function, size, popsz, noe, maximise=True):
# Define data
data = [0] * size
#
# Save the fitness of each execution
results_sga = []
hmdatas_sga = []
# Execute the sGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set sGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.create_first_generation = sga.create_first_generation
ga.create_next_generation = sga.create_next_generation
ga.best_individual = sga.best_individual
ga.rank_population = sga.rank_population
ga.calculate_population_fitness = sga.bn_calculate_population_fitness
ga.run = sga.bn_run
# Run binary sGA
ga.run(ga, hmdata, multi=True)
# Get best individual
nondominated = ga.best_individual(ga, multi=True)
nddata = []
for nd in nondominated:
# Update extraction variables
fitness, _ = nd
nddata.append(fitness)
results_sga.append(nddata)
hmdatas_sga.append(hmdata)
#
# Save the fitness of each execution
results_cga = []
hmdatas_cga = []
# Execute the cGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set cGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.best_individual = cga.best_individual
ga.create_individual = cga.bn_create_individual
ga.run = cga.bn_run
# Run binary cGA
ga.run(ga, hmdata, multi=True)
# Get best individual
nondominated = ga.best_individual(ga, multi=True)
nddata = []
for nd in nondominated:
# Update extraction variables
fitness, _ = nd
nddata.append(fitness)
results_cga.append(nddata)
hmdatas_cga.append(hmdata)
#
# Get goal of the fitness function
fa_goal, fb_goal = goal_function
goal = fa_goal(data), fb_goal(data)
#
# Plot hypervolume charts
fa, fb = function
fname = fa.__name__ + '_' + fb.__name__ + '_'
filename = fname + str(size) + '_' + str(popsz)
for i, _ in enumerate(results_sga):
charts.hypervolume(results_sga[i], results_cga[i], goal,
True, filename, i + 1)
#
# Plot heat map charts
for i, _ in enumerate(hmdatas_sga):
charts.heat_map(hmdatas_sga[i], hmdatas_cga[i], filename, i + 1)
def multi_real(function, goal_function, size, popsz, noe, maximise=False):
# Define data
data = [0] * size
#
# Save the fitness of each execution
results_sga = []
hmdatas_sga = []
# Execute the sGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set sGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.create_first_generation = sga.create_first_generation
ga.create_next_generation = sga.create_next_generation
ga.best_individual = sga.best_individual
ga.rank_population = sga.rank_population
ga.create_individual = sga.rn_create_individual
ga.mutate_function = sga.rn_mutate_function
ga.calculate_population_fitness = sga.rn_calculate_population_fitness
ga.run = sga.rn_run
# Run real sGA
ga.run(ga, hmdata, multi=True)
# Get best individual
nondominated = ga.best_individual(ga, multi=True)
nddata = []
for nd in nondominated:
# Update extraction variables
fitness, _ = nd
nddata.append(fitness)
results_sga.append(nddata)
hmdatas_sga.append(hmdata)
#
# Save the fitness of each execution
results_cga = []
hmdatas_cga = []
# Execute the cGA `noe` times (noe: number of executions)
for _ in range(noe):
# Heat map data
hmdata = {}
# Set cGA
ga = pyeasyga.GeneticAlgorithm(data, population_size=popsz,
maximise_fitness=maximise, generations=200)
ga.fitness_function = function
# Update default functions
ga.best_individual = cga.best_individual
ga.create_individual = cga.rn_create_individual
ga.run = cga.rn_run
# Run real cGA
ga.run(ga, hmdata, multi=True)
# Get best individual
nondominated = ga.best_individual(ga, multi=True)
nddata = []
for nd in nondominated:
# Update extraction variables
fitness, _ = nd
nddata.append(fitness)
results_cga.append(nddata)
hmdatas_cga.append(hmdata)
#
# Get goal of the fitness function
fa_goal, fb_goal = goal_function
goal = fa_goal(data), fb_goal(data)
#
# Plot hypervolume charts
fa, fb = function
fname = fa.__name__ + '_' + fb.__name__ + '_'
filename = fname + str(size) + '_' + str(popsz)
for i, _ in enumerate(results_sga):
charts.hypervolume(results_sga[i], results_cga[i], goal,
False, filename, i + 1)
#
# Plot heat map charts
for i, _ in enumerate(hmdatas_sga):
charts.heat_map(hmdatas_sga[i], hmdatas_cga[i], filename, i + 1)
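if __name__ == '__main__':
    # Illustrative sketch (editor's addition): a OneMax-style run of the binary
    # mono-objective comparison above. The fitness function follows the
    # pyeasyga convention of fitness(individual, data); the goal function
    # receives the same `data` list and returns the target fitness. Running
    # this requires the sibling sga, cga and charts modules (e.g. via
    # `python -m <package>.<this_module>`), so treat it as indicative only.
    def onemax(individual, data):
        # Count the number of ones in the bit string
        return sum(individual)

    def onemax_goal(data):
        # The best possible OneMax fitness equals the string length
        return len(data)

    mono_binary(onemax, onemax_goal, size=20, popsz=30, noe=2)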
|
from flask import current_app, request
from flask.ext.restful import Resource
from flask.ext.discoverer import advertise
from flask.ext.cache import Cache
from SIMBAD import get_simbad_data
from SIMBAD import do_position_query
from SIMBAD import parse_position_string
from utils import get_objects_from_query_string
import time
import timeout_decorator
class IncorrectPositionFormatError(Exception):
pass
class ObjectSearch(Resource):
"""Return object identifiers for a given object string"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def post(self):
stime = time.time()
# Get the supplied list of identifiers
identifiers = []
objects = []
facets = []
input_type = None
for itype in ['identifiers', 'objects', 'facets']:
try:
identifiers = request.json[itype]
identifiers = map(str, identifiers)
input_type = itype
except:
pass
if not input_type:
current_app.logger.error('No identifiers and objects were specified for SIMBAD object query')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# We should either have a list of identifiers, a list of object names or a list of facets
if len(identifiers) == 0 and len(objects) == 0 and len(facets) == 0:
current_app.logger.error('No identifiers, objects or facets were specified for SIMBAD object query')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
        # How many identifiers do we have?
id_num = len(identifiers)
if id_num == 0:
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# Source to query
source = 'simbad'
# Now check if we have anything cached for them
cached = {id:current_app.cache.get(id.upper()) for id in identifiers if current_app.cache.get(id.upper())}
if source in ['simbad','all'] and len(identifiers) > 0:
# If we have cached values, filter those out from the initial list
if cached:
current_app.logger.debug('Received %s %s. Using %s entries from cache.' % (id_num, input_type, len(cached)))
identifiers = [id for id in identifiers if not current_app.cache.get(id.upper())]
if identifiers:
ident_upper = [i.upper() for i in identifiers]
# We have identifiers, not found in the cache
result = get_simbad_data(identifiers, input_type)
if 'Error' in result:
# An error was returned!
current_app.logger.error('Failed to find data for SIMBAD %s query!'%input_type)
return result
else:
# We have results!
duration = time.time() - stime
                    current_app.logger.info('Found objects for SIMBAD %s in %s seconds.' % (input_type, duration))
# Before returning results, cache them
for ident, value in result['data'].items():
current_app.cache.set(ident.upper(), value, timeout=current_app.config.get('OBJECTS_CACHE_TIMEOUT'))
# Now pick the entries in the results that correspond with the original object names
if input_type == 'objects':
result['data'] = {k: result['data'].get(k.upper()) for k in identifiers}
# If we had results from cache, merge these in
if cached:
res = cached.copy()
res.update(result.get('data',{}))
return res
# Otherwise just send back the results
else:
return result.get('data',{})
elif cached:
# We only had cached results
return cached
else:
# This should never happen
current_app.logger.error('No data found, even though we had %s! Should never happen!'%input_type)
result = {
"Error": "Failed to find data for SIMBAD %s query!"%input_type,
"Error Info": "No results found, where results were expected! Needs attention!"
}
return result
class PositionSearch(Resource):
"""Return publication information for a cone search"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def get(self, pstring):
# The following position strings are supported
# 1. 05 23 34.6 -69 45 22:0 6 (or 05h23m34.6s -69d45m22s:0m6s)
# 2. 05 23 34.6 -69 45 22:0.166666 (or 05h23m34.6s -69d45m22s:0.166666)
# 3. 80.89416667 -69.75611111:0.166666
stime = time.time()
# If we're given a string with qualifiers ('h', etc), convert to one without
current_app.logger.info('Attempting SIMBAD position search: %s'%pstring)
try:
RA, DEC, radius = parse_position_string(pstring)
except Exception, err:
current_app.logger.error('Position string could not be parsed: %s' % pstring)
return {'Error': 'Unable to get results!',
'Error Info': 'Invalid position string: %s'%pstring}, 200
try:
result = do_position_query(RA, DEC, radius)
except timeout_decorator.timeout_decorator.TimeoutError:
current_app.logger.error('Position query %s timed out' % pstring)
return {'Error': 'Unable to get results!',
'Error Info': 'Position query timed out'}, 200
return result
class QuerySearch(Resource):
"""Given a Solr query with object names, return a Solr query with SIMBAD identifiers"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def post(self):
stime = time.time()
# Get the supplied list of identifiers
identifiers = []
query = None
itype = None
name2id = {}
try:
query = request.json['query']
input_type = 'query'
except:
current_app.logger.error('No query was specified for SIMBAD object search')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# If we get the request from BBB, the value of 'query' is actually an array
if isinstance(query, list):
try:
solr_query = query[0]
except:
solr_query = ''
else:
solr_query = query
current_app.logger.info('Received SIMBAD object query: %s'%solr_query)
new_query = solr_query.replace('object:','simbid:')
# If we receive a (Solr) query string, we need to parse out the object names
try:
identifiers = get_objects_from_query_string(solr_query)
except:
current_app.logger.error('Parsing the identifiers out of the query string blew up!')
return {"Error": "Unable to get results!",
"Error Info": "No objects found in query string"}, 200
identifiers = [iden for iden in identifiers if iden.lower() not in ['object',':']]
        # How many object names did we find?
id_num = len(identifiers)
# Keep a list with the object names we found
identifiers_orig = identifiers
# If we did not find any object names, there is nothing to do!
if id_num == 0:
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# Source to query
source = 'simbad'
if source in ['simbad','all'] and len(identifiers) > 0:
if identifiers:
for ident in identifiers:
result = get_simbad_data([ident], 'objects')
if 'Error' in result:
# An error was returned!
current_app.logger.error('Failed to find data for SIMBAD %s query!'%input_type)
return result
try:
SIMBADid =[e.get('id',0) for e in result['data'].values()][0]
except:
SIMBADid = '0'
name2id[ident] = SIMBADid
for oname in identifiers:
try:
SIMBADid = name2id.get(oname)
except:
SIMBADid = '0'
new_query = new_query.replace(oname, SIMBADid)
return {"query": new_query}
else:
# This should never happen
current_app.logger.error('No data found, even though we had %s! Should never happen!'%input_type)
result = {
"Error": "Failed to find data for SIMBAD %s query!"%input_type,
"Error Info": "No results found, where results were expected! Needs attention!"
}
return result
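# Illustrative sketch (editor's addition): the Resource classes above expect
# JSON POST bodies of the shapes below. The URL routes are assigned by the
# hosting application and are not defined in this module, and the object names
# and identifiers are arbitrary examples.
EXAMPLE_OBJECT_SEARCH_BODY = {"objects": ["LMC", "SMC"]}
EXAMPLE_IDENTIFIER_SEARCH_BODY = {"identifiers": ["3133169", "1575544"]}
EXAMPLE_QUERY_SEARCH_BODY = {"query": ["object:LMC"]}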
|
from .. import base
class Metadata(base.Resource):
NAME_ATTR = 'display_name'
def __repr__(self):
return "<Metadata>"
class MetadataManager(base.Manager):
resource_class = Metadata
def get(self, server_id,key):
url = "/servers/%s/metadata/%s" % (server_id,key)
return self._get(url, "metadata")
def delete(self, server_id,key):
url = "/servers/%s/metadata/%s" % (server_id,key)
return self._delete(url)
def list(self, server_id):
url = "/servers/%s/metadata" % server_id
return self._get(url, "metadata")
def update(self, server_id, key, body):
url = "/servers/%s/metadata/%s" % (server_id,key)
return self._update(url, body, "metadata")
    # Temporary bug fix: currently update replaces the metadata while create merges the data
def replace(self, server_id, body):
url = "/servers/%s/metadata" % server_id
return self._update(url, body, "metadata")
def merge(self, server_id, body):
url = "/servers/%s/metadata" % server_id
return self._create(url, body, "metadata")
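# Illustrative sketch (editor's addition): assuming `client.metadata` is a
# MetadataManager instance bound to an HTTP client, typical calls look like
#   client.metadata.list(server_id)
#   client.metadata.get(server_id, 'role')
#   client.metadata.delete(server_id, 'role')
# The body format expected by update/replace/merge depends on the backing API
# and is not specified in this module.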
|
import numpy as np
from ..field import CartesianGrid, UnstructuredCoords
from .generic import make_obstructed_circular_aperture, make_spider
def make_vlt_aperture():
pass
def make_subaru_aperture():
pass
def make_lbt_aperture():
pass
def make_magellan_aperture(normalized=False):
'''Make the Magellan aperture.
Parameters
----------
normalized : boolean
If this is True, the outer diameter will be scaled to 1. Otherwise, the
diameter of the pupil will be 6.5 meters.
Returns
-------
Field generator
The Magellan aperture.
'''
pupil_diameter = 6.5 #m
spider_width1 = 0.75 * 0.0254 #m
spider_width2 = 1.5 * 0.0254 #m
central_obscuration_ratio = 0.29
spider_offset = [0,0.34] #m
if normalized:
spider_width1 /= pupil_diameter
spider_width2 /= pupil_diameter
spider_offset = [x / pupil_diameter for x in spider_offset]
pupil_diameter = 1
spider_offset = np.array(spider_offset)
mirror_edge1 = (pupil_diameter / (2*np.sqrt(2)), pupil_diameter / (2*np.sqrt(2)))
mirror_edge2 = (-pupil_diameter / (2*np.sqrt(2)), pupil_diameter / (2*np.sqrt(2)))
mirror_edge3 = (pupil_diameter / (2*np.sqrt(2)), -pupil_diameter / (2*np.sqrt(2)))
mirror_edge4 = (-pupil_diameter / (2*np.sqrt(2)), -pupil_diameter / (2*np.sqrt(2)))
obstructed_aperture = make_obstructed_circular_aperture(pupil_diameter, central_obscuration_ratio)
spider1 = make_spider(spider_offset, mirror_edge1, spider_width1)
spider2 = make_spider(spider_offset, mirror_edge2, spider_width1)
spider3 = make_spider(-spider_offset, mirror_edge3, spider_width2)
spider4 = make_spider(-spider_offset, mirror_edge4, spider_width2)
def func(grid):
return obstructed_aperture(grid) * spider1(grid) * spider2(grid) * spider3(grid) * spider4(grid)
return func
def make_keck_aperture():
pass
def make_luvoir_aperture():
pass
def make_elt_aperture():
pass
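# Illustrative sketch (editor's addition): evaluating the Magellan aperture on
# a pupil grid, assuming an hcipy-style `make_pupil_grid` helper is available
# in the surrounding package:
#   aperture = make_magellan_aperture(normalized=True)
#   grid = make_pupil_grid(512)   # unit-diameter grid matching normalized=True
#   pupil = aperture(grid)        # Field of 0/1 transmission values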
|
from tgalice.testing.testing_utils import make_context
from dm import StretchDM
def test_hello():
dm = StretchDM()
resp = dm.respond(make_context(text='Привет', new_session=True))
assert '30' in resp.text
|
from pathlib import Path
from sqlalchemy import desc
import quetz
from quetz.utils import add_temp_static_file
from . import db_models
from .api import get_db_manager, router
from .repo_signer import RepoSigner
@quetz.hookimpl
def register_router():
return router
@quetz.hookimpl
def post_package_indexing(tempdir: Path, channel_name, subdirs, files, packages):
with get_db_manager() as db:
        # the most recently created key is fetched since we
# cannot get `user_id` outside a request / API call.
query = (
db.query(db_models.RepodataSigningKey)
.filter(
db_models.RepodataSigningKey.channel_name == channel_name,
)
.order_by(desc('time_created'))
.first()
)
if query:
for subdir in subdirs:
repodata_folderpath = tempdir / channel_name / subdir
RepoSigner(repodata_folderpath, query.private_key)
with open(
tempdir / channel_name / subdir / "repodata_signed.json"
) as f:
repodata_signed = f.read()
add_temp_static_file(
repodata_signed,
channel_name,
subdir,
"repodata_signed.json",
tempdir,
files,
)
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
try:
import site
import os
import sys
from collections import OrderedDict
print("import os was successful")
def check_warrior_default_modules_import():
try:
from warrior.Framework import Utils
except Exception as e:
e = str(e)
if e.startswith("No module named 'warrior"):
pkg_name = e.split("No module named")[-1].split("'")[1]
pkg_parent = "warrior_" + pkg_name.split("warrior")[-1]
pkg_full_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "warrior_modules", pkg_parent)
if os.path.exists(pkg_full_path):
if pkg_full_path not in sys.path:
sys.path.append(pkg_full_path)
check_warrior_default_modules_import()
else:
print_error("{0}\n".format(str(e)))
else:
raise
else:
print("import Utils was successful")
check_warrior_default_modules_import()
from warrior.Framework import Utils
from warrior.WarriorCore.Classes.rerun_testsuite import execute_failedsuite
from warrior.WarriorCore.Classes.rerun_project import execute_failedproject
print("Import rerun was successful")
import shutil
print("import shutil was successful")
import warrior.Framework.Utils.email_utils as email
print("import email was successful")
from warrior.Framework.Utils.print_Utils import print_error, print_info, print_debug
print("import print_Utils was successful")
from warrior.WarriorCore import testcase_driver, testsuite_driver, project_driver
print("import testcase_driver, testsuite_driver, project_driver were successful")
from warrior.WarriorCore import ironclaw_driver, framework_detail
print("import ironclaw_driver, framework_detail were successful")
from warrior.WarriorCore.Classes.jira_rest_class import Jira
print("import jira_rest_class was successful")
from warrior.Framework.ClassUtils import database_utils_class
print("import database_utils_class was successful")
from xml.etree import ElementTree as et
print("import element tree was successful")
except:
print("\033[1;31m*********************************************")
print(" !-Unable to import library in for Warrior Framework in warrior_cli_driver")
print(" !-Successful imported libraries are printed above")
print(" !-Please check your import statements for any code added to the framework")
print(" !-Possible cause could be circular import")
print("*********************************************\033[0m")
raise
import re
import multiprocessing
from os.path import dirname, abspath
from warrior.Framework.Utils import config_Utils, file_Utils, xml_Utils
from warrior.Framework.Utils.data_Utils import get_credentials
import warrior.Framework.Utils.encryption_utils as Encrypt
from warrior.WarriorCore.Classes import war_cli_class
"""Handle all the cli command, new functions may be added later"""
def update_jira_by_id(jiraproj, jiraid, exec_dir, status):
""" If jiraid is provided, upload the log and result file to jira """
if jiraid is not False:
jira_obj = Jira(jiraproj)
if jira_obj.status is True:
# Get the current jira issue status
issue_status = jira_obj.get_jira_issue_status(jiraid)
isReopend = False
# Reopen the jira issue if it is closed, to upload the execution logs
if issue_status and issue_status.lower() == "closed":
print_info("Reopening Jira issue '{0}' to upload warrior "
"execution logs".format(jiraid))
jira_obj.set_jira_issue_status(jiraid, "Reopened")
isReopend = True
# Upload execution logs
zip_file = shutil.make_archive(exec_dir, 'zip', exec_dir)
jira_obj.upload_logfile_to_jira_issue(jiraid, zip_file)
# Close the jira issue if it is reopened in the above if block
if isReopend is True:
print_info("Closing Jira issue '{0}' which was reopened "
"earlier".format(jiraid))
jira_obj.set_jira_issue_status(jiraid, "Closed")
# Update Jira issue based on warrior execution status
jira_obj.update_jira_issue(jiraid, status)
else:
print_info("jiraid not provided, will not update jira issue")
def add_live_table_divs(livehtmllocn, file_list):
"""
add the divs for the live html table
"""
root_attribs = {'id': 'liveTables'}
root = Utils.xml_Utils.create_element("div", "", **root_attribs)
# for each iteration create a div with id = the iteration number
    # the table for this iteration will be added under this div
for i in range(0, len(file_list)):
marker_start = 'table-{0}starts'.format(str(i))
marker_end = 'table-{0}ends'.format(str(i))
div_attribs = {'id': str(i)}
elem = Utils.xml_Utils.create_subelement(root, 'div', div_attribs)
start_comment = Utils.xml_Utils.create_comment_element(marker_start)
end_comment = Utils.xml_Utils.create_comment_element(marker_end)
elem.append(start_comment)
elem.append(end_comment)
# write the tree to the file
if isinstance(livehtmllocn, str):
xml_Utils.write_tree_to_file(root, livehtmllocn)
elif isinstance(livehtmllocn, multiprocessing.managers.DictProxy):
livehtmllocn["html_result"] = xml_Utils.convert_element_to_string(root)
def file_execution(cli_args, abs_filepath, default_repo):
"""
Call the corresponded driver of each file type
"""
result = False
a_defects = cli_args.ad
jiraproj = cli_args.jiraproj
jiraid = cli_args.jiraid
if Utils.xml_Utils.getRoot(abs_filepath).tag == 'Testcase':
default_repo['war_file_type'] = "Case"
result, _, data_repository = testcase_driver.main(
abs_filepath, data_repository=default_repo,
runtype='SEQUENTIAL_KEYWORDS',
auto_defects=a_defects, jiraproj=jiraproj, jiraid=jiraid)
if not Utils.data_Utils.get_object_from_datarepository('genericdatafile'):
update_jira_by_id(jiraproj, jiraid, os.path.dirname(
data_repository['wt_resultsdir']), result)
email.compose_send_email("Test Case: ", abs_filepath,
data_repository['wt_logsdir'],
data_repository['wt_resultsdir'], result)
elif Utils.xml_Utils.getRoot(abs_filepath).tag == 'TestSuite':
default_repo['war_file_type'] = "Suite"
result, suite_repository = testsuite_driver.main(
abs_filepath, auto_defects=a_defects,
jiraproj=jiraproj, data_repository=default_repo)
update_jira_by_id(jiraproj, jiraid,
suite_repository['suite_execution_dir'], result)
email.compose_send_email("Test Suite: ", abs_filepath,
suite_repository['ws_logs_execdir'],
suite_repository['ws_results_execdir'], result)
elif Utils.xml_Utils.getRoot(abs_filepath).tag == 'Project':
default_repo['war_file_type'] = "Project"
result, project_repository = project_driver.main(
abs_filepath, auto_defects=a_defects,
jiraproj=jiraproj, data_repository=default_repo)
update_jira_by_id(jiraproj, jiraid,
project_repository['project_execution_dir'], result)
email.compose_send_email("Project: ", abs_filepath,
project_repository['wp_logs_execdir'],
project_repository['wp_results_execdir'], result)
else:
print_error("Unrecognized root tag in the input xml file ! exiting!!!")
return result
def group_execution(parameter_list, cli_args, db_obj, overwrite, livehtmlobj):
"""
Process the parameter list and prepare environment for file_execution
"""
livehtmllocn = cli_args.livehtmllocn
abs_cur_dir = os.path.abspath(os.curdir)
status = True
iter_count = 0 ## this iter is used for live html results
for parameter in parameter_list:
default_repo = OrderedDict()
result = False
# check if the input parameter is an xml file
if Utils.file_Utils.get_extension_from_path(parameter) == '.xml':
filepath = parameter
framework_detail.warrior_banner()
abs_filepath = Utils.file_Utils.getAbsPath(filepath, abs_cur_dir)
print_info('Absolute path: {0}'.format(abs_filepath))
if Utils.file_Utils.fileExists(abs_filepath):
if list(overwrite.items()):
default_repo.update(overwrite)
if db_obj is not False and db_obj.status is True:
default_repo.update({'db_obj': db_obj})
else:
default_repo.update({'db_obj': False})
                # Update livehtmllocn in the default repo
if livehtmllocn or livehtmlobj is not None:
live_html_dict = {}
live_html_dict['livehtmllocn'] = \
livehtmllocn if livehtmlobj is None else livehtmlobj
live_html_dict['iter'] = iter_count
default_repo.update({'live_html_dict': live_html_dict})
if iter_count == 0 and livehtmlobj is None:
add_live_table_divs(livehtmllocn, parameter_list)
elif iter_count == 0 and livehtmlobj is not None:
add_live_table_divs(livehtmlobj, parameter_list)
# Adding user repo's to pythonpath
path_list = []
if default_repo.get("pythonpath", False):
path_list = default_repo.get("pythonpath").split(":")
print_info("user repositories path list is {}".format(path_list))
for path in path_list:
if os.path.exists(path):
sys.path.append(path)
else:
print_error("Given pythonpath doesn't exist : {}".format(path))
result = file_execution(cli_args, abs_filepath, default_repo)
else:
print_error("file does not exist !! exiting!!")
else:
print_error("unrecognized file format !!!")
status = status and result
iter_count += 1
return status
# def execution(parameter_list, a_defects, cse_execution, iron_claw,
# jiraproj, overwrite, jiraid, dbsystem, livehtmllocn):
def execution(parameter_list, cli_args, overwrite, livehtmlobj):
"""Parses the input parameters (i.e. sys.argv)
If the input parameter is an xml file:
        - check if the file exists; if it does:
        - if the input is a testcase xml file, execute the testcase
        - if the input is a testsuite xml file, execute the testsuite
        - if the input is a project xml file, execute the project
    If the input is not an xml file:
        - check if it is a json object/array representing a valid Warrior
          suite structure; if yes, execute a build
Arguments:
1. parameter_list = list of command line parameters supplied by
the user to execute Warrior
"""
if livehtmlobj:
config_Utils.redirect_print.katana_console_log(livehtmlobj)
if cli_args.version:
framework_detail.warrior_framework_details()
sys.exit(0)
if not parameter_list:
print_error("Provide at least one xml file to execute")
sys.exit(1)
iron_claw = cli_args.ironclaw
dbsystem = cli_args.dbsystem
status = False
if iron_claw:
status = ironclaw_driver.main(parameter_list)
else:
db_obj = database_utils_class.create_database_connection(dbsystem=dbsystem)
status = group_execution(parameter_list, cli_args, db_obj, overwrite, livehtmlobj)
if db_obj is not False and db_obj.status is True:
db_obj.close_connection()
return status
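# Illustrative sketch (editor's addition): a typical terminal invocation that
# reaches execution() through warrior_execute_entry(), for example
#   warrior Warriorspace/Testcases/demo_case.xml -datafile Warriorspace/Data/demo_data.xml
# The paths and flag spellings here are placeholders; the authoritative
# argument definitions live in war_cli_class.WarriorCliClass.parser().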
def warrior_execute_entry(*args, **kwargs):
"""
main method
filepath: required at least one
auto_defects:
version:
iron_claw:
jiraproj:
overwrite:
jiraid:
dbsystem:
livehtmllocn:
"""
try:
if len(sys.argv) >= 2:
if sys.argv[1] == "-tc_gen":
print_info("Initializing tc generator tool !!")
tc_generator_dir_path = "WarriorTools/tc_generator"
current_working_directory = dirname(dirname(abspath(__file__)))
tc_generator_path = os.path.join(current_working_directory, tc_generator_dir_path)
os.system("python {}/tc_generator {}".format(tc_generator_path, " ".join(sys.argv[2:])))
sys.exit()
if sys.argv[1] == "-warrior_py3_migration_tool":
print_info("Initializing tc warrior_py3_migration_tool tool !!")
war_path = dirname(dirname(abspath(__file__)))
warrior_py3_migration_tool_path = "{}/WarriorTools/warrior_py3_migration_tools".\
format(war_path)
os.system("python {}/warrior_py3_migration_tool {}".format(warrior_py3_migration_tool_path,\
" ".join(sys.argv[2:])))
sys.exit()
if sys.argv[1] == "-rerun":
print_info("Initializing the rerun feature !!")
try:
junit_path = sys.argv[2]
if os.path.exists(junit_path):
tree = et.parse(junit_path)
root = tree.getroot()
if root.tag.islower() and root.tag == "testsuites":
child_list = []
for child in root:
child_list.append(child.tag)
if "property" in child_list:
print_info("The Junit file provided is a project file")
execute_failedproject(junit_path)
sys.exit()
else:
print_info("The Junit file provided is a testsuite file")
execute_failedsuite(junit_path)
sys.exit()
else:
print_error("Invalid junit path")
sys.exit()
else:
print_error("Invalid junit path")
except Exception as e:
print(e)
print_error("Junit Path is missing")
sys.exit()
if not kwargs:
            # Launch from terminal/cli execution
filepath, cli_args, overwrite = main(sys.argv[1:])
else:
args = [] if not args else args
# Launch from python function call
filepath, cli_args, overwrite = main(*args)
livehtmlobj = kwargs.get("livehtmlobj", None)
status = execution(filepath, cli_args, overwrite, livehtmlobj)
status = {"true": True, "pass": True, "ran": True}.get(str(status).lower())
# add code to send div finished using katana interface class
if status is True:
print_info("DONE 0")
sys.exit(0)
else:
print_info("DONE 1")
sys.exit(1)
except:
data_repo = config_Utils.data_repository
if data_repo is not None:
if 'war_parallel' in data_repo:
if not data_repo['war_parallel']:
war_file_type = data_repo['war_file_type']
tc_junit_object = data_repo['wt_junit_object']
if war_file_type == "Case":
tc_junit_object.junit_output(data_repo['wt_resultsdir'])
elif war_file_type == "Suite":
tc_junit_object.junit_output(data_repo['wt_results_execdir'])
elif war_file_type == "Project":
tc_junit_object.junit_output(data_repo['wp_results_execdir'])
"""Handle all the cli command, new functions may be added later"""
def decide_runcat_actions(w_cli_obj, namespace):
"""Decide the actions to be taken for runcat tag """
filepath = namespace.filepath
if namespace.tcdir is not None and len(namespace.tcdir) == 0:
namespace.tcdir = None
if namespace.runcat and namespace.suitename is None:
namespace.cat = namespace.runcat
filepath = w_cli_obj.check_tag(namespace.cat, namespace.tcdir)
elif namespace.runcat and namespace.suitename is not None and len(namespace.runcat) != 0:
namespace.cat = namespace.runcat
filepath = w_cli_obj.examine_create_suite(namespace)
print_info("suite created in ", filepath[0])
if len(filepath) == 0:
print_error("No matching Testcases found for the provided category(ies)")
exit(1)
print_info("file path for runcat actions is ", str(filepath))
return filepath
def decide_createsuite_actions(w_cli_obj, namespace):
"""Decide the actions for -createsuite tag """
filepath = namespace.filepath
    # namespace.create was already checked by the caller, no need to double-check
if namespace.filepath is not None and len(namespace.filepath) == 0:
namespace.filepath = None
if all([namespace.suitename, namespace.filepath]):
filepath = w_cli_obj.examine_create_suite(namespace)
print_info("suite created in ", filepath[0])
exit(0)
if all([namespace.suitename, namespace.cat]):
filepath = w_cli_obj.examine_create_suite(namespace)
print_info("suite created in ", filepath[0])
exit(0)
elif not namespace.cat and not all([namespace.suitename, namespace.filepath]):
print_error("Invalid combination... Use -createsuite with -suitename, "
"filepath(s) (i.e. list of testcase xml files. "
"Use -h or --help for more command line options")
exit(1)
elif namespace.cat and not namespace.suitename:
print_error("Invalid combination... Use -creatsuite + -category "
"with -suitename")
exit(1)
return filepath
def decide_ujd_actions(w_cli_obj, namespace):
"""Decide upload jira objects actions """
if namespace.ujd and any([namespace.ddir, namespace.djson]):
if namespace.ddir is not None and namespace.djson is None:
w_cli_obj.manual_defects("dir", namespace.ddir, jiraproj=namespace.jiraproj)
elif namespace.djson is not None and namespace.ddir is None:
w_cli_obj.manual_defects("files", namespace.djson, jiraproj=namespace.jiraproj)
elif namespace.ddir is not None and namespace.djson is not None:
print_error("Use -ujd with one of -ddir or -djson not both")
exit(0)
elif namespace.ujd and not any([namespace.ddir, namespace.djson]):
print_error("Use -ujd with one of -ddir or -djson")
exit(1)
def decide_overwrite_var(namespace):
"""options provided in cli get preference over the ones provided inside tests
"""
overwrite = {}
if namespace.datafile:
if namespace.datafile[0] != os.sep:
namespace.datafile = os.getcwd() + os.sep + namespace.datafile
overwrite['ow_datafile'] = namespace.datafile
#namespace for mapfile
if namespace.mapfile:
if namespace.mapfile[0] != os.sep:
namespace.mapfile = os.getcwd() + os.sep + namespace.mapfile
overwrite['ow_mapfile'] = namespace.mapfile
# namespace for wrapperfile
if namespace.wrapperfile:
if namespace.wrapperfile[0] != os.sep:
namespace.wrapperfile = os.getcwd() + os.sep + namespace.wrapperfile
overwrite['ow_testwrapperfile'] = namespace.wrapperfile
# namespace for random tc execution
if namespace.random_tc_execution:
overwrite['random_tc_execution'] = namespace.random_tc_execution
if namespace.resultdir:
if namespace.resultdir[0] != os.sep:
namespace.resultdir = os.getcwd() + os.sep + namespace.resultdir
overwrite['ow_resultdir'] = namespace.resultdir
if namespace.logdir:
if namespace.logdir[0] != os.sep:
namespace.logdir = os.getcwd() + os.sep + namespace.logdir
overwrite['ow_logdir'] = namespace.logdir
if namespace.outputdir:
if namespace.outputdir[0] != os.sep:
namespace.outputdir = os.getcwd() + os.sep + namespace.outputdir
overwrite['ow_resultdir'] = namespace.outputdir
overwrite['ow_logdir'] = namespace.outputdir
if all([namespace.outputdir, any([namespace.resultdir, namespace.logdir])]):
print_error("outputdir shouldn't be used with resultdir or logdir")
exit(1)
if namespace.jobid:
#settings_xml = Tools.__path__[0] + os.sep + 'w_settings.xml'
settings_xml = os.getenv("WAR_TOOLS_DIR") + os.sep + 'w_settings.xml'
job_url = get_credentials(settings_xml, 'job_url', ['url'], 'Setting')
if job_url['url'] is not None:
url = job_url['url']
else:
print_debug("jobid is specified but no job url found in w_settings")
print_info("Using jobid only in JUnit file")
url = ""
overwrite['jobid'] = url + str(namespace.jobid)
if namespace.pythonpath:
overwrite['pythonpath'] = namespace.pythonpath
if namespace.genericdatafile:
overwrite['genericdatafile'] = namespace.genericdatafile
if namespace.gen_no_of_samples:
overwrite['gen_no_of_samples'] = namespace.gen_no_of_samples
if namespace.gen_select_rows:
overwrite['gen_select_rows'] = namespace.gen_select_rows
if namespace.gen_shuffle_columns:
overwrite['gen_shuffle_columns'] = namespace.gen_shuffle_columns
if namespace.gen_purge_db:
overwrite['gen_purge_db'] = namespace.gen_purge_db
if namespace.gen_exec_tag:
overwrite['gen_exec_tag'] = namespace.gen_exec_tag
if namespace.gen_report:
overwrite['gen_report'] = namespace.gen_report
return overwrite
def append_path(filepath, path_list, path):
"""Append appropriate paths for testcase/suite/project in test folder
"""
temp_list = []
for file_name in path_list:
file_name = path + file_name
temp_list.append(file_name)
if temp_list:
filepath.extend(temp_list)
return filepath
def decide_action(w_cli_obj, namespace):
"""Prepare filepath and other arguments for Warrior main to use"""
# First level, sleep
    # set environment variables passed via the cli
if namespace.setenv:
env_vir_list = [x.strip() for x in namespace.setenv.split(',')]
for env_vir in env_vir_list:
env_val_splt = env_vir.split(':')
env_vir_name = env_val_splt[0]
env_vir_val = env_val_splt[1]
# env_name = env_val_splt[0].strip('"\'')
# env_val = env_val_splt[1].strip('"\'')
os.environ[env_vir_name] = env_vir_val
if namespace.target_time:
w_cli_obj.gosleep(namespace.target_time)
# Second level, decide filepath
cli_args = [namespace.kwparallel, namespace.kwsequential,
namespace.tcparallel, namespace.tcsequential,
namespace.RMT, namespace.RUF]
filepath = namespace.filepath
# runcat related actions
if namespace.runcat:
filepath = decide_runcat_actions(w_cli_obj, namespace)
elif namespace.create:
filepath = decide_createsuite_actions(w_cli_obj, namespace)
elif namespace.encrypt:
status = True
encoded_key = False
if namespace.secretkey:
# Checks if User has given a string for creating a secret key
status, encoded_key = Encrypt.set_secret_key(namespace.secretkey)
else:
# If secret key has not been given, checks for the existence of the
# secret.key file
#path = file_Utils.get_parent_dir(os.path.realpath(__file__),
# "WarriorCore")
#path = os.path.join(path, "Tools", "admin", "secret.key")
path = os.path.join(os.getenv("WAR_TOOLS_DIR"), "admin", "secret.key")
if not os.path.exists(path):
print_error("Could not find the secret.key file in Tools/Admin!"
" Please use '-secretkey your_key_text' in the "
"-encrypt command for creating the file!")
status = False
if status:
# sends secret key and plain text password for encryption
message = Encrypt.encrypt(namespace.encrypt[0], encoded_key)
# Checks if message is not hexadecimal
if re.match(r".*[g-z].*", message):
print_error(message)
else:
print_info("The encrypted text for '{0}' is: {1}".
format(namespace.encrypt[0], message))
exit(0)
else:
print_error("Encrypted text could not be generated.")
exit(1)
elif namespace.decrypt:
status = True
encoded_key = False
if namespace.secretkey:
# Checks if User has given a string for creating a secret key
status, encoded_key = Encrypt.set_secret_key(namespace.secretkey)
else:
# If secret key has not been given, checks for the existence of the
# secret.key file
#path = file_Utils.get_parent_dir(os.path.realpath(__file__),
# "WarriorCore")
#path = os.path.join(path, "Tools", "admin", "secret.key")
path = os.path.join(os.getenv("WAR_TOOLS_DIR"), "admin", "secret.key")
if not os.path.exists(path):
print_error("Could not find the secret.key file in Tools/Admin!"
" Please use '-secretkey your_key_text' in the "
"-encrypt command for creating the file!")
status = False
if status:
# sends secret key and encrypted text password for decryption
message = Encrypt.decrypt(namespace.decrypt[0], encoded_key)
print_info("The decrypted text for '{0}' is: {1}". \
format(namespace.decrypt[0], message))
exit(0)
else:
print_error("Decrypted text could not be generated.")
exit(1)
elif any(cli_args):
filepath = w_cli_obj.examine_cli_args(cli_args, namespace)
elif namespace.ujd:
decide_ujd_actions(w_cli_obj, namespace)
# append additional path
if namespace.tc_name is not None:
filepath = append_path(filepath, namespace.tc_name, "Warriorspace/Testcases/")
if namespace.ts_name is not None:
filepath = append_path(filepath, namespace.ts_name, "Warriorspace/Suites/")
if namespace.proj_name is not None:
filepath = append_path(filepath, namespace.proj_name, "Warriorspace/Projects/")
# overwrite layer
overwrite = decide_overwrite_var(namespace)
if filepath is None:
print_error("No input filepath: {0}".format(namespace.filepath))
exit(1)
else:
for index, file_name in enumerate(filepath):
if len(file_name.split('.')) == 1:
filepath[index] = file_name + '.xml'
return (filepath, namespace, overwrite)
def main(args):
"""init a Warrior Cli Class object, parse its arguments and run it"""
w_cli_obj = war_cli_class.WarriorCliClass()
parsed_args = w_cli_obj.parser(args)
return decide_action(w_cli_obj, parsed_args)
if __name__ == "__main__":
print(re.match(r"[g-z]", input("Enter: ")))
main(sys.argv[1:])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Understanding some of the things in the Scheuing & Yang 2008 paper.
This is an *extremely* simplified situation where there is one direct
path and one indirect path that is recorded by all microphones.
Created on Mon Jul 5 10:48:01 2021
@author: thejasvi
"""
#%%
import numpy as np
np.random.seed(82319)
import pandas as pd
import scipy.signal as signal
import scipy.spatial as spatial
import matplotlib.pyplot as plt
from itertools import combinations
from gen_cross_corr import estimate_gcc
#%%
def simulate_sound_propagation(**kwargs):
# microphone array geometry
R = 1.2
theta = np.pi/3
tristar = np.row_stack(([0,0,0],
[-R*np.sin(theta), 0, -R*np.cos(theta)],
[R*np.sin(theta), 0, -R*np.cos(theta)],
[0,0, R]))
tristar[:,1] += [1e-3, 0.5e-3, 0.25e-3, 0.15e-3]
sound_pos = kwargs.get('sound_pos',np.array([3,2,1]))
reflection_source = kwargs.get('reflection_source',np.array([1,4,1]))
direct_indirect_sources = np.row_stack((sound_pos, reflection_source))
# direct path propagation:
dist_mat = spatial.distance_matrix(direct_indirect_sources, tristar)
#add the distance of propagation from source to reflection point
source_to_reflectionpoint = spatial.distance.euclidean(sound_pos, reflection_source)
dist_mat[1,:] += source_to_reflectionpoint
    # make the emitted chirp signal
chirp_durn = 0.003
fs = 192000
t = np.linspace(0,chirp_durn,int(fs*chirp_durn))
chirp = signal.chirp(t,80000,t[-1],25000)
chirp *= signal.hann(chirp.size)*0.5
vsound = 340.0
audio = np.zeros((int(fs*0.03),4))
toa_sounds = dist_mat/vsound
toa_samples = np.int64(toa_sounds*fs)
for channel in range(4):
random_atten = np.random.choice(np.linspace(0.2,0.9,20),2)
start_direct, start_indirect = toa_samples[0,channel], toa_samples[1,channel]
audio[start_direct:start_direct+chirp.size,channel] += chirp*random_atten[0]
audio[start_indirect:start_indirect+chirp.size,channel] += chirp*random_atten[1]
audio += np.random.normal(0,1e-5,audio.size).reshape(audio.shape)
return audio , dist_mat, tristar, (sound_pos, reflection_source)
#%% Generate the cross-corr for each channel pair
def generate_multich_crosscorr(input_audio, **kwargs):
'''
Generates all unique pair cross-correlations: (NxN-1)/2 pairs. Each pair is
designated by a tuple where the second number is the reference channel, eg. (1,0)
where channel 1 is cross-correlated with reference to channel 0.
Parameters
----------
input_audio: np.array
M samples x N channels
gcc : boolean
Whether to use a gcc instead of the standard cross-correlation.
Defaults to False.
Returns
-------
multichannel_cc : dictionary
Keys indicate the channel pair, and entries are the cross-correlation.
Each cross-correlation has M samples (same size as one audio channel).
'''
num_channels = input_audio.shape[1]
unique_pairs = list(combinations(range(num_channels), 2))
multichannel_cc = {}
for cha, chb in unique_pairs:
# make sure the lower channel number is the reference signal
signal_ch, ref_signal = sorted([cha, chb], reverse=True)
if not kwargs.get('gcc',False):
multichannel_cc[(signal_ch, ref_signal)] = signal.correlate(input_audio[:,signal_ch],
input_audio[:,ref_signal],'full')
else:
multichannel_cc[(signal_ch, ref_signal)] = estimate_gcc(input_audio[:,signal_ch],
input_audio[:,ref_signal])
return multichannel_cc
def generate_multich_autocorr(input_audio):
'''
Parameters
----------
input_audio : np.array
M samples x Nchannels
Returns
-------
multichannel_autocor : np.array
M samples x Nchannels
'''
return np.apply_along_axis(lambda X: signal.correlate(X,X,'same'),0, input_audio)
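if __name__ == '__main__':
    # Illustrative sketch (editor's addition): chaining the simulation with the
    # two helpers above. gcc=False keeps the plain scipy cross-correlation; set
    # gcc=True to use estimate_gcc from gen_cross_corr instead.
    sim_audio, sim_dist_mat, sim_tristar, sim_sources = simulate_sound_propagation()
    multich_cc = generate_multich_crosscorr(sim_audio, gcc=False)
    multich_acc = generate_multich_autocorr(sim_audio)
    print('cross-correlated channel pairs:', sorted(multich_cc.keys()))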
#%% extract auto-cor and cross-cor peaks
fs = 192000  # Hz; sampling rate used in simulate_sound_propagation above
cc_and_acc_peaks = lambda X: signal.find_peaks(X, 0.11, distance=int(fs*1e-4))[0]
def make_p_prime_kk_ll(crosscor_peaks, acc_peaks_ch1, acc_peaks_ch2, twrm):
'''
Identifies the peaks in the cross-correlation which are from echo paths.
Parameters
----------
crosscor_peaks : np.array
With indices of peak locations
acc_peaks_ch1, acc_peaks_ch2: np.array
With centred indices of peak locations of the autocorrelation.
'Centred indices' mean that the 'centre' of the signal is the 0th
index, all those to the left of it have -ve indices and to the right
have +ve.
Returns
-------
p_prime_kk, p_prime_ll: dict
Dictionaries with the echo path delay as key and the corresponding cross-cor
peaks as entries.
'''
peak_pairs_that_match_acc = []
p_prime_kk = {}
p_prime_ll = {}
eta_mu = np.concatenate((acc_peaks_ch1, acc_peaks_ch2))
for focal_peak in crosscor_peaks:
non_focalpeaks = np.argwhere(crosscor_peaks!=focal_peak)
cc_peaks_wo_focal = crosscor_peaks[non_focalpeaks]
for each in cc_peaks_wo_focal:
difference01 = each-focal_peak
acc_match = np.abs(eta_mu-np.abs(difference01))
if np.any(acc_match<=twrm*0.5):
peak_pairs_that_match_acc.append(focal_peak)
peak_pairs_that_match_acc.append(each.tolist()[0])
# save the acc peak into the P_prime list
acc_delay = eta_mu[np.argmin(acc_match)]
# figure out if it's from acc ch1/ch2
correct_channel_acc = [acc_delay in acc_peaks_ch2, acc_delay in acc_peaks_ch1]
if np.logical_and(sum(correct_channel_acc)>0, sum(correct_channel_acc)<2):
if np.argmax(correct_channel_acc)==1:
p_prime_ll[acc_delay] = [focal_peak, each.tolist()[0]]
elif np.argmax(correct_channel_acc)==0:
p_prime_kk[acc_delay] = [each.tolist()[0], focal_peak, ]
else:
raise ValueError('Problem with finding correct acc channel')
return p_prime_kk, p_prime_ll
def gamma_tfrm(autocorr_delay, cc_peak_diff, tfrm):
difference = np.abs(autocorr_delay) - np.abs(cc_peak_diff)
if difference < 0.5*tfrm:
return 1 - (difference/(0.5*tfrm))
else:
return 0
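# Worked sketch of the raster-matching weight above (hypothetical numbers):
# with twrm = 10 samples a perfect match scores 1.0 and the weight falls
# linearly to 0 once the mismatch reaches half the tolerance width.
def _demo_gamma_tfrm():
    for mismatch in (0, 2, 5):
        print(mismatch, gamma_tfrm(mismatch, 0, 10))
    # prints: 0 1.0, 2 0.6, 5 0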
def calculate_quality_value(crosscor_and_peaks, p_primekk_ll, acc_and_peaks, twrm):
'''
Parameters
----------
crosscor_and_peaks : tuple
With (cross_cor, crosscor_peaks)
p_primekk_ll : tuple
With (p_prime_kk, p_prime_ll)
acc_and_peaks : tuple
With (acc_channel1, acc1_peaks, acc_channel2, acc2_peaks).
The autocorrelation peaks here are expected to be 'centred'.
twrm : int>0
The tolerance width of raster matching in samples.
Returns
-------
quality_values : np.array
Same size as the number of crosscor_peaks, with the corresponding
quality score.
'''
cross_cor, cc_peaks = crosscor_and_peaks
p_primekk, p_primell = p_primekk_ll
acc_ch1, acc1_peaks, acc_ch2, acc2_peaks = acc_and_peaks
quality_values = np.zeros(cc_peaks.size)
# where peak1 = eta_{gamma} and peak2 = eta_{mu}
for i,each_ccpeak in enumerate(cc_peaks):
rkl_mu = cross_cor[each_ccpeak]
ch1_autocorr_term = 0
ch2_autocorr_term = 0
for acc_delay, (peak1, peak2) in p_primekk.items():
uncentred = int(acc_ch1.size*0.5)+int(acc_delay)
acc_peak_value = acc_ch1[uncentred]
cc_delay = peak1-peak2
thisdelay_autocorr_term = np.sign(peak1-peak2)*np.abs(acc_peak_value)
thisdelay_autocorr_term *= gamma_tfrm(acc_delay, cc_delay, twrm)
ch1_autocorr_term += thisdelay_autocorr_term
for acc_delay, (peak1, peak2) in p_primell.items():
uncentred = int(acc_ch2.size*0.5)+int(acc_delay)
acc_peak_value = acc_ch2[uncentred]
cc_delay = peak1-peak2
thisdelay_autocorr_term = np.sign(peak1-peak2)*np.abs(acc_peak_value)
thisdelay_autocorr_term *= gamma_tfrm(acc_delay, cc_delay, twrm)
ch2_autocorr_term += thisdelay_autocorr_term
quality_values[i] = rkl_mu + ch1_autocorr_term + ch2_autocorr_term
return quality_values
def filter_cc_peaks_by_plausibility_and_minrkl(mic_pair, crosscor_and_peaks, quality_values,
array_geom, fs, **kwargs):
'''
Keep only the cross-correlation peaks whose quality value is at least the
minimum raw peak value, and whose implied delay is physically plausible
given the inter-mic distance of the pair.
'''
mic_1, mic_2 = mic_pair
cross_cor, cc_peaks = crosscor_and_peaks
min_rkl = np.min(cross_cor[cc_peaks])
quality_peaks = cc_peaks[quality_values>=min_rkl]
intermic_distance = spatial.distance.euclidean(array_geom[mic_1,:],
array_geom[mic_2,:])
vsound = kwargs.get('vsound', 338.0)
max_intermic_delay = intermic_distance/vsound
centred_peaks = quality_peaks - cross_cor.size*0.5
peak_delays = centred_peaks/fs
return quality_peaks[np.abs(peak_delays)<=max_intermic_delay]
#%%
def gamma_tftm(delay, tftm, **kwargs):
'''
Weighting function for the tolerance width of triple matching (TFTM):
returns 0 for |delay| >= tftm, otherwise a cosine (or a user-supplied
'weighting_function') weight.
'''
if np.abs(delay)>=tftm:
return 0
else:
if kwargs.get('weighting_function') is None:
weighting_function = np.cos
theta = 0.25*np.pi*(delay/(tftm*0.5))
return weighting_function(theta)
else:
return kwargs['weighting_function'](delay, tftm)
#%%
def cyclic_tde_sum(tde_ba, tde_bc, tde_ac):
return tde_ba+tde_bc+tde_ac
def calculate_connectivity():
'''
the 'w' term described in equation 24, where broadly speaking:
:math:`w = \Sigma_{all\:consistent\:triples} \Gamma_{TFTM}(cyclic\:sum\:of\:triple)`
'''
def parse_triplet_graph_key(triplet_key):
'''
parses and splits the input string of the following format:
'micA,micB,micC;tde_ba,tde_bc,tde_ac'
Here micA,B,C are >=0 integers, while tde_ba,_bc,_ac are
also integers that can be <=0 or >0.
Parameters
----------
triplet_key : str
See description.
Returns
-------
(mica,micb,micc),(tdeba,tdebc,tdeac): str
'''
mic_ids, tde_values = triplet_key.split(';')
mica, micb, micc = mic_ids.split(',')
tde_a, tde_b, tde_c = tde_values.split(',')
return (mica, micb, micc), (tde_a, tde_b, tde_c)
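# Usage sketch for the key format described above (hypothetical key string):
def _demo_parse_triplet_key():
    mics, tdes = parse_triplet_graph_key('0,1,2;5,-3,-2')
    print(mics)  # ('0', '1', '2')
    print(tdes)  # ('5', '-3', '-2')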
#%%
def find_quadruplet_from_triplet_set(starter_triplet, candidate_triplets, **kwargs):
'''
The candidate triplet list is first cut down to show only those whose
nodes have at least one different microphone.
Then all candidates are checked for two common nodes and the same edge length.
'''
# remove all candidate triplets with the exact same mic numbers
subset_of_triplets = remove_exactly_same_triplets(starter_triplet,
candidate_triplets)
if not len(subset_of_triplets)>0:
return None
for candidate_triplet in subset_of_triplets:
commonality_found = check_for_common_edge_and_nodes(starter_triplet, candidate_triplet)
if commonality_found:
quadruplet = fuse_two_triplets(starter_triplet, candidate_triplet)
return quadruplet
#%%
if __name__ == '__main__':
#%%
# make simulated audio and generate cross-cor + auto-cor
vsound = 338.0
sim_audio, dist_mat, array_geom, _ = simulate_sound_propagation()
tdoas = np.row_stack((dist_mat[0,1:]-dist_mat[0,0],dist_mat[1,1:]-dist_mat[1,0]))/vsound
fs = 192000
multich_cc = generate_multich_crosscorr(sim_audio)
multich_acc = generate_multich_autocorr(sim_audio)
twrm_samples = 10
#%% extract peaks for each cc pair
crosscor_peaks = {}
for each_pair, crosscor in multich_cc.items():
ch_b, ch_a = each_pair
cc_peaks = cc_and_acc_peaks(crosscor)
autocorr_peaks_chb = cc_and_acc_peaks(multich_acc[:,ch_b])
autocorr_peaks_cha = cc_and_acc_peaks(multich_acc[:,ch_a])
pprime_kk_ll = make_p_prime_kk_ll(cc_peaks,
autocorr_peaks_chb,
autocorr_peaks_cha,
twrm_samples)
acc_n_peaks = (multich_acc[:,ch_b], autocorr_peaks_chb,
multich_acc[:,ch_a], autocorr_peaks_cha,)
q_vector = calculate_quality_value((crosscor,cc_peaks),
pprime_kk_ll,
acc_n_peaks,
twrm_samples)
good_cc_peaks = filter_cc_peaks_by_plausibility_and_minrkl(each_pair,
(crosscor, cc_peaks),
q_vector,
array_geom,
fs)
# 'centre' the cross-correlation peaks to get negative
# and positive values for values on the left and right
# of the 0 lag in the centre.
crosscor_peaks[each_pair] = good_cc_peaks-sim_audio.shape[0]*0.5
#%% Now create *all* pair TDOA peaks, using the relation
# TDE_ba = -TDE_ab
comprehensive_crosscorpeaks = {}
for pair, tdes in crosscor_peaks.items():
mic_x, mic_y = pair
comprehensive_crosscorpeaks[(mic_y, mic_x)] = -1*tdes
# and then fuse the dictionary with the old one
comprehensive_crosscorpeaks.update(crosscor_peaks)
#%%
all_triples = list(combinations(range(4), 3))
# for each triple, try out all possible tdoas
tftm_seconds = 1.7e-3 # seconds
tftm_samples = int(tftm_seconds*fs)
consistent_triples = {}
consistent_triple_scores = {}
for each_triple in all_triples:
mic_a, mic_b, mic_c = each_triple
# the equation to test
# 0 = delay_ba + delay_bc + delay_ac
for tde_ba in comprehensive_crosscorpeaks[(mic_b, mic_a)]:
for tde_bc in comprehensive_crosscorpeaks[(mic_b, mic_c)]:
for tde_ac in comprehensive_crosscorpeaks[(mic_a, mic_c)]:
consistency = tde_ba + tde_bc + tde_ac
trip_serialnum = 0
if np.abs(consistency)<=tftm_samples:
key = str(f'{mic_a},{mic_b},{mic_c};{trip_serialnum}')
while key in list(consistent_triples.keys()):
trip_serialnum += 1
key = str(f'{mic_a},{mic_b},{mic_c};{trip_serialnum}')
graph = np.zeros((3,3))
graph[1,0] = tde_ba; graph[0,1] = -tde_ba;
graph[1,2] = tde_bc; graph[2,1] = -tde_bc;
graph[0,2] = tde_ac; graph[2,0] = -tde_ac
df_graph = pd.DataFrame(graph)
df_graph.columns = [mic_a, mic_b, mic_c]
df_graph.index = [mic_a, mic_b, mic_c]
consistency_score = gamma_tftm(consistency, tftm_samples)
consistent_triple_scores[key] = consistency_score
consistent_triples[key] = df_graph
#%% Choose the most consistent triple and proceed to make a quadruplet
# do this multiple times until no more quadruplets can be formed.
import copy
values = list(consistent_triple_scores.values())
sorted_values = sorted(values, reverse=True)
keys = list(consistent_triples.keys())
most_consistent_triplet = keys[values.index(sorted_values[0])]
mic_ids, _ = most_consistent_triplet.split(';')
best_triplet = consistent_triples[most_consistent_triplet]
# now remove this triplet and all other triplets with the same mics in them
candidate_triplets = copy.deepcopy(consistent_triples)
candidate_triplets.pop(most_consistent_triplet)
candidate_triplet_graphs = list(candidate_triplets.keys())
commonedge_triples = []
for each_triplet, graph in candidate_triplets.items():
triplet_ids, _ = each_triplet.split(';')
common_mics = set(triplet_ids.split(',')).intersection(set(mic_ids.split(',')))
common_mics = list([int(each) for each in common_mics])
if len(common_mics) ==2:
print(common_mics, triplet_ids, each_triplet)
# now check for the same edge length betweeen nodes
if graph.loc[common_mics[0],common_mics[1]] == best_triplet.loc[common_mics[0],common_mics[1]]:
print('Quadruplet candidate found')
print(graph)
print(best_triplet)
commonedge_triples.append((triplet_ids, graph))
print(commonedge_triples)
#%%
# Make a quadruplet
#%% Check to see if you can combine the quadruplets if they share a triplet
# between them. If yes, then this will make a sextet
|
import os
import uvicorn
from dotenv import load_dotenv
from fastapi import Depends, FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.security import APIKeyHeader, HTTPBearer
from fastapi_jwt_auth.exceptions import AuthJWTException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.cors import CORSMiddleware
from mtl_accounts.database.conn import db
from mtl_accounts.errors import exceptions as ex
from mtl_accounts.middlewares.token_validator import access_control
from mtl_accounts.middlewares.trusted_hosts import TrustedHostMiddleware
from .routes import auth, users
load_dotenv(verbose=True)
def create_app():
app = FastAPI()
db.init_app(app)
app.add_middleware(middleware_class=BaseHTTPMiddleware, dispatch=access_control)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.add_middleware(TrustedHostMiddleware, allowed_hosts=["*"], except_path=["/health"])
# Defence against HTTP Host Header attacks.
# Middleware that enforces that every request arrives with a properly set Host header.
# except_path: when AWS load balancing is used, health checks come in via the internal IP,
# so without this exception every health check would fail.
# Middleware runs as a stack, so the one added last executes first.
@app.exception_handler(AuthJWTException)
def authjwt_exception_handler(request: Request, exc: AuthJWTException):
return JSONResponse(status_code=exc.status_code, content={"detail": exc.message})
@app.exception_handler(ex.APIException)
def api_exception_handler(request: Request, error: ex.APIException):
error_dict = dict(status=error.status_code, msg=error.msg, detail=error.detail, code=error.code)
return JSONResponse(status_code=error.status_code, content=error_dict)
app.include_router(router=auth.router, tags=["JWT"], prefix="/jwt")
app.include_router(router=users.router, tags=["Users"], prefix="/user")
return app
app = create_app()
# if __name__ == "__main__":
# uvicorn.run("main:app", host="0.0.0.0", port=8080, reload=True)
|
########################################################################################################################
# #
# Header Replace Script #
# #
########################################################################################################################
# #
# This script is used to update the top part of a header in all .SAS files found within a specified directory. #
# The purpose of this is to ensure that the header used across all files in a study is consistent and up to date #
# with the current standard. #
# #
# Should the standard change this script can be used to update all in a given directory to reflect this change. #
# #
# To use this script ensure that this .py file has the header(.txt) file that is to be used present in the same #
# directory. if there is no header available for this script to use, then it will fail. no changes to files will be #
# made. #
# #
# this program will not alter anything in the SAS files, BESIDES the top section of the header. The top section #
# currently begins with a line that starts '/*===========' and ends with the first line that starts '___________' #
# should this change, then the script will need updating. #
# #
# <Insert some README stuff about the cmd line args and stuff...> #
# #
########################################################################################################################
# #
# For any questions, queries or errors that have occurred as a result of this script, please email me and i shall #
# get it sorted! #
# #
########################################################################################################################
import os, argparse
# Needs to be turned into an object...
histNeeded = False
def listAvailableFiles(directory, suffix='.sas'):
""" Creates a list of all files that exist within a directory that are readable """
readableFiles = []
try: # Try listing files in a directory.
dirList = os.listdir(directory)
except FileNotFoundError: # don't break if the dir provided is not found
print('Invalid Directory Specified')
else: # if it's good, continue as planned
for file in dirList:
if file.endswith(suffix) and os.access(directory + "\\" + file, os.R_OK): # i.e. the file can be checked.
readableFiles.append(file)
print(str(len(readableFiles)) + ' files located.')
return readableFiles
# should get around SCCS... 100% shouldn't be done this way...
def setPermissions(directory, files):
""" sets the permissions of all files to 777 """
for file in files:
os.chmod(directory + "\\" + file, 0o777) # gives write access so files can be edited
def readFile(filePath):
""" reads a file, returning an array of lines in the file """
lines = [] # or even lines = [l for l in file]
try:
file = open(filePath)
except FileNotFoundError:
print("Invalid File Path Provided")
else:
for l in file:
lines.append(l)
return lines
def readHeader(filePath, sourceFilePath):
""" Wrapper for read file. checks for special key values in the header. """
lines = readFile(filePath)
newLines = []
for l in lines:
newLines.append(str.replace(l, '%FP%', sourceFilePath))
return newLines
def findExpression(lines, expr, startPos=0):
""" Searches an array of strings for an expression. returning the line idx that contains it"""
idx = -1
if expr == "":
return idx
for i in range(startPos, len(lines)):
if expr in lines[i]:
idx = i
break # Line is found, bail from the loop.
return idx
def updateFile(original, header, startIdx, endIdx):
""" creates a new file with an updated header, then returns it """
newFile = []
for i in range(startIdx): # keep the segment occurring before the start idx
newFile.append(original[i])
newFile.extend(header) # add to the new file with the desired header
for i in range(endIdx + 1, len(original)): # place the rest of the original file that is needed
newFile.append(original[i])
return newFile
def writeFile(filePath, file):
""" writes a file at the specified location """
f = open(filePath, 'w') # Create a blank version of the original file
for l in file: # Write the new lines to the blank file.
f.write(l)
f.close()
def updateHeaders(directory, headerFile, startExp = "/*====", endExp = "_______"):
""" goes through all sas files in a directory and updates their headers """
global histNeeded
print('Updating headers for files in: ' + directory)
fileNames = listAvailableFiles(directory)
setPermissions(directory, fileNames)
for f in fileNames: # go through each file in the directory.
header = readHeader(headerFile, directory + "\\" + f)
path = directory + "\\" + f
fileLines = readFile(path)
start = findExpression(fileLines, startExp) # used to mark the start of the area to replace
if start == -1:
continue # the start of the header was not found. Skip this file since it's not valid. don't wanna ruin stuff
end = findExpression(fileLines, endExp, startPos=start) # used to mark the end of the area to replace
newFile = updateFile(fileLines, header, start, end)
if histNeeded: # This needs to be done better. too many branches.
start = findExpression(newFile, "Version History")
versionHist = readFile('verHist.txt')
if start != -1:
end = findExpression(newFile, "====*/")
newFile = updateFile(newFile, versionHist, start, end)
writeFile(path, newFile)
print('Headers Updated.\n')
def createArgParser():
""" creates a CMD line argument parser with possible options """
parser = argparse.ArgumentParser(description='Replaces Header section of Amgen standard .SAS files.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', '--dir',
help='The [d]irectory location of files needing to have headers replaced')
group.add_argument('-m', '--multiple',
help='Location of the .txt file containing the paths to [m]ultiple directories that need' +
'headers to be replaced')
parser.add_argument('-f', '--file',
help='Location of the .txt header [f]ile containing the new header\'s content',
required=True)
parser.add_argument('-v', '--versionhistory',
help='Wipe the version history present in the header.',
action='store_true')
return parser
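# Example invocations (hypothetical paths and file names, assuming this script is
# saved as header_replace.py next to the header text file):
#   python header_replace.py -d C:\studies\sas_programs -f header.txt
#   python header_replace.py -m dir_list.txt -f header.txt -v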
def updateDirs(filePath, headerFile):
""" Updates the headers for each file located in each Directory located in the given file."""
dirs = readFile(filePath)
# OUTPUT FOR USER.
print(str(len(dirs)) + ' directory locations read in the file:\n' + filePath + '\n')
for d in dirs:
updateHeaders(d.strip('\n'), headerFile)
print('Directories Updated')
########################################################################################################################
# #
# Main #
# #
########################################################################################################################
def main():
""" Main """
global histNeeded
parser = createArgParser()
vars = parser.parse_args()
print('\nUsing the contents of header file:\n' + vars.file + '\n')
histNeeded = vars.versionhistory
header = vars.file
if vars.dir is not None:
updateHeaders(vars.dir, header)
elif vars.multiple is not None:
updateDirs(vars.multiple, header)
if __name__ == '__main__':
main()
|
# https://stackoverflow.com/questions/8899905/count-number-of-occurrences-of-a-substring-in-a-string
# https://www.geeksforgeeks.org/number-substrings-string/
def count_substring(string, sub_string):
k=len(string)
m=len(sub_string)
i=0
l=0
count=0
while l<k:
if string[l:l+m]==sub_string:
count=count+1
l=l+1
return count
if __name__ == '__main__':
string = input().strip()
sub_string = input().strip()
count = count_substring(string, sub_string)
print(count)
# https://www.techiedelight.com/find-longest-substring-given-string-containing-distinct-characters/
# Function to find the longest substring with all
# distinct characters using a sliding window
def findLongestSubstringing(str):
# mark characters present in the current window
window = {}
# stores the longest substring boundaries
begin = end = 0
# `[low…high]` maintain the sliding window boundaries
low = high = 0
while high < len(str):
# if the current character is present in the current window
if window.get(str[high]):
# remove characters from the left of the window till
# we encounter the current character
while str[low] != str[high]:
window[str[low]] = False
low = low + 1
low = low + 1 # remove the current character
else:
# if the current character is not present in the current
# window, include it
window[str[high]] = True
# update the maximum window size if necessary
if end - begin < high - low:
begin = low
end = high
high = high + 1
# return the longest substring found at `str[begin…end]`
return str[begin:end + 1]
if __name__ == '__main__':
str = "abbcdafeegh"
print(findLongestSubstringing(str))
# https://www.geeksforgeeks.org/length-of-the-longest-substring-without-repeating-characters/
# https://www.geeksforgeeks.org/print-longest-substring-without-repeating-characters/
# Method 3 (Linear Time): this solution uses extra space to store the last index of every
# already-visited character. The idea is to scan the string from left to right and keep track of
# the maximum-length non-repeating-character substring seen so far in res. While traversing the
# string, we need two indexes to know the length of the current window:
# 1) Ending index (j): we treat the current index as the ending index.
# 2) Starting index (i): it is the same as for the previous window if the current character was
# not present in that window. To check this, we store the last index of every character in an
# array lastIndex[]. If lastIndex[str[j]] + 1 is greater than the previous start, we update the
# start index i; otherwise we keep i the same.
# Below is the implementation of the above approach:
# Python3 program to find the length
# of the longest substring
# without repeating characters
def longestUniqueSubsttr(string):
# last index of every character
last_idx = {}
max_len = 0
# starting index of current
# window to calculate max_len
start_idx = 0
for i in range(0, len(string)):
# Find the last index of str[i]
# Update start_idx (starting index of current window)
# as maximum of current value of start_idx and last
# index plus 1
if string[i] in last_idx:
start_idx = max(start_idx, last_idx[string[i]] + 1)
# Update result if we get a larger window
max_len = max(max_len, i-start_idx + 1)
# Update last index of current char.
last_idx[string[i]] = i
return max_len
# Driver program to test the above function
string = "geeksforgeeks"
print("The input string is " + string)
length = longestUniqueSubsttr(string)
print("The length of the longest non-repeating character" +
" substring is " + str(length))
# Python3 program to find and print longest
# substring without repeating characters.
# Function to find and print longest
# substring without repeating characters.
def findLongestSubstring(string):
n = len(string)
# starting point of current substring.
st = 0
# maximum length substring without
# repeating characters.
maxlen = 0
# starting index of maximum
# length substring.
start = 0
# Hash Map to store last occurrence
# of each already visited character.
pos = {}
# Last occurrence of first
# character is index 0
pos[string[0]] = 0
for i in range(1, n):
# If this character is not present in hash,
# then this is first occurrence of this
# character, store this in hash.
if string[i] not in pos:
pos[string[i]] = i
else:
# If this character is present in hash then
# this character has previous occurrence,
# check if that occurrence is before or after
# starting point of current substring.
if pos[string[i]] >= st:
# find length of current substring and
# update maxlen and start accordingly.
currlen = i - st
if maxlen < currlen:
maxlen = currlen
start = st
# Next substring will start after the last
# occurrence of current character to avoid
# its repetition.
st = pos[string[i]] + 1
# Update last occurrence of
# current character.
pos[string[i]] = i
# Compare length of last substring with maxlen
# and update maxlen and start accordingly.
if maxlen < i - st:
maxlen = i - st
start = st
# The required longest substring without
# repeating characters is from string[start]
# to string[start+maxlen-1].
return string[start : start + maxlen]
# Driver Code
if __name__ == "__main__":
string = "GEEKSFORGEEKS"
print(findLongestSubstring(string))
# This code is contributed by Rituraj Jain
|
# -*- coding: UTF-8 -*-
from flask import Blueprint, session, request, redirect, url_for, flash, jsonify, make_response, g, get_flashed_messages
from flask.templating import _default_template_ctx_processor
from jinja2 import Environment, PackageLoader
from flask_babel import lazy_gettext as gettext
from fypress.user import level_required, login_required, User, UserEditForm, UserAddForm, UserEditFormAdmin, UserLoginForm
from fypress.folder import FolderForm, Folder
from fypress.media import Media
from fypress.post import Post, SimpleComment, AkismetForm
from fypress.admin.static import messages
from fypress.admin.forms import GeneralSettingsForm, SocialSettingsForm
from fypress.admin.models import Option, Theme
from fypress.utils import get_redirect_target, Paginator
from fypress import __version__, __file__ as __fypress_file__, FyPress
import json
import datetime
admin = Blueprint('admin', __name__, url_prefix='/admin')
fypress = FyPress()
admin_jinja = Environment(
loader=PackageLoader('fypress', '_html/templates/'),
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'],
autoescape=True
)
def render_template(template, **kwargs):
kwargs.update(_default_template_ctx_processor())
kwargs.update({
'url_for': url_for,
'get_flashed_messages': get_flashed_messages,
'_': gettext
})
kwargs.update(dict(options=fypress.options, version=__version__, debug=fypress.config.DEBUG, flask_config=fypress.config))
template = admin_jinja.get_template(template)
return template.render(**kwargs)
@admin.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.get(User.id == session['user_id'])
fypress.options = Option.auto_load()
@admin.after_request
def clear_cache(response):
if fypress.cache:
fypress.cache.clear()
return response
@admin.route('/')
@login_required
def root():
return render_template('admin/index.html', title='Admin')
"""
Force Clear Cache
"""
@admin.route('/cache/')
@level_required(4)
def clear_cache_force():
if fypress.cache:
fypress.cache.clear()
return redirect(request.args.get('ref'))
"""
Login & Logout
"""
@admin.route('/login', methods=['GET', 'POST'])
def login():
if 'user_id' in session:
return redirect('/admin/')
form = UserLoginForm(request.form, next=request.args.get('next'))
if form.validate_on_submit():
login = User.connect(form.data['login'], form.data['password'])
if login:
if form.data['next'] != '':
return redirect(form.data['next'])
else:
return redirect('/admin/')
else:
pass
return render_template('admin/login.html', form=form, title=gettext('Please sign in'))
@admin.route('/logout')
@login_required
def logout():
session.clear()
return redirect(url_for('admin.login'))
"""
Errors & Utils
"""
@admin.route('/back')
def back():
return redirect(get_redirect_target())
def handle_404():
return render_template('admin/404.html', title=gettext('Error: 404')), 404
def handle_403():
return render_template('admin/403.html', title=gettext('Error: 403')), 403
"""
Themes
"""
@admin.route('/themes')
@level_required(4)
def themes():
themes = Theme.load_themes()
return render_template('admin/themes.html', themes=themes, title=gettext('Theme'))
@level_required(4)
@admin.route('/themes/active')
def theme_active():
Option.update('theme', request.args.get('theme'))
fypress.options = Option.auto_load()
return redirect(url_for('admin.themes'))
"""
Settings
"""
@admin.route('/settings/all', methods=['POST', 'GET'])
@level_required(4)
def settings_general():
form = GeneralSettingsForm(obj=Option.get_settings())
if form.validate_on_submit():
for data in form.data:
Option.update(data, form.data[data])
fypress.options = Option.auto_load()
return redirect(url_for('admin.settings_general'))
return render_template('admin/settings_general.html', form=form, title=gettext('General - Settings'))
@admin.route('/settings/social', methods=['POST', 'GET'])
@level_required(4)
def settings_social():
form = SocialSettingsForm(obj=Option.get_settings('social'))
if form.validate_on_submit():
for data in form.data:
Option.update(data, form.data[data])
fypress.options = Option.auto_load()
return redirect(url_for('admin.settings_social'))
return render_template('admin/settings_social.html', form=form, title=gettext('Social - Settings'))
@admin.route('/settings/design', methods=['POST', 'GET'])
def settings_design():
options = Option.get_settings('design')
if request.form:
for data in request.form:
Option.update(data, request.form[data])
fypress.options = Option.auto_load()
return redirect(url_for('admin.settings_design'))
return render_template('admin/settings_design.html', design=options, title=gettext('Design - Settings'))
@admin.route('/settings/reading')
def settings_reading():
return render_template('admin/blank.html', title=gettext('Design - Settings'))
'''
Comments
'''
@admin.route('/comments', methods=['POST', 'GET'])
@admin.route('/comments/all', methods=['POST', 'GET'])
@level_required(4)
def comments():
count_published = SimpleComment.count_filter(SimpleComment.status == 'valid')
count_spam = SimpleComment.count_filter(SimpleComment.status == 'spam')
paginator = Paginator(
query=SimpleComment.filter(SimpleComment.status == (request.args.get('filter') or 'valid')).order_by(SimpleComment.created),
page=request.args.get('page')
)
akismet = Option.get(Option.name == 'akismet')
if akismet:
form = AkismetForm(api_key=akismet.value)
else:
form = AkismetForm()
if form.validate_on_submit():
Option.update('akismet', request.form['api_key'])
fypress.options = Option.auto_load()
return redirect(url_for('admin.comments'))
return render_template('admin/comments.html',
count_published=count_published,
filter=request.args.get('filter'),
count_spam=count_spam,
pages=paginator.links,
akismet=akismet,
form=form,
comments=paginator.items,
title=gettext('Comments')
)
@admin.route('/comments/delete')
def comments_delete():
comment = SimpleComment.get(SimpleComment.id == request.args.get('id'))
SimpleComment.count_comments(comment.id_post, True)
comment.remove()
return redirect(url_for('admin.comments'))
@admin.route('/comments/unspam')
def comments_unspam():
comment = SimpleComment.get(SimpleComment.id == request.args.get('id'))
comment.status = 'valid'
comment.save()
SimpleComment.count_comments(comment.id_post)
return redirect(url_for('admin.comments'))
"""
Posts & Pages
"""
@admin.route('/pages')
@admin.route('/pages/all')
@level_required(1)
def pages():
return posts(True)
@admin.route('/pages/edit', methods=['POST', 'GET'])
@admin.route('/pages/new', methods=['POST', 'GET'])
@level_required(1)
def pages_add():
return posts_add(True)
@admin.route('/posts')
@admin.route('/posts/all')
@level_required(1)
def posts(page=False):
numbers = Post.count_by_status(page)
if not request.args.get('filter'):
query = Post.filter(Post.status << ['draft', 'published'], Post.type == ('page' if page else 'post')).order_by(Post.created)
else:
query = Post.filter(Post.status == request.args.get('filter'), Post.type == ('page' if page else 'post')).order_by(Post.created)
if page:
title = gettext('Page')
else:
title = gettext('Posts')
paginator = Paginator(
query=query,
page=request.args.get('page')
)
if page:
urls = 'admin.pages'
else:
urls = 'admin.posts'
return render_template('admin/posts.html', pages=paginator.links, title=title, posts=paginator.items, numbers=numbers, filter=request.args.get('filter'), page=page, urls=urls)
@admin.route('/posts/delete')
@level_required(4)
def posts_delete():
post = Post.get(Post.id == request.args.get('id'))
if post:
Post.delete(post)
flash(messages['deleted'] + ' (' + str(post) + ')')
return redirect(get_redirect_target())
else:
return handle_404()
@admin.route('/posts/move')
@level_required(1)
def posts_move():
post = Post.get(Post.id == request.args.get('id'))
if post:
post.move(request.args.get('status'))
flash(messages['moved'] + ' to ' + request.args.get('status') + ' (' + str(post) + ')')
return redirect(get_redirect_target())
else:
return handle_404()
@admin.route('/posts/edit', methods=['POST', 'GET'])
@admin.route('/posts/new', methods=['POST', 'GET'])
@level_required(1)
def posts_add(page=False):
post = False
if page:
urls = 'admin.pages'
else:
urls = 'admin.posts'
if request.args.get('edit'):
post = Post.get(Post.id == request.args.get('edit'))
if post:
if post.parent:
return redirect(url_for(urls + '_add', edit=post.parent))
if request.form:
post = Post.update(request.form, post)
flash(messages['updated'] + ' (' + str(post) + ')')
return redirect(url_for(urls + '_add', edit=post.id))
else:
return handle_404()
else:
if request.form:
post = Post.new(request.form)
flash(messages['added'] + ' (' + str(post) + ')')
return redirect(url_for(urls + '_add', edit=post.id))
folders = Folder.get_all()
if page:
title = gettext('New - Page')
else:
title = gettext('New - Post')
return render_template('admin/posts_new.html', folders=folders, post=post, title=title, page=page, urls=urls)
"""
Medias
"""
@admin.route('/medias')
@admin.route('/medias/all')
@level_required(1)
def medias():
if not request.args.get('filter'):
query = Media.select().order_by(Media.modified)
else:
query = Media.filter(Media.type == request.args.get('filter')).order_by(Media.modified)
paginator = Paginator(
query=query,
page=request.args.get('page'),
per_page=12
)
return render_template('admin/medias.html', medias=paginator.items, pages=paginator.links, title=gettext('Library - Medias'))
@admin.route('/medias/add/web')
@level_required(1)
def medias_web():
return render_template('admin/medias_web.html', title=gettext('Add from Web - Medias'))
@admin.route('/medias/add/upload')
@level_required(1)
def medias_upload():
return render_template('admin/medias_upload.html', title=gettext('Medias'))
"""
Folders
"""
@admin.route('/folders', methods=['POST', 'GET'])
@admin.route('/folders/all', methods=['POST', 'GET'])
@level_required(3)
def folders():
folders = Folder.get_all(True)
folder = None
if request.args.get('edit') and request.args.get('edit') != 1:
folder = Folder.get(Folder.id == request.args.get('edit'))
form = FolderForm(obj=folder)
if form.validate_on_submit():
form.populate_obj(folder)
folder.modified = datetime.datetime.now()
folder.save()
flash(messages['updated'] + ' (' + str(folder) + ')')
return redirect(url_for('admin.folders'))
else:
form = FolderForm()
if form.validate_on_submit():
Folder.add(form)
flash(messages['added'] + ' (' + str(folder) + ')')
return redirect(url_for('admin.folders'))
return render_template('admin/folders.html', folders=folders, folder=folder, title=gettext('Categories'), form=form)
"""
Users
"""
@admin.route('/users')
@admin.route('/users/all')
@level_required(4)
def users():
paginator = Paginator(
query=User.select(),
page=request.args.get('page')
)
return render_template('admin/users.html', title=gettext('Users'), users=paginator.items, pages=paginator.links)
@admin.route('/users/edit', methods=['POST', 'GET'])
@level_required(0)
def users_edit(id_user=None):
if not id_user:
id_user = request.args.get('id')
user = User.get(User.id == id_user)
if g.user.status == 4:
form = UserEditFormAdmin(obj=user)
else:
form = UserEditForm(obj=user)
if form.validate_on_submit():
status = user.status
form.populate_obj(user)
# don't allow to change status unless admin.
if g.user.status != 4:
user.status = status
if g.user.status == 4 or g.user.id == user.id:
user.save()
flash(messages['updated'] + ' (' + str(user) + ')')
if g.user.status == 4:
return redirect(url_for('admin.users'))
else:
return redirect(url_for('admin.users_me'))
return render_template('admin/users_edit.html', title=gettext('Edit - User'), user=user, form=form)
@admin.route('/users/new', methods=['POST', 'GET'])
@level_required(4)
def users_new():
form = UserAddForm(request.form)
if form.validate_on_submit():
user = User.create()
form.populate_obj(user)
user.save()
flash(messages['added'] + ' (' + str(user) + ')')
return redirect(url_for('admin.users'))
return render_template('admin/users_new.html', title=gettext('New - User'), form=form)
@admin.route('/users/me', methods=['POST', 'GET'])
@login_required
def users_me():
return users_edit(session.get('user_id'))
"""
POST
"""
@admin.route('/medias/upload', methods=['POST'])
@level_required(1)
def post_media():
return Media.upload(request.files['qqfile'], request.form)
@admin.route('/medias/upload/<uuid>', methods=['POST'])
@level_required(1)
def post_media_delete(uuid):
try:
# handle_delete(uuid)
return jsonify(success=True), 200
except Exception, e:
return jsonify(success=False, error=e.message), 400
"""
AJAX
"""
@admin.route('/medias/get')
@level_required(1)
def ajax_get_media():
media = Media.get(Media.id == request.args.get('id').replace('media_', ''))
result = {}
result['name'] = media.name
result['icon'] = media.icon
result['guid'] = media.guid
if media.type == 'image':
result['var'] = media.data['var']
if media.type == 'oembed':
result['html'] = media.html
return jsonify(data=result)
@admin.route('/medias/oembed/add', methods=['POST'])
@level_required(1)
def ajax_oembed_add():
return Media.add_oembed(request.form)
@admin.route('/medias/oembed', methods=['POST'])
@level_required(1)
def ajax_oembed():
data = request.form.get('data')
return Media.add_from_web(data)
@admin.route('/folders/update', methods=['POST'])
@level_required(3)
def ajax_folders():
data = json.loads(request.form.get('data'))
Folder.update_all(data)
return ''
|
import math
import os
# import matplotlib.pyplot as plt
import numpy as np
import random
import torch
import torchvision
import torchvision.transforms as transforms
from torch import Tensor
from torch.autograd import Function
from torch.nn import init, Module, functional
from torch.nn.parameter import Parameter, UninitializedParameter
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from torch.nn.modules.utils import _single, _pair, _triple, _reverse_repeat_tuple
from typing import Optional, Tuple, List, Union
def hard_sigmoid(x: Tensor):
return torch.clip((x + 1) / 2, 0, 1)
class Binarize(Function):
@staticmethod
def forward(ctx, weight: Tensor, H: float, deterministic: bool=True) -> Tensor:
weight_binary = hard_sigmoid(weight / H)
if deterministic:
weight_binary = torch.round(weight_binary)
else:
weight_binary = torch.bernoulli(weight_binary)
weight_binary = weight_binary.float()
weight_binary = ((2 * weight_binary - 1) * H).float()
return weight_binary
@staticmethod
def backward(ctx, grad_output: Tensor) -> tuple:
# grad_output = doutput/dWb
return grad_output, None, None
binarize = Binarize.apply
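# Minimal sketch (not part of the original module) of the straight-through
# binarizer defined above: the forward pass snaps latent weights to +-H, while
# backward() passes gradients through unchanged (None for H and the flag).
def _demo_binarize():
    w = torch.tensor([-0.7, 0.1, 0.9], requires_grad=True)
    w_b = binarize(w, 1.0, True)
    print(w_b)        # tensor([-1., 1., 1.])
    w_b.sum().backward()
    print(w.grad)     # tensor([1., 1., 1.])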
class BinaryDense(Module):
__constants__ = ["in_features", "out_features"]
in_features: int
out_features: int
weight: Tensor
binary_weight: Tensor
def __init__(self, in_features: int, out_features: int, H: float=1, bias: bool=False, deterministic: bool=True) -> None:
super(BinaryDense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(Tensor(out_features, in_features))
self.H = H
if bias:
self.bias = Parameter(Tensor(out_features))
else:
self.register_parameter("bias", None)
self.deterministic = deterministic
self.reset_parameters()
def reset_parameters(self) -> None:
init.xavier_uniform_(self.weight)
if self.bias is not None:
init.zeros_(self.bias)
def forward(self, input: Tensor) -> Tensor:
weight_binary = binarize(self.weight, self.H, self.deterministic)
return functional.linear(input, weight_binary, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
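# Hypothetical smoke test (not part of the original module) for BinaryDense:
# the float latent weights stay in .weight and a +-H binarized copy is used
# on every forward pass.
def _demo_binary_dense():
    layer = BinaryDense(8, 4, H=1.0, bias=True)
    out = layer(torch.randn(2, 8))
    print(out.shape)  # torch.Size([2, 4])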
class _BinaryConvNd(Module):
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
...
_in_channels: int
_reversed_padding_repeated_twice: List[int]
out_channels: int
kernel_size: Tuple[int, ...]
stride: Tuple[int, ...]
padding: Union[str, Tuple[int, ...]]
dilation: Tuple[int, ...]
transposed: bool
output_padding: Tuple[int, ...]
groups: int
padding_mode: str
weight: Tensor
bias: Optional[Tensor]
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Tuple[int, ...],
dilation: Tuple[int, ...],
transposed: bool,
output_padding: Tuple[int, ...],
groups: int,
bias: bool,
padding_mode: str,
H: float=1.,
deterministic: bool=True) -> None:
super(_BinaryConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
valid_padding_modes, padding_mode))
self.H = H
self.deterministic = deterministic
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
self.padding_mode = padding_mode
# `_reversed_padding_repeated_twice` is the padding to be passed to
# `F.pad` if needed (e.g., for non-zero padding types that are
# implemented as two ops: padding + conv). `F.pad` accepts paddings in
# reverse order than the dimension.
self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.xavier_uniform_(self.weight)
if self.bias is not None:
init.zeros_(self.bias)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return s.format(**self.__dict__)
def __setstate__(self, state):
super(_BinaryConvNd, self).__setstate__(state)
if not hasattr(self, 'padding_mode'):
self.padding_mode = 'zeros'
class BinaryConv2D(_BinaryConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros', # TODO: refine this type
H: float=1.,
deterministic: bool=True
):
kernel_size_ = torch.nn.modules.utils._pair(kernel_size)
stride_ = torch.nn.modules.utils._pair(stride)
padding_ = torch.nn.modules.utils._pair(padding)
dilation_ = torch.nn.modules.utils._pair(dilation)
self.H = H
self.deterministic = deterministic
super(BinaryConv2D, self).__init__(
in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
False, _pair(0), groups, bias, padding_mode)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != 'zeros':
return functional.conv2d(functional.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, bias, self.stride,
_pair(0), self.dilation, self.groups)
return functional.conv2d(input, weight, bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
weight_binary = binarize(self.weight, self.H, self.deterministic)
return self._conv_forward(input, weight_binary, self.bias)
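# Hypothetical smoke test (not part of the original module) for BinaryConv2D,
# with zero padding keeping the spatial size unchanged.
def _demo_binary_conv2d():
    conv = BinaryConv2D(3, 8, kernel_size=3, padding=1)
    out = conv(torch.randn(1, 3, 32, 32))
    print(out.shape)  # torch.Size([1, 8, 32, 32])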
def SquareHingeLoss(input, target):
# From https://forums.fast.ai/t/custom-loss-function/8647/2
zero = torch.Tensor([0]).cuda()
return torch.mean(torch.max(zero, 0.5 - input * target) ** 2)
|
from django.db import migrations
def update_contenttypes_table(apps, schema_editor):
content_type_model = apps.get_model('contenttypes', 'ContentType')
content_type_model.objects.filter(app_label='git').update(app_label='dataset_repo')
class Migration(migrations.Migration):
dependencies = [
('dataset_repo', '0003_gitdata_lfs'),
]
operations = [
migrations.AlterModelTable('gitdata', 'dataset_repo_gitdata'),
migrations.RunPython(update_contenttypes_table),
]
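# The migration above renames the underlying table and rewrites the stale
# contenttypes row in one pass; it is applied with the usual command, e.g.:
#   python manage.py migrate dataset_repo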
|
import sys
between = map(int, sys.stdin.read().split('-'))
def valid1(attempt):
if attempt <= 99999 or attempt > 999999:
return False
consecutive = False
attempt, last_d = divmod(attempt, 10)
while attempt:
attempt, d = divmod(attempt, 10)
if d > last_d:
return False
if d == last_d:
consecutive = True
last_d = d
return consecutive
def valid2(attempt):
if attempt <= 99999 or attempt > 999999:
return False
groups = []
attempt, last_d = divmod(attempt, 10)
groups.append(1)
while attempt:
attempt, d = divmod(attempt, 10)
if d > last_d:
return False
if d == last_d:
groups[-1] += 1
else:
groups.append(1)
last_d = d
return 2 in groups
def part1():
print len(filter(valid1, range(between[0], between[1] + 1)))
def part2():
print len(filter(valid2, range(between[0], between[1] + 1)))
part1()
part2()
|
def is_palindrome(string):
return string.lower() == string[::-1].lower() |
find("1416542452932.png")
doubleClick("1416542464884.png")
type("jenkins\tphoneme\n")
wait(Pattern("1416543208055.png").similar(0.39))
find("1416775268385.png")
doubleClick("1416775295161.png")
find(Pattern("1416773490682.png").targetOffset(136,16))
click("1416775375191.png")
doubleClick(Pattern("1416773344427.png").similar(0.72).targetOffset(14,-106))
type ("create new datum with notes")
doubleClick(Pattern("1416774184215.png").targetOffset(-2,-9))
wait("1416779000083.png")
find(Pattern("1416779840409.png").similar(0.94).targetOffset(-47,38))
click(Pattern("1416779906325.png").targetOffset(-80,-19))
type("Accusative, Causative\t")
click("1416779059145.png")
wait("1416779107273.png")
type("another record\t\n")
click("1416778416325.png")
find("1416778724505.png")
doubleClick("1416778736658.png")
find("1416779237123.png")
click("1416778794225.png")
wait("1416779308662.png")
wait("1416542595201.png")
doubleClick("1416542595201.png")
|
import os
from flask import Flask
from app.database import db
from tqdm.auto import tqdm
from .recommendation_logic.text_processing import get_text_map
import pandas as pd
from .testclass import save_to_db
from .database import redis_db
def create_app():
BASE = os.path.dirname(os.path.abspath(__file__))
texts_db = pd.read_csv(os.path.join(BASE, "./recommendation_logic/articles/cherdak_texts.csv"))
uploaded_map_type = "music"
TEXTS_TO_BE_UPLOADED = 100
print("checking texts availability in database")
for i, data in tqdm(texts_db.iterrows(), total=min(TEXTS_TO_BE_UPLOADED, len(texts_db))):
potential_index_in_text_types_list = "text_maps_"+ uploaded_map_type + "_" + str(i)
check_availability = redis_db.get(potential_index_in_text_types_list)
if check_availability is None:
text_map = get_text_map(data['text'], raw_text_input=True)
save_to_db(text_map, map_type = uploaded_map_type, require_status=False)
else:
#print(potential_index_in_text_types_list , "has already been uploaded. SKIP")
pass
if i == TEXTS_TO_BE_UPLOADED: break
app = Flask(__name__)
# app.config.from_object(os.environ['APP_SETTINGS'])
# db.init_app(app)
# with app.test_request_context():
# db.create_all()
# from app.addtext import addtext_page
# from app.get_text_map import get_text
from app.add_user_info import add_user_info
from app.get_test import get_test
from app.get_recommendation import send_results
from app.get_recommendation import get_recommendation
from app.handle_recommendation import evaluate_recommended_texts
from app.handle_recommendation import get_user_accuracy_record
# app.register_blueprint(addtext_page)
# app.register_blueprint(get_text)
app.register_blueprint(add_user_info)
app.register_blueprint(get_test)
app.register_blueprint(send_results)
app.register_blueprint(get_recommendation)
app.register_blueprint(evaluate_recommended_texts)
app.register_blueprint(get_user_accuracy_record)
app.run(debug=True)
return app
"""
curl --header "Content-Type: application/json; charset=UTF-8" --request POST --data '{"type":"with choice","question":"Вопросик 1", "answers": {"a":"1", "b":"2"}, "true answer":"a"}' http://localhost:5000/addtest
curl --header "Content-Type: application/json; charset=UTF-8" --request GET --data '{ "id":"1"}' http://localhost:5000/gettest
curl --header "Content-Type: application/json; charset=UTF-8" --request POST --data '{"text":"hello ueba"}' http://localhost:5000/addtest
"""
|
#!/usr/bin/env python2
from pwn import *
import sys
context.log_level = 'error'
MSG = """Agent,
Greetings. My situation report is as follows:
My agent identifying code is: .
Down with the Soviets,
006
"""
IV_LEN = 16
HASH_LEN = 20
def encrypt(socket,payload1,payload2):
socket.recvuntil("Send & verify (S)")
socket.sendline("e")
socket.recvuntil("Please enter your situation report:")
socket.sendline(payload1)
socket.recvuntil("Anything else?")
socket.sendline(payload2)
socket.recvuntil("encrypted: ")
cipher = socket.recvline().strip().decode('hex')
return cipher
def verify(socket,cipher):
socket.recvuntil("Send & verify (S)")
socket.sendline("s")
socket.recvuntil("Please input the encrypted message:")
socket.sendline(cipher.encode('hex'))
data = socket.recvuntil("Select an option:")
if "Ooops! Did not decrypt successfully." not in data:
return True
return False
def getFlagLength():
r = remote("2018shell.picoctf.com",14263)
known_len = IV_LEN + len(MSG) + HASH_LEN
cipher_len = len(encrypt(r,'',''))
padding_len = 1
while True:
new_len = len(encrypt(r,padding_len*'A',''))
if new_len > cipher_len:
break
padding_len += 1
r.close()
return cipher_len - padding_len - known_len
"""
IIIIIIIIIIIIIIII
Agent, Greetings
. My situation r
eport is as foll
ows: XXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
My agent identi
fying code is: p
icoCTF{g0_@g3nt0
06!_4950961}. Do
wn with the Sovi
ets, 006 YYYMMMM
MMMMMMMMMMMMMMMM
PPPPPPPPPPPPPPPP
"""
def readSecret(secretLength):
injection1Offset = 69
secretOffset = 100
alignment1Length = (-injection1Offset)%16 # align to a full block
alignment2Length = secretLength+(-secretLength)%16 # just to ensure "real_block_offset" doesn't change later
newSecretOffset = secretOffset+alignment1Length+alignment2Length
hashOffset = newSecretOffset + secretLength + 4 + 16 + 9
alignment3Length = (15-newSecretOffset)%16 # we want first byte of the secret to be the last byte of previous block
alignment4Length = (12-hashOffset)%16 # we want 16 bytes padding
real_block_offset = newSecretOffset+alignment3Length-15
secret = ""
sys.stdout.write("[*] Reading secret...: ")
for i in range(secretLength):
while True:
r = remote("2018shell.picoctf.com",14263)
cipher = encrypt(r,'x'*alignment1Length+'y'*alignment2Length + 'z'*alignment3Length,'a'*alignment4Length)
cipher = cipher[:-16] + cipher[real_block_offset:real_block_offset+16]
if verify(r,cipher):
c = chr(0x10 ^ ord(cipher[-17]) ^ ord(cipher[real_block_offset-1]) ) # plaintext (padding 0x10) ^ cipher[-17] = AES output for our block, AES output for our block ^ block before = plaintext (flag)
secret += c
alignment2Length -= 1
alignment4Length += 1
sys.stdout.write(c)
sys.stdout.flush()
r.close()
break
r.close()
sys.stdout.write("\n")
sys.stdout.flush()
return secret
flagLen = getFlagLength()
print("Flag length: %d"%flagLen)
flag = readSecret(flagLen)
print("Flag: %s"%flag) |
from bs4 import BeautifulSoup
from bs4.element import Tag
import nltk.tokenize
import requests
import re
from typing import List, Optional
import urllib.parse
from .models import MovieData, MovieResult
from .util import is_subslice
def compute_minutes(runtime: str) -> Optional[int]:
m = re.match(r"(?:(\d+)h )?(\d+)m", runtime)
if m is None:
return None
hours = int(m.group(1)) if m.group(1) else 0
minutes = int(m.group(2)) or 0
return 60 * hours + minutes
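# Minimal check of the runtime parser above (hypothetical inputs); the helper
# name is not part of the original module.
def _demo_compute_minutes() -> None:
    assert compute_minutes("2h 12m") == 132
    assert compute_minutes("45m") == 45
    assert compute_minutes("n/a") is None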
def get_movies(search: str) -> List[MovieResult]:
quoted_search = urllib.parse.quote(search)
url = f"https://www.rottentomatoes.com/search?search={quoted_search}"
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Bad request")
doc = BeautifulSoup(r.text, "html.parser")
if not isinstance(
slot := doc.find("search-page-result", attrs={"slot": "movie"}), Tag
):
return []
if not isinstance(ul := slot.find("ul"), Tag):
raise RuntimeError("<ul> not found")
results = ul.find_all("search-page-media-row")
return [
MovieResult(
year=r.get("releaseyear") or None,
title=r.find_all("a")[1].string.strip(),
href=r.find_all("a")[1].get("href"),
)
for r in results
]
def get_movie_data(url: str) -> MovieData:
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Bad request")
doc = BeautifulSoup(r.text, "html.parser")
if not isinstance(scores := doc.find("score-board"), Tag):
raise RuntimeError("<score-board> not found")
if not isinstance(h1 := scores.find("h1", attrs={"slot": "title"}), Tag):
raise RuntimeError("<h1> not found")
title = h1.string
if not isinstance(info := scores.find("p", attrs={"slot": "info"}), Tag):
raise RuntimeError("<p> not found")
# Analyze the info text.
year_text = None
genre_text = None
runtime_text = None
try:
[year_text, genre_text, runtime_text] = info.text.split(", ")
except ValueError:
try:
[genre_text, runtime_text] = info.text.split(", ")
except ValueError:
year_text = info.text
year = int(year_text) if year_text is not None else None
genres = genre_text.split("/") if genre_text is not None else []
runtime = compute_minutes(runtime_text) if runtime_text is not None else None
return MovieData(
audience=scores.get("audiencescore") or None,
tomatometer=scores.get("tomatometerscore") or None,
rating=scores.get("rating"),
genres=genres,
runtime=runtime,
title=title,
year=year,
href=url,
)
def match_movie(
movies: List[MovieResult], name: str, year: Optional[int] = None
) -> List[MovieResult]:
def matches_exact(m: MovieResult) -> bool:
target = m.title.lower()
search = name.lower()
name_matches = search == target
year_matches = year is None or year == m.year
return name_matches and year_matches
def matches_tokens(m: MovieResult) -> bool:
target = nltk.tokenize.word_tokenize(m.title.lower())
search = nltk.tokenize.word_tokenize(name.lower())
name_matches = is_subslice(search, target)
year_matches = year is None or year == m.year
return name_matches and year_matches
def matches_fuzzy(m: MovieResult) -> bool:
target = m.title.lower()
search = name.lower()
name_matches = search in target
year_matches = year is None or year == m.year
return name_matches and year_matches
exact = list(filter(matches_exact, movies))
tokens = list(filter(matches_tokens, movies))
fuzzy = list(filter(matches_fuzzy, movies))
return exact or tokens or fuzzy
|
class LocationCoordinates:
def __init__(
self,
lat,
lon
):
self._lat = lat
self._lon = lon
@property
def lat(self):
if self._lat is not None:
return float(self._lat)
else:
return None
@property
def lon(self):
if self._lon is not None:
return float(self._lon)
else:
return None
def __eq__(self, other):
if self.lat == other.lat and self.lon == other.lon:
return True
return False
class LocationPolygon:
def __init__(
self,
coordinates
):
self.coordinates = []
if coordinates:
for point in coordinates[0]:
self.coordinates.append(LocationCoordinates(point[1], point[0]))
class Location:
def __init__(
self,
id,
name,
polygon,
status,
sort_id,
listed,
external_id,
coordinates=None
):
self.id = id
self.name = name
self.status = status
self.sort_id = sort_id
self.listed = listed
self.external_id = external_id
if coordinates:
self.coordinates = LocationCoordinates(coordinates['lat'], coordinates['lon'])
else:
self.coordinates = None
if polygon:
self.polygon = LocationPolygon(polygon['coordinates'])
else:
self.polygon = None
|
import numpy as np
import functools
import itertools
size = (80, 40)
seed_of_creation = np.array([np.random.rand(size[0]) for x in np.arange(size[0])])
all_true = np.array([[True for x in np.arange(size[1])] for y in np.arange(size[1])])
bool_of_seed = seed_of_creation > 0.4
# iterations = []
def move_all_left(array):
new_array = np.delete(array, 0, 1)
new_array = np.hstack((new_array, np.array([[False] for x in np.arange(array.shape[0])])))
return new_array
def move_all_right(array):
new_array = np.delete(array, -1, 1)
new_array = np.hstack((np.array([[False] for x in np.arange(array.shape[0])]), new_array))
return new_array
def move_all_up(array):
return np.append(np.delete(array, 0, 0), [[False for x in np.arange(array.shape[1])]], axis=0)
def move_all_down(array):
return np.concatenate(([[False for x in np.arange(array.shape[1])]], np.delete(array, -1, 0)), axis=0)
# @functools.lru_cache()
def iterate_cell(merged_cell):
"""Takes a cell and sum of neighbours and returns True/False if the cell is
alive/dead. """
cell = bool(merged_cell & 16)
sum_of_neighbours = merged_cell - 16 if cell else merged_cell
if 2 <= sum_of_neighbours <= 3 and cell:
# if between 2 and 3 nbrs, keep alive
return True
elif not cell and sum_of_neighbours == 3:
# else if 3 nbrs, make alive
return True
elif sum_of_neighbours < 2:
# Else, if lonely, kill
return False
elif sum_of_neighbours > 3:
# Else, if overpopulated, kill
return False
else:
# No reason to keep alive
return False
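# Worked sketch of the packed encoding consumed above (hypothetical helper):
# bit 16 carries the cell itself and the low bits the neighbour count, so
# 16 + 2 = 18 means a live cell with two live neighbours.
def _demo_iterate_cell():
    print(iterate_cell(18))  # True  -> alive with 2 neighbours survives
    print(iterate_cell(3))   # True  -> dead with 3 neighbours is born
    print(iterate_cell(17))  # False -> alive with 1 neighbour dies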
def make_merged_cells(array):
corner_movements = list(itertools.product([move_all_down, move_all_up], [move_all_left, move_all_right]))
simple_movements = list(itertools.product([move_all_right, move_all_left, move_all_up, move_all_down], [lambda x: x]))
movements = corner_movements+simple_movements
arrays = [1*f(g(array)) for f, g in movements]
arrays.append(16*array)
return np.sum(arrays, axis=0)
def next_iteration(array):
return np.vectorize(iterate_cell)(make_merged_cells(array))
def print_grid(grid):
for i in grid:
for j in i:
print("0" if j else "-", end="")
print()
def produce_iterations(no_of_iterations, initial_seed=None):
if initial_seed is None:
initial_seed = bool_of_seed.copy()
iterations = [initial_seed]
# When I do this in the interpreter it doesn't break, but when I do it here, it does. Odd.
#
# TODO figure out why, and fix it.
for x in np.arange(no_of_iterations):
iterations.append(next_iteration(iterations[-1]))
print_grid(iterations[-1])
input("Press Enter to continue")
return iterations[-1]
if __name__ == '__main__':
import sys
produce_iterations(int(sys.argv[-1]))
|
from more_collections import tuples
import pytest
def test_float_tuple1():
assert tuples.typedtuple(float)(.0) == (.0,)
def test_float_tuple1_invalid_arity():
with pytest.raises(ValueError):
assert tuples.typedtuple(float)(.0, 0.)
def test_float_tuple1_invalid_type():
with pytest.raises(ValueError):
assert tuples.typedtuple(float)(1)
def test_float_tuple2():
assert tuples.typedtuple(float, float)(.0, 0.) == (.0, 0.)
def test_float_tuple2_invalid_arity():
with pytest.raises(ValueError):
assert tuples.typedtuple(float, float)('foo')
def test_float_tuple2_invalid_type():
with pytest.raises(ValueError):
assert tuples.typedtuple(float, float)(1, 2)
def test_float_tuple3():
assert tuples.typedtuple(float, float, float)(.0, .0, .0) == (.0, .0, .0)
def test_float_tuple3_invalid_arity():
with pytest.raises(ValueError):
assert tuples.typedtuple(float, float, float)('foo')
def test_float_tuple3_invalid_type():
with pytest.raises(ValueError):
assert tuples.typedtuple(float, float, float)('1', 2, 3)
def test_float_tuple4():
assert tuples.typedtuple(
float, float, float, int)(.0, .0, .0, 4) == (.0, .0, .0, 4)
def test_float_tuple4_invalid_arity():
with pytest.raises(ValueError):
assert tuples.typedtuple(float, float, float, float)(1)
def test_float_tuple4_invalid_type():
with pytest.raises(ValueError):
assert tuples.typedtuple(
float, float, float, float)('1', '2', '3', '4')
|
from rlpyt.agents.dqn.r2d1_agent import R2d1Agent, R2d1AlternatingAgent
from rlpyt.models.dqn.atari_r2d1_model import AtariR2d1Model
from rlpyt.agents.dqn.mixin import Mixin
class AtariR2d1Agent(Mixin, R2d1Agent):
def __init__(self, ModelCls=AtariR2d1Model, **kwargs):
super().__init__(ModelCls=ModelCls, **kwargs)
class AtariR2d1AlternatingAgent(Mixin, R2d1AlternatingAgent):
def __init__(self, ModelCls=AtariR2d1Model, **kwargs):
super().__init__(ModelCls=ModelCls, **kwargs) |
from dart.client.python.dart_client import Dart
from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, FileFormat, DataType, Compression, RowFormat, \
LoadType
if __name__ == '__main__':
dart = Dart('localhost', 5000)
assert isinstance(dart, Dart)
dataset = dart.save_dataset(Dataset(id='PDUZ8EDNOR', data=(DatasetData(
name='beacon_native_app_parsed_gzipped_v03',
table_name='beacon_native_app',
location='s3://example-bucket/prd/beacon/native_app/v3/dwh-delimited/gzipped',
load_type=LoadType.INSERT,
distribution_keys=['created'],
sort_keys=['created', 'eventtype'],
hive_compatible_partition_folders=True,
data_format=DataFormat(
FileFormat.TEXTFILE,
RowFormat.DELIMITED,
delimited_by='\t',
quoted_by='"',
escaped_by='\\',
null_string='NULL',
),
compression=Compression.GZIP,
partitions=[Column('createdpartition', DataType.STRING)],
columns=[
Column('logfileid', DataType.INT),
Column('linenumber', DataType.INT),
Column('created', DataType.TIMESTAMP, date_pattern="yyyy-MM-dd HH:mm:ss"),
Column('remoteip', DataType.VARCHAR, 500),
Column('useragent', DataType.VARCHAR, 2500),
Column('eventtype', DataType.VARCHAR, 255),
Column('appversion', DataType.VARCHAR, 255),
Column('advertiserid', DataType.VARCHAR, 2048),
Column('couponsonpage', DataType.INT),
Column('coupons', DataType.VARCHAR, 10000),
Column('channel', DataType.VARCHAR, 128),
Column('geocouponcount', DataType.BIGINT),
Column('geofence', DataType.VARCHAR, 255),
Column('geofencetimespent', DataType.NUMERIC, precision=14, scale=5),
Column('loginstatus', DataType.VARCHAR, 25),
Column('products', DataType.VARCHAR, 2500),
Column('session', DataType.VARCHAR, 5000),
Column('systemname', DataType.VARCHAR, 2500),
Column('systemversion', DataType.VARCHAR, 2500),
Column('udid', DataType.VARCHAR, 128),
Column('userqualifier', DataType.VARCHAR, 64),
Column('url', DataType.VARCHAR, 5000),
Column('user_uuid', DataType.VARCHAR, 64),
Column('userid', DataType.INT),
Column('searchtype', DataType.VARCHAR, 128),
Column('searchlistterm', DataType.VARCHAR, 512),
Column('searchterm', DataType.VARCHAR, 512),
Column('emailuuid', DataType.VARCHAR, 128),
Column('userfingerprint', DataType.VARCHAR, 64),
Column('locationstatus', DataType.VARCHAR, 128),
Column('pushnotificationstatus', DataType.VARCHAR, 128),
Column('placement', DataType.VARCHAR, 1024),
Column('loc', DataType.VARCHAR, 128),
Column('ppoi0', DataType.VARCHAR, 128),
Column('ppoi1', DataType.VARCHAR, 128),
Column('ppoi2', DataType.VARCHAR, 128),
Column('ppoi3', DataType.VARCHAR, 128),
Column('ppoi4', DataType.VARCHAR, 128),
Column('applaunchnotificationtype', DataType.VARCHAR, 128),
Column('scenarioname', DataType.VARCHAR, 128),
Column('behaviorname', DataType.VARCHAR, 128),
Column('coupontype', DataType.VARCHAR, 128),
Column('couponposition', DataType.VARCHAR, 128),
Column('hasqsrcontent', DataType.VARCHAR, 128),
Column('promptname', DataType.VARCHAR, 128),
Column('locationpermissionchanage', DataType.VARCHAR, 128),
Column('couponproblemtype', DataType.VARCHAR, 128),
Column('storetitle', DataType.VARCHAR, 128),
Column('mallname', DataType.VARCHAR, 128),
Column('restaurantname', DataType.VARCHAR, 128),
Column('milesaway', DataType.VARCHAR, 128),
Column('menuitem', DataType.VARCHAR, 128),
Column('toolname', DataType.VARCHAR, 128),
Column('toolaction', DataType.VARCHAR, 128),
Column('toolstep', DataType.VARCHAR, 128),
Column('mallposition', DataType.VARCHAR, 128),
Column('recommendstorename', DataType.VARCHAR, 128),
Column('recommendstoreposition', DataType.VARCHAR, 128),
Column('favoritestorename', DataType.VARCHAR, 128),
Column('favoritestoreaction', DataType.VARCHAR, 128),
Column('favoritestoreposition', DataType.VARCHAR, 128),
Column('favoritesiteid', DataType.VARCHAR, 128),
Column('receivername', DataType.VARCHAR, 128),
Column('outclickbuttonprompt', DataType.VARCHAR, 128),
Column('datasource', DataType.VARCHAR, 1024),
Column('searchresultcount', DataType.VARCHAR, 128),
Column('searchresultposition', DataType.VARCHAR, 128),
Column('sharetype', DataType.VARCHAR, 128),
Column('daysuntilexpiration', DataType.VARCHAR, 128),
Column('firedate', DataType.VARCHAR, 128),
Column('settingschangevalue', DataType.VARCHAR, 128),
Column('settingschangetype', DataType.VARCHAR, 128),
Column('settingschangelocation', DataType.VARCHAR, 128),
Column('clickaction', DataType.VARCHAR, 128),
Column('tnt', DataType.VARCHAR, 128),
Column('previouspage', DataType.VARCHAR, 2500),
Column('clickpage', DataType.VARCHAR, 2500),
Column('launchreason', DataType.VARCHAR, 128),
Column('taplyticsData', DataType.VARCHAR, 150),
Column('appCampaign', DataType.VARCHAR, 50),
Column('accountMethod', DataType.VARCHAR, 60),
Column('appState', DataType.VARCHAR, 100),
Column('btStatus', DataType.BOOLEAN),
Column('btBeaconId', DataType.VARCHAR, 500),
Column('btBeaconFactoryId', DataType.VARCHAR, 500),
Column('btBeaconName', DataType.VARCHAR, 500),
Column('btTimeSpent', DataType.VARCHAR, 50),
Column('purchaseId', DataType.VARCHAR, 500),
Column('transactionId', DataType.VARCHAR, 500),
Column('outclickLink', DataType.VARCHAR, 1000),
Column('outclickPage', DataType.VARCHAR, 300),
Column('featuredCouponPosition', DataType.INT),
Column('commentCount', DataType.INT),
Column('mallCount', DataType.INT),
Column('clickCount', DataType.INT),
Column('merchantName', DataType.VARCHAR, 100),
Column('merchantPosition', DataType.INT),
Column('couponUuids', DataType.VARCHAR, 10000),
Column('favoriteSiteUuid', DataType.VARCHAR, 50),
Column('deepLinkType', DataType.VARCHAR, 40),
Column('adUnitUuid', DataType.VARCHAR, 50),
],
))))
    print('created dataset: %s' % dataset.id)
|
"""renamed latency table header
Revision ID: a69472dd044e
Revises: 8c0c0ed1d004
Create Date: 2019-03-08 18:27:44.327115
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a69472dd044e'
down_revision = '8c0c0ed1d004'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
|
#!/usr/bin/env python3
"""The script decompiles the given file via RetDec IDA plugin.
The supported decompilation modes are:
full - decompile entire input file.
selective - decompile only the function selected by the given address.
"""
import argparse
import os
import shutil
import signal
import subprocess
import sys
script_full = 'retdec-decompile-full.idc'
script_selective = 'retdec-decompile-selective.idc'
TIMEOUT_RC = 137
def is_windows():
return sys.platform in ('win32', 'msys') or os.name == 'nt'
def print_error_and_die(*msg):
print('Error:', *msg)
sys.exit(1)
def parse_args(args):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
metavar='FILE',
help='The input file.')
parser.add_argument('-o', '--output',
dest='output',
metavar='FILE',
help='Output file (default: file.c). All but the last component must exist.')
parser.add_argument('-i', '--ida',
dest='ida_path',
default=os.environ.get('IDA_PATH'),
help='Path to the IDA directory.')
parser.add_argument('-d', '--idb',
dest='idb_path',
metavar='FILE',
help='IDA DB file associated with input file.')
parser.add_argument('-s', '--select',
dest='selected_addr',
help='Decompile only the function selected by the given address (any address inside function). Examples: 0x1000, 4096.')
parser.add_argument('--ea64',
dest='ea64',
action='store_true',
help='Use 64-bit address space plugin, i.e. retdec64 library and idat64 executable.')
return parser.parse_args(args)
def check_args(args):
if args.ida_path is None:
print_error_and_die('Path to IDA directory was not specified.')
if not os.path.isdir(args.ida_path):
print_error_and_die('Specified path to IDA directory is not a directory:', args.ida_path)
if args.ea64:
args.idat_path = os.path.join(args.ida_path, 'idat64.exe' if is_windows() else 'idat64')
else:
args.idat_path = os.path.join(args.ida_path, 'idat.exe' if is_windows() else 'idat')
if not os.path.exists(args.idat_path):
print_error_and_die('IDA console application does not exist:', args.idat_path)
if args.idb_path and not os.path.exists(args.idb_path):
print_error_and_die('Specified IDB file does not exist:', args.idb_path)
if not args.file or not os.path.exists(args.file):
print_error_and_die('Specified input file does not exist:', args.file)
args.file_dir = os.path.dirname(args.file)
if not args.output:
args.output = args.file + '.c'
args.output_dir = os.path.dirname(args.output)
    if args.output_dir and not os.path.exists(args.output_dir):
print_error_and_die('Output directory does not exist:', args.output_dir)
def main():
args = parse_args(sys.argv[1:])
check_args(args)
if args.file_dir != args.output_dir:
shutil.copy(args.file, args.output_dir)
args.file = os.path.join(args.output_dir, os.path.basename(args.file))
if args.idb_path and os.path.dirname(args.idb_path) != args.output_dir:
shutil.copy(args.idb_path, args.output_dir)
args.idb_path = os.path.join(args.output_dir, os.path.basename(args.idb_path))
rc = 0
cmd = [args.idat_path, '-A']
# Select mode.
if args.selected_addr:
cmd.append('-S' + script_selective + ' "' + args.file + '" ' + args.selected_addr)
# Full mode.
else:
cmd.append('-S' + script_full + ' "' + args.file + '"')
cmd.append(args.file)
print('RUN: ' + ' '.join(cmd))
rc = subprocess.call(cmd)
# Plugin produces "<input>.c" -> copy the file to the desired output.
out = args.file + '.c'
if os.path.exists(out) and out != args.output:
shutil.copyfile(out, args.output)
return rc
if __name__ == "__main__":
    sys.exit(main())
|
import sys
import argparse
import configparser
import traceback
from taky import __version__
from taky.config import load_config
from taky import cli
def arg_parse():
argp = argparse.ArgumentParser(description="Taky command line utility")
argp.add_argument(
"-c",
action="store",
dest="cfg_file",
default=None,
help="Path to configuration file",
)
argp.add_argument(
"--version", action="version", version="%%(prog)s version %s" % __version__
)
subp = argp.add_subparsers(dest="command")
cli.setup_taky_reg(subp)
cli.build_client_reg(subp)
cli.systemd_reg(subp)
cli.status_reg(subp)
args = argp.parse_args()
return (argp, args)
def main():
(argp, args) = arg_parse()
try:
load_config(args.cfg_file)
except (OSError, configparser.ParsingError) as exc:
print(exc, file=sys.stderr)
sys.exit(1)
commands = {
"setup": cli.setup_taky,
"build_client": cli.build_client,
"systemd": cli.systemd,
"status": cli.status,
}
if not args.command:
argp.print_usage()
sys.exit(1)
try:
ret = commands[args.command](args)
except KeyboardInterrupt:
# TODO: Teardown function?
ret = 1
except Exception as exc: # pylint: disable=broad-except
print(f"{args.command} failed: {str(exc)}", file=sys.stderr)
print("Unhandled exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
ret = 1
sys.exit(ret)
if __name__ == "__main__":
main()
|
"""%tensorboard line magic that patches TensorBoard's implementation to make use of Jupyter
TensorBoard server extension providing built-in proxying.
Use:
%load_ext tensorboard
%tensorboard --logdir /logs
"""
import argparse
import uuid
from IPython.display import HTML, display
def _tensorboard_magic(line):
"""Line magic function.
Makes an AJAX call to the Jupyter TensorBoard server extension and outputs
an IFrame displaying the TensorBoard instance.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--logdir", default="/workspace/")
args = parser.parse_args(line.split())
iframe_id = "tensorboard-" + str(uuid.uuid4())
html = """
<!-- JUPYTER_TENSORBOARD_TEST_MARKER -->
<script>
fetch(Jupyter.notebook.base_url + 'api/tensorboard', {
method: 'POST',
contentType: 'application/json',
body: JSON.stringify({ 'logdir': '%s' }),
headers: { 'Content-Type': 'application/json' }
})
.then(res => res.json())
.then(res => {
const iframe = document.getElementById('%s');
iframe.src = Jupyter.notebook.base_url + 'tensorboard/' + res.name;
iframe.style.display = 'block';
});
</script>
<iframe
id="%s"
style="width: 100%%; height: 620px; display: none;"
frameBorder="0">
</iframe>
""" % (
args.logdir,
iframe_id,
iframe_id,
)
display(HTML(html))
def load_ipython_extension(ipython):
"""Deprecated: use `%load_ext tensorboard` instead.
Raises:
RuntimeError: Always.
"""
raise RuntimeError(
"Use '%load_ext tensorboard' instead of '%load_ext tensorboard.notebook'."
)
def _load_ipython_extension(ipython):
"""Load the TensorBoard notebook extension.
Intended to be called from `%load_ext tensorboard`. Do not invoke this
directly.
Args:
ipython: An `IPython.InteractiveShell` instance.
"""
ipython.register_magic_function(
_tensorboard_magic,
magic_kind="line",
magic_name="tensorboard",
)
|
#!/usr/bin/env python3.4
#
import sys
import time
import RPi.GPIO as GPIO
sys.path.append('../../lib')
from GPIOACNode import GPIOACNode
class GPIOOut(GPIOACNode):
    name = "Unknown"
    def setup(self):
        super().setup()
        if self.cnf.offline:
            self.logger.info("TEST: configuring hardware.")
            return
        self.logger.debug("Initializing hardware.")
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, False)
    def gpioout(self, onOff):
        self.logger.info("GPIO[{}]::{} {}".format(self.pin, self.name, onOff))
        if self.cnf.offline:
            return
        GPIO.output(self.pin, onOff)
    def on_exit(self):
        GPIO.output(self.pin, False)
        GPIO.setup(self.pin, 0)
        super().on_exit()
class LED(GPIOOut):
    pin = 23
    def led(self, onOff):
        self.gpioout(onOff)
if __name__ == "__main__":
    acnode = LED()
    acnode.parseArguments()
    acnode.setup()
    acnode.forever = True
    while acnode.forever:
        acnode.led(1)
        time.sleep(0.3)
        acnode.led(0)
        time.sleep(0.3)
    print("Done.")
    sys.exit(0)
|
expected_output = {
'index': {
1: {
'inside_global': '135.0.0.1',
'inside_local': '35.0.0.1',
'outside_global': '---',
'outside_local': '---',
'protocol': '---'
}
}
}
|
import click
import requests
import json
@click.group()
def ah():
"""
Simple CLI for consuming Authors Haven API
"""
pass
@ah.command()
@click.option('--limit', default=10, type=int,
help='Limit how many articles are displayed')
@click.option('--search', type=str,
help='Enter name to filter the articles by author')
def list(limit, search):
"""This returns a list of articles on Authors Haven"""
url_format = 'http://ah-premier-staging.herokuapp.com/api/articles'
try:
if limit and not search:
response = requests.get(url_format + '?page_size=' + str(limit))
elif search and not limit:
response = requests.get(url_format + '?author=' + str(search))
elif search and limit:
response = requests.get(
url_format + '?page_size={}&author={}'.format(limit, search))
else:
response = requests.get(url_format)
response.raise_for_status()
data = response.json()
click.echo(json.dumps(data, indent=2))
except Exception:
click.secho("Oops, articles listing failed", fg="red")
@ah.command()
@click.argument('slug')
def view(slug):
"""
This returns a particular article from the given slug on Authors Haven
"""
url_format = 'http://ah-premier-staging.herokuapp.com/api/articles/{}'
click.echo(slug)
try:
response = requests.get(url_format.format(slug))
response.raise_for_status()
data = response.json()
click.echo(json.dumps(data, indent=2))
except Exception as error:
click.secho(str(error), fg="red")
@ah.command()
@click.argument('slug')
def save(slug):
"""
This saves a particular article from the given slug on Authors Haven
"""
url_format = 'http://ah-premier-staging.herokuapp.com/api/articles/{}'
click.echo(slug)
try:
response = requests.get(url_format.format(slug))
response.raise_for_status()
data = response.json()
click.echo(json.dumps(data, indent=2))
with open('articles/{}.json'.format(slug), 'w') as outfile:
json.dump(data, outfile)
except Exception as error:
click.secho(str(error), fg="red")
|
from bitcoinlib.keys import Address
import binascii
from typing import List, Optional, Union
from bitcoinlib.transactions import Transaction
from xchainpy_client.models import tx_types
from .models.common import DerivePath, UTXO, Witness_UTXO
from . import sochain_api
from xchainpy_util.asset import Asset, AssetLTC
from xchainpy_util.chain import LTCCHAIN
import datetime
TX_EMPTY_SIZE = 4 + 1 + 1 + 4 # 10
TX_INPUT_BASE = 32 + 4 + 1 + 4 # 41
TX_INPUT_PUBKEYHASH = 107
TX_OUTPUT_BASE = 8 + 1 # 9
TX_OUTPUT_PUBKEYHASH = 25
DUST_THRESHOLD = 1000
MIN_TX_FEE = 1000
def parse_tx(tx):
"""Parse tx
:param tx: The transaction to be parsed
:type tx: str
:returns: The transaction parsed from the binance tx
"""
asset = AssetLTC
tx_from = [tx_types.TxFrom(i['address'], i['value']) for i in tx['inputs']]
tx_to = [tx_types.TxTo(i['address'], i['value']) for i in tx['outputs']]
tx_date = datetime.datetime.fromtimestamp(tx['time'])
tx_type = 'transfer'
tx_hash = tx['txid']
tx = tx_types.TX(asset, tx_from, tx_to, tx_date, tx_type, tx_hash)
return tx
def sochain_utxo_to_xchain_utxo(utxo):
"""Get utxo object from a sochain utxo
:param utxo: sochain utxo
:type utxo: dict
:returns: UTXO object
"""
hash = utxo['txid']
index = utxo['output_no']
value = round(float(utxo['value']) * 10 ** 8)
script = bytearray.fromhex(utxo['script_hex']) #utxo['script_hex']
witness_utxo = Witness_UTXO(value, script)
return UTXO(hash, index, witness_utxo)
def validate_address(network, address):
"""Validate the LTC address
:param network: testnet or mainnet
:type network: str
:param address: address
:type address: str
:returns: True or False
"""
try:
address = Address.import_address(address=address, network=(
'litecoin' if network == 'mainnet' else 'litecoin_testnet'))
return True
except:
return False
def network_to_bitcoinlib_format(network: str):
return 'litecoin' if network == 'mainnet' else 'litecoin_testnet'
async def get_balance(sochain_url:str, network:str, address:str):
"""Get the LTC balance of a given address
:param sochain_url: sochain url
:type sochain_url: str
:param address: LTC address
:type address: str
:param network: mainnet or testnet
:type network: str
:returns: The LTC balance of the address
"""
try:
balance = await sochain_api.get_balance(sochain_url, network, address)
if balance == None:
raise Exception("Invalid Address")
return balance
except Exception as err:
raise Exception(str(err))
def calc_fee(fee_rate, memo=''):
"""Calculate fees based on fee rate and memo
:param fee_rate: fee rate
:type fee_rate: int
:param memo: memo
:type memo: str
:returns: The calculated fees based on fee rate and the memo
"""
compiled_memo = compile_memo(memo) if memo else None
fee = get_fee([], fee_rate, compiled_memo)
return fee
def compile_memo(memo: str):
"""Compile memo
:param memo: The memo to be compiled
:type memo: str
:returns: The compiled memo
"""
metadata = bytes(memo, 'utf-8')
metadata_len = len(metadata)
if metadata_len <= 75:
# length byte + data (https://en.bitcoin.it/wiki/Script)
payload = bytearray((metadata_len,))+metadata
    elif metadata_len <= 255:
        # OP_PUSHDATA1 format (one-byte length prefix)
        payload = b"\x4c" + bytearray((metadata_len,)) + metadata
    else:
        payload = b"\x4d" + bytearray((metadata_len % 256,)) + bytearray(
            (int(metadata_len / 256),)) + metadata  # OP_PUSHDATA2 format
compiled_memo = binascii.b2a_hex(payload).decode('utf-8')
compiled_memo = '6a' + compiled_memo
compiled_memo = binascii.unhexlify(compiled_memo)
return compiled_memo
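# Illustrative result of the helper above for a short memo (75 bytes or fewer):
#   compile_memo('hello') == b'\x6a\x05hello'
# i.e. OP_RETURN (0x6a) followed by a one-byte push length and the memo bytes.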
def get_fee(inputs: List[UTXO], fee_rate: float, data: Optional[bytes]=None):
"""Get the transaction fee
:param inputs: the UTXOs
:type inputs: List[UTXO]
:param fee_rate: the fee rate
:type fee_rate: float
:param data: The compiled memo (Optional)
:type data: bytes
:returns: The fee amount
"""
lst_reduce = 0
if len(inputs) > 0:
for x in inputs:
lst_reduce += TX_INPUT_BASE + \
(len(x.witness_utxo.script)
if x.witness_utxo.script else TX_INPUT_PUBKEYHASH)
sum = TX_EMPTY_SIZE + lst_reduce + \
len(inputs) + TX_OUTPUT_BASE + TX_OUTPUT_PUBKEYHASH + \
TX_OUTPUT_BASE + TX_OUTPUT_PUBKEYHASH
if data:
sum = sum + TX_OUTPUT_BASE + len(data)
fee = sum * fee_rate
result = fee if fee > MIN_TX_FEE else MIN_TX_FEE
return result
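# Worked example for the size estimate above, assuming one input whose UTXO carries no
# script bytes and no memo output:
#   sum = 10 (base) + 41 + 107 (input) + 1 (input count) + 2 * (9 + 25) (two outputs) = 227
#   get_fee([utxo], 1)  -> 1000   (227 is below MIN_TX_FEE)
#   get_fee([utxo], 10) -> 2270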
async def scan_UTXOs(sochain_url, network, address):
"""Scan UTXOs from sochain
:param network: testnet or mainnet
:type network: str
:param address: address
:type address: str
:returns: The UTXOs of the given address
"""
utxos = await sochain_api.get_unspent_txs(sochain_url, network, address)
utxos = list(map(sochain_utxo_to_xchain_utxo, utxos))
return utxos
async def get_change(sochain_url, value_out, network, address):
"""Get the balance changes amount
:param value_out: amount you wnat to transfer
:type value_out: int
:param network: testnet or mainnet
:type network: str
:param address: address
:type address: str
:returns: The UTXOs of the given address
"""
try:
balance = await sochain_api.get_balance(sochain_url, network, address)
balance = round(balance[0].amount * 10 ** 8)
change = 0
if balance - value_out > DUST_THRESHOLD:
change = balance - value_out
return change
except Exception as err:
raise Exception(str(err))
async def build_tx(sochain_url, amount, recipient, memo, fee_rate, sender, network):
"""Build transcation
:param sochain_url: sochain url
:type sochain_url: str
:param amount: amount of LTC to transfer
:type amount: int
:param recipient: destination address
:type recipient: str
:param memo: optional memo for transaction
:type memo: str
:param fee_rate: fee rates for transaction
:type fee_rate: int
:param sender: sender's address
:type sender: str
:param network: testnet or mainnet
:type network: str
:returns: transaction
"""
try:
utxos = await scan_UTXOs(sochain_url, network, sender)
if len(utxos) == 0:
raise Exception("No utxos to send")
balance = await sochain_api.get_balance(sochain_url, network, sender)
if not validate_address(network, recipient):
raise Exception('Invalid address')
fee_rate_whole = int(fee_rate)
compiled_memo = None
if memo:
compiled_memo = compile_memo(memo)
fee = get_fee(utxos, fee_rate_whole, compiled_memo)
if fee + amount > round(balance[0].amount * 10 ** 8):
raise Exception('Balance insufficient for transaction')
t = Transaction(network=network_to_bitcoinlib_format(network), witness_type='segwit')
for i in utxos:
t.add_input(prev_txid=i.hash, output_n=i.index,
value=i.witness_utxo.value, witnesses=i.witness_utxo.script)
t.add_output(address=recipient, value=amount)
change = await get_change(sochain_url, amount + fee, network, sender)
if change > 0:
t.add_output(address=sender, value=change)
if compiled_memo:
t.add_output(lock_script=compiled_memo, value=0)
return t, utxos
except Exception as err:
raise Exception(str(err))
async def broadcast_tx(sochain_url, network, tx_hex):
"""Broadcast the transaction
:param network: testnet or mainnet
:type network: str
    :param tx_hex: transaction hex
:type tx_hex: str
:returns: The transaction hash
"""
return await sochain_api.broadcast_tx(sochain_url, network, tx_hex)
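# Rough end-to-end sketch of how the helpers above fit together (the url, addresses and
# amounts are placeholders; signing the inputs happens outside this module, and raw_hex()
# is assumed from bitcoinlib's Transaction):
#   tx, utxos = await build_tx(sochain_url, amount=10000, recipient='tltc1...', memo='',
#                              fee_rate=10, sender='tltc1...', network='testnet')
#   ...sign tx with the sender's key...
#   tx_hash = await broadcast_tx(sochain_url, 'testnet', tx.raw_hex())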
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import textwrap
import six
import pytest
from _pytest.monkeypatch import MonkeyPatch
@pytest.fixture
def mp():
cwd = os.getcwd()
sys_path = list(sys.path)
yield MonkeyPatch()
sys.path[:] = sys_path
os.chdir(cwd)
def test_setattr():
class A(object):
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, monkeypatch.setattr, A, "notexists", 2)
monkeypatch.setattr(A, "y", 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, "y")
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, "x", 2)
assert A.x == 2
monkeypatch.setattr(A, "x", 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath(object):
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
def test_unicode_string(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(ImportError, lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(
AttributeError, lambda: monkeypatch.setattr("os.path.qweqwe", None)
)
def test_unknown_attr_non_raising(self, monkeypatch):
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr("os.path.qweqwe", 42, raising=False)
assert os.path.qweqwe == 42
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A(object):
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
assert not hasattr(A, "x")
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
pytest.raises(AttributeError, monkeypatch.delattr, A, "y")
monkeypatch.delattr(A, "y", raising=False)
monkeypatch.setattr(A, "x", 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
monkeypatch.setitem(d, "y", 1700)
monkeypatch.setitem(d, "y", 1700)
assert d["x"] == 2
assert d["y"] == 1700
monkeypatch.setitem(d, "x", 3)
assert d["x"] == 3
monkeypatch.undo()
assert d["x"] == 1
assert "y" not in d
d["x"] = 5
monkeypatch.undo()
assert d["x"] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
del d["x"]
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, "hello")
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, "x")
assert "x" not in d
monkeypatch.delitem(d, "y", raising=False)
pytest.raises(KeyError, monkeypatch.delitem, d, "y")
assert not d
monkeypatch.setitem(d, "y", 1700)
assert d["y"] == 1700
d["hello"] = "world"
monkeypatch.setitem(d, "x", 1500)
assert d["x"] == 1500
monkeypatch.undo()
assert d == {"hello": "world", "x": 1}
def test_setenv():
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2)
import os
assert os.environ["XYZ123"] == "2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_delenv():
name = "xyz1234"
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, monkeypatch.delenv, name, raising=True)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
class TestEnvironWarnings(object):
"""
os.environ keys and values should be native strings, otherwise it will cause problems with other modules (notably
subprocess). On Python 2 os.environ accepts anything without complaining, while Python 3 does the right thing
and raises an error.
"""
VAR_NAME = u"PYTEST_INTERNAL_MY_VAR"
@pytest.mark.skipif(six.PY3, reason="Python 2 only test")
def test_setenv_unicode_key(self, monkeypatch):
with pytest.warns(
pytest.PytestWarning,
match="Environment variable name {!r} should be str".format(self.VAR_NAME),
):
monkeypatch.setenv(self.VAR_NAME, "2")
@pytest.mark.skipif(six.PY3, reason="Python 2 only test")
def test_delenv_unicode_key(self, monkeypatch):
with pytest.warns(
pytest.PytestWarning,
match="Environment variable name {!r} should be str".format(self.VAR_NAME),
):
monkeypatch.delenv(self.VAR_NAME, raising=False)
def test_setenv_non_str_warning(self, monkeypatch):
value = 2
msg = (
"Value of environment variable PYTEST_INTERNAL_MY_VAR type should be str, "
"but got 2 (type: int); converted to str implicitly"
)
with pytest.warns(pytest.PytestWarning, match=re.escape(msg)):
monkeypatch.setenv(str(self.VAR_NAME), value)
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2, prepend="-")
assert os.environ["XYZ123"] == "2"
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 3, prepend="-")
assert os.environ["XYZ123"] == "3-2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource(
"""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
"""
)
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend("world")
mp.syspath_prepend("hello")
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
old_syspath = sys.path[:]
try:
mp.syspath_prepend("hello world")
mp.undo()
sys.path.append("more hello world")
mp.undo()
assert sys.path[-1] == "more hello world"
finally:
sys.path[:] = old_syspath
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile(
"""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*
"""
)
def test_importerror(testdir):
p = testdir.mkpydir("package")
p.join("a.py").write(
textwrap.dedent(
"""\
import doesnotexist
x = 1
"""
)
)
testdir.tmpdir.join("test_importerror.py").write(
textwrap.dedent(
"""\
def test_importerror(monkeypatch):
monkeypatch.setattr('package.a.x', 2)
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*import error in package.a: No module named {0}doesnotexist{0}*
""".format(
"'" if sys.version_info > (3, 0) else ""
)
)
class SampleNew(object):
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld(object):
# oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize(
"Sample",
[SampleNew, SampleNewInherit, SampleOld, SampleOldInherit],
ids=["new", "new-inherit", "old", "old-inherit"],
)
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, "hello", None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
def test_undo_class_descriptors_delattr():
class SampleParent(object):
@classmethod
def hello(_cls):
pass
@staticmethod
def world():
pass
class SampleChild(SampleParent):
pass
monkeypatch = MonkeyPatch()
original_hello = SampleChild.hello
original_world = SampleChild.world
monkeypatch.delattr(SampleParent, "hello")
monkeypatch.delattr(SampleParent, "world")
assert getattr(SampleParent, "hello", None) is None
assert getattr(SampleParent, "world", None) is None
monkeypatch.undo()
assert original_hello == SampleChild.hello
assert original_world == SampleChild.world
def test_issue1338_name_resolving():
pytest.importorskip("requests")
monkeypatch = MonkeyPatch()
try:
monkeypatch.delattr("requests.sessions.Session.request")
finally:
monkeypatch.undo()
def test_context():
monkeypatch = MonkeyPatch()
import functools
import inspect
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
assert not inspect.isclass(functools.partial)
assert inspect.isclass(functools.partial)
|
import csv
from io import StringIO
def parse_line(value, quotechar=None, **kwargs):
"""
A simple wrapper to parse a single CSV value
"""
quotechar = quotechar or '"'
return next(csv.reader([value], quotechar=quotechar, **kwargs), None)
def line_to_string(value, **kwargs):
"""
A simple wrapper to write one CSV line
"""
fh = StringIO()
csv.writer(fh, **kwargs).writerow(value)
return fh.getvalue()
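# Illustrative round trip using the two wrappers above:
#   parse_line('a,"b,c",d')            -> ['a', 'b,c', 'd']
#   line_to_string(['a', 'b,c', 'd'])  -> 'a,"b,c",d\r\n'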
|
from ._session import SessionCommandFactory
|
"""
Main script for data processing and final dataset generation
@date Oct 16, 2020
@author Inova Ixtepô
"""
from create_final_base import create_final_base, create_list_of_cnpj
def main():
create_list_of_cnpj(number_cnpjs=250000, max_filiais=100)
create_final_base()
if __name__ == '__main__':
main()
|
import os
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
TRAINED_PATH = os.path.join(BASE_PATH, "output/trained_model")
EXPS_PATH = os.path.join(BASE_PATH, "output/experiments")
DATA_DIR = os.path.join(BASE_PATH, 'datasets')
EMB_DIR = os.path.join(BASE_PATH, 'embeddings')
PAD = 0
|
import logging
import time
from google.cloud import logging_v2
from cloud_fn_utilities.gcp.cloud_env import CloudEnv
from cloud_fn_utilities.gcp.vpc_manager import VpcManager
from cloud_fn_utilities.gcp.datastore_manager import DataStoreManager
from cloud_fn_utilities.gcp.firewall_rule_manager import FirewallManager
from cloud_fn_utilities.gcp.pubsub_manager import PubSubManager
from cloud_fn_utilities.gcp.compute_manager import ComputeManager
from cloud_fn_utilities.globals import DatastoreKeyTypes, PubSub, BuildConstants
from cloud_fn_utilities.state_managers.fixed_arena_states import FixedArenaStateManager
from cloud_fn_utilities.server_specific.firewall_server import FirewallServer
from cloud_fn_utilities.server_specific.display_proxy import DisplayProxy
__author__ = "Philip Huff"
__copyright__ = "Copyright 2022, UA Little Rock, Emerging Analytics Center"
__credits__ = ["Philip Huff"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Philip Huff"
__email__ = "[email protected]"
__status__ = "Testing"
class FixedArenaBuild:
def __init__(self, build_id, debug=False):
self.fixed_arena_id = build_id
self.debug = debug
self.env = CloudEnv()
log_client = logging_v2.Client()
log_client.setup_logging()
self.s = FixedArenaStateManager.States
self.pubsub_manager = PubSubManager(PubSub.Topics.CYBER_ARENA)
self.state_manager = FixedArenaStateManager(initial_build_id=self.fixed_arena_id)
self.vpc_manager = VpcManager(build_id=self.fixed_arena_id)
self.firewall_manager = FirewallManager()
self.ds = DataStoreManager(key_type=DatastoreKeyTypes.FIXED_ARENA, key_id=self.fixed_arena_id)
self.fixed_arena = self.ds.get()
if not self.fixed_arena:
logging.error(f"The datastore record for {self.fixed_arena_id} no longer exists!")
raise LookupError
def build_fixed_arena(self):
if not self.state_manager.get_state():
self.state_manager.state_transition(self.s.START)
if self.state_manager.get_state() < self.s.BUILDING_NETWORKS.value:
self.state_manager.state_transition(self.s.BUILDING_NETWORKS)
for network in self.fixed_arena['networks']:
self.vpc_manager.build(network_spec=network)
self.vpc_manager.build(network_spec=BuildConstants.Networks.GATEWAY_NETWORK_CONFIG)
self.state_manager.state_transition(self.s.COMPLETED_NETWORKS)
# Servers are built asynchronously and kicked off through pubsub messages.
if self.state_manager.get_state() < self.s.BUILDING_SERVERS.value:
self.state_manager.state_transition(self.s.BUILDING_SERVERS)
for server in self.fixed_arena['servers']:
server_name = f"{self.fixed_arena_id}-{server['name']}"
                server['parent_id'] = self.fixed_arena_id
server['parent_build_type'] = self.fixed_arena['build_type']
self.ds.put(server, key_type=DatastoreKeyTypes.SERVER, key_id=server_name)
if self.debug:
ComputeManager(server_name=server_name).build()
else:
self.pubsub_manager.msg(handler=PubSub.Handlers.BUILD, action=PubSub.BuildActions.SERVER,
server_name=server_name)
# Don't forget to build the Display Proxy Server!
if self.debug:
DisplayProxy(build_id=self.fixed_arena_id, build_spec=self.fixed_arena).build()
else:
self.pubsub_manager.msg(handler=PubSub.Handlers.BUILD, action=PubSub.BuildActions.DISPLAY_PROXY,
key_type=DatastoreKeyTypes.FIXED_ARENA, build_id=self.fixed_arena_id)
if self.state_manager.get_state() < self.s.BUILDING_FIREWALL_RULES.value:
self.state_manager.state_transition(self.s.BUILDING_FIREWALL_RULES)
self.firewall_manager.build(self.fixed_arena_id, self.fixed_arena['firewall_rules'])
self.state_manager.state_transition(self.s.COMPLETED_FIREWALL_RULES)
if self.fixed_arena.get('firewalls', None) and self.state_manager.get_state() < self.s.BUILDING_FIREWALL.value:
self.state_manager.state_transition(self.s.BUILDING_FIREWALL)
FirewallServer(initial_build_id=self.fixed_arena_id, full_build_spec=self.fixed_arena).build()
self.state_manager.state_transition(self.s.COMPLETED_FIREWALL)
if not self.state_manager.are_server_builds_finished():
self.state_manager.state_transition(self.s.BROKEN)
logging.error(f"Fixed Arena {self.fixed_arena_id}: Timed out waiting for server builds to complete!")
else:
self.state_manager.state_transition(self.s.READY)
logging.info(f"Finished building Fixed Arena {self.fixed_arena_id}!")
|
from squall import Router, Squall
from squall.testclient import TestClient
router = Router()
sub_router = Router(prefix="/items")
app = Squall()
@sub_router.get("/")
def read_item():
return {"id": "foo"}
router.include_router(sub_router)
app.include_router(router)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "Squall", "version": "0.1.0"},
"paths": {
"/items": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Item",
"operationId": "read_item_items_get",
}
}
},
}
client = TestClient(app)
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_path_operation():
response = client.get("/items")
assert response.status_code == 200, response.text
assert response.json() == {"id": "foo"}
|
from django import forms
from allauth.account.forms import LoginForm
class SocialNetworkLoginForm(LoginForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['password'].widget = forms.PasswordInput()
|
class MyList(list):
def __getitem__(self, index):
if index == 0:
raise IndexError
if index > 0:
index -= 1
return list.__getitem__(self, index)
def __setitem__(self, index, value):
if index == 0:
raise IndexError
if index > 0:
index -= 1
list.__setitem__(self, index, value)
if __name__ == '__main__':
x = MyList(['a', 'b', 'c'])
print(x)
print("-" * 10)
x.append('d')
print(x)
print("-" * 10)
x.__setitem__(4, 'e')
print(x)
print("-" * 10)
print(x[1])
print(x.__getitem__(1))
print("-" * 10)
print(x[4])
print(x.__getitem__(4)) |
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
from truco.models import *
from truco.constants import *
def carta_es_valida(carta):
palos_validos = [ESPADA,BASTO,ORO,COPA]
valores_validos = [1,2,3,4,5,6,7,10,11,12]
return carta.palo in palos_validos and carta.valor in valores_validos
class CartaTests(TestCase):
jerarquia_harcoded = [
[Carta(palo=BASTO,valor=4),Carta(palo=ORO,valor=4),Carta(palo=COPA,valor=4),Carta(palo=ESPADA,valor=4)],
[Carta(palo=BASTO,valor=5),Carta(palo=ORO,valor=5),Carta(palo=COPA,valor=5),Carta(palo=ESPADA,valor=5)],
[Carta(palo=BASTO,valor=6),Carta(palo=ORO,valor=6),Carta(palo=COPA,valor=6),Carta(palo=ESPADA,valor=6)],
[Carta(palo=COPA,valor=7),Carta(palo=BASTO,valor=7)],
[Carta(palo=BASTO,valor=10),Carta(palo=ORO,valor=10),Carta(palo=COPA,valor=10),Carta(palo=ESPADA,valor=10)],
[Carta(palo=BASTO,valor=11),Carta(palo=ORO,valor=11),Carta(palo=COPA,valor=11),Carta(palo=ESPADA,valor=11)],
[Carta(palo=BASTO,valor=12),Carta(palo=ORO,valor=12),Carta(palo=COPA,valor=12),Carta(palo=ESPADA,valor=12)],
[Carta(palo=ORO,valor=1),Carta(palo=COPA,valor=1)],
[Carta(palo=BASTO,valor=2),Carta(palo=ORO,valor=2),Carta(palo=COPA,valor=2),Carta(palo=ESPADA,valor=2)],
[Carta(palo=BASTO,valor=3),Carta(palo=ORO,valor=3),Carta(palo=COPA,valor=3),Carta(palo=ESPADA,valor=3)],
[Carta(palo=ORO,valor=7)],
[Carta(palo=ESPADA,valor=7)],
[Carta(palo=BASTO,valor=1)],
[Carta(palo=ESPADA,valor=1)]
]
def test_carta_equidad(self):
a = Carta(palo=BASTO,valor=3)
b = Carta(palo=BASTO,valor=3)
c = Carta(palo=ORO,valor=7)
self.assertTrue(a==b, "No detecta igualdades")
self.assertFalse(b==c, "Detecta igualdades que no son.")
def test_carta_inequidad(self):
a = Carta(palo=BASTO,valor=3)
b = Carta(palo=BASTO,valor=3)
c = Carta(palo=ORO,valor=7)
self.assertFalse(a!=b, "Detecta desigualdades que no son")
self.assertTrue(b!=c, "No detecta desigualdades")
def test_carta_random_correctas(self):
c = Carta()
for i in range(0,200):
c.randomize()
self.assertTrue(carta_es_valida(c), "Genero la carta invalida " + str(c))
def test_carta_jerarquia_pardas(self):
for lista_iguales in self.jerarquia_harcoded:
for carta in lista_iguales:
for otra_carta in lista_iguales:
if carta != otra_carta:
self.assertEqual(carta.jerarquia(),otra_carta.jerarquia(), str(carta) + " y " + str(otra_carta) + " deberian tener la misma jerarquia")
def test_carta_jerarquia_superiores(self):
for i in range(0,len(self.jerarquia_harcoded)):
carta = self.jerarquia_harcoded[i][0]
for j in range(i+1,len(self.jerarquia_harcoded)):
otra_carta = self.jerarquia_harcoded[j][0]
self.assertTrue(carta.jerarquia()<otra_carta.jerarquia(), str(carta) + "deberia perder frente a " + str(otra_carta))
def test_carta_puntos(self):
self.assertEqual(Carta(palo=BASTO,valor=12).puntos(),0)
self.assertEqual(Carta(palo=ORO,valor=11).puntos(),0)
self.assertEqual(Carta(palo=ESPADA,valor=10).puntos(),0)
for i in [1,2,3,4,5,6,7]:
self.assertEqual(Carta(palo=ESPADA,valor=i).puntos(),i)
def hacer_mano(a,b,c):
m = Mano()
m.save()
for carta in [a,b,c]:
carta.mano = m
carta.save()
return m
class ManoTests(TestCase):
def setUp(self):
self.m = Mano()
self.m.save()
self.a = Carta(palo=ESPADA,valor=3,mano=self.m)
self.b = Carta(palo=ESPADA,valor=2,mano=self.m)
self.c = Carta(palo=ESPADA,valor=1,mano=self.m)
self.a.save()
self.b.save()
self.c.save()
def tearDown(self):
self.m.delete()
def test_mano_puntos(self):
        # Case ABC
m = hacer_mano(Carta(palo=BASTO,valor=7),Carta(palo=ORO,valor=2),Carta(palo=COPA,valor=2))
self.assertEqual(m.puntos_del_envido(),7)
m = hacer_mano(Carta(palo=BASTO,valor=2),Carta(palo=ORO,valor=7),Carta(palo=COPA,valor=2))
self.assertEqual(m.puntos_del_envido(),7)
m = hacer_mano(Carta(palo=BASTO,valor=2),Carta(palo=ORO,valor=2),Carta(palo=COPA,valor=7))
self.assertEqual(m.puntos_del_envido(),7)
        # Case AAB ABA BAA
m = hacer_mano(Carta(palo=BASTO,valor=7),Carta(palo=BASTO,valor=2),Carta(palo=ORO,valor=2))
self.assertEqual(m.puntos_del_envido(),29)
m = hacer_mano(Carta(palo=BASTO,valor=2),Carta(palo=ORO,valor=2),Carta(palo=BASTO,valor=7))
self.assertEqual(m.puntos_del_envido(),29)
m = hacer_mano(Carta(palo=ORO,valor=2),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=7))
self.assertEqual(m.puntos_del_envido(),29)
        # Case (AA)A (A)A(A) A(AA)
m = hacer_mano(Carta(palo=BASTO,valor=7),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=1))
self.assertEqual(m.puntos_del_envido(),29)
m = hacer_mano(Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=7))
self.assertEqual(m.puntos_del_envido(),29)
m = hacer_mano(Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=7))
self.assertEqual(m.puntos_del_envido(),29)
def test_mano_jugar_carta(self):
self.m.jugar_carta(self.a.id)
aa = self.m.cartas.get(palo=ESPADA,valor=3)
self.assertTrue(aa.jugada != None)
self.assertEqual(aa.jugada,0)
def test_mano_cartas_jugadas(self):
        # After playing one card
self.m.jugar_carta(self.a.id)
c_ord = self.m.cartas_jugadas()
self.assertTrue(self.a in c_ord)
        # After playing two cards
self.m.jugar_carta(self.c.id)
c_ord = self.m.cartas_jugadas()
self.assertTrue(self.a in c_ord and self.c in c_ord)
        # After playing three cards
self.m.jugar_carta(self.b.id)
c_ord = self.m.cartas_jugadas()
self.assertTrue(self.a in c_ord and self.c in c_ord and self.b in c_ord)
def test_mano_cartas_jugables(self):
self.m.jugar_carta(self.a.id)
self.assertFalse(self.a in self.m.cartas_jugables())
self.assertTrue(self.b in self.m.cartas_jugables())
self.assertTrue(self.c in self.m.cartas_jugables())
def test_mano_asignaciones(self):
u = User.objects.create_user('a','a','a')
p = Partida(nombre='a')
p.save()
e = Equipo(partida=p,pares=True)
e.save()
j = Jugador(user=u,posicion_en_mesa=0,partida=p,equipo=e)
j.save()
m = Mano()
m.asignaciones(j,Carta(palo=ORO,valor=4),Carta(palo=ORO,valor=4),Carta(palo=ORO,valor=4))
self.assertEqual(m.jugador,j)
self.assertEqual(len(m.cartas.all()),3)
def test_mano_comparar_jugada(self):
m = hacer_mano(Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=4),Carta(palo=COPA,valor=5))
m2 = hacer_mano(Carta(palo=BASTO,valor=5),Carta(palo=ORO,valor=4),Carta(palo=ESPADA,valor=1))
m.jugar_carta(m.cartas.get(valor=1).id)
m.jugar_carta(m.cartas.get(valor=4).id)
m.jugar_carta(m.cartas.get(valor=5).id)
m2.jugar_carta(m2.cartas.get(valor=5).id)
m2.jugar_carta(m2.cartas.get(valor=4).id)
m2.jugar_carta(m2.cartas.get(valor=1).id)
self.assertEqual(m.comparar_jugada(m2,0), True)
self.assertEqual(m.comparar_jugada(m2,1), None)
self.assertEqual(m.comparar_jugada(m2,2), False)
class JugadorTests(TestCase):
def setUp(self):
self.us = User.objects.create_user('us',"mail",'us')
self.us2 = User.objects.create_user('us2',"mail",'us2')
self.p = Partida(nombre="test")
self.p.save()
self.e0 = Equipo(partida=self.p,pares=True)
self.e0.save()
self.e1 = Equipo(partida=self.p,pares=False)
self.e1.save()
self.j = Jugador(user=self.us,partida=self.p,posicion_en_mesa=0,equipo=self.e0)
self.j2 = Jugador(user=self.us2,partida=self.p,posicion_en_mesa=0,equipo=self.e1)
self.j.save()
self.j2.save()
def test_jugador_mano_desde_lista(self):
m = hacer_mano(Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=3))
self.j.mano_desde_lista([Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=3)])
self.assertEqual(m,self.j.mano)
def test_jugador_jugadas(self):
self.j.mano_desde_lista([Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=2),Carta(palo=BASTO,valor=3)])
self.j.mano.jugar_carta(self.j.mano.cartas.get(valor=1).id)
self.assertEqual(len(self.j.jugadas()),1)
self.j.mano.jugar_carta(self.j.mano.cartas.get(valor=2).id)
self.j.mano.jugar_carta(self.j.mano.cartas.get(valor=3).id)
self.assertEqual(len(self.j.jugadas()),3)
def test_jugador_comparar(self):
m = hacer_mano(Carta(palo=BASTO,valor=1),Carta(palo=BASTO,valor=4),Carta(palo=COPA,valor=5))
m2 = hacer_mano(Carta(palo=BASTO,valor=5),Carta(palo=ORO,valor=4),Carta(palo=ESPADA,valor=1))
m.jugar_carta(m.cartas.get(valor=1).id)
m.jugar_carta(m.cartas.get(valor=4).id)
m.jugar_carta(m.cartas.get(valor=5).id)
m2.jugar_carta(m2.cartas.get(valor=5).id)
m2.jugar_carta(m2.cartas.get(valor=4).id)
m2.jugar_carta(m2.cartas.get(valor=1).id)
m.jugador = self.j
m.save()
m2.jugador = self.j2
m2.save()
self.assertEqual(self.j.comparar_manos(self.j2),[True,None,False])
class RondaTests(TestCase):
def setUp (self):
for letra in "abc":
u = User.objects.create_user(letra,"[email protected]",letra)
self.us_a = User.objects.get(username='a')
self.us_b = User.objects.get(username='b')
self.us_c = User.objects.get(username='c')
self.p = Partida(nombre="test")
self.p.save()
self.e0 = Equipo(partida=self.p,pares=True)
self.e0.save()
self.e1 = Equipo(partida=self.p,pares=False)
self.e1.save()
self.j_a = Jugador(user=self.us_a,partida=self.p,posicion_en_mesa=0,equipo=self.e0)
self.j_a.save()
Mano(jugador=self.j_a).save()
self.j_b = Jugador(user=self.us_b,partida=self.p,posicion_en_mesa=1,equipo=self.e1)
self.j_b.save()
Mano(jugador=self.j_b).save()
self.r = Ronda(partida=self.p,mano=0)
self.r.save()
Truco(ronda=self.r).save()
Envido(ronda=self.r).save()
def definir_jugadas(self, jugador, jugada1=None, jugada2=None, jugada3=None):
jugador.mano.cartas.all().delete()
if jugada1 != None:
Carta(palo=jugada1[1],valor=jugada1[0],jugada=0,mano=jugador.mano).save()
if jugada2 != None:
Carta(palo=jugada2[1],valor=jugada2[0],jugada=1,mano=jugador.mano).save()
if jugada3 != None:
Carta(palo=jugada3[1],valor=jugada3[0],jugada=2,mano=jugador.mano).save()
def test_ronda_gana_mano(self):
        # Deal the cards
m = hacer_mano(Carta(palo=ORO,valor=4),Carta(palo=BASTO,valor=5),Carta(palo=ESPADA,valor=6))
m.jugador = self.j_a
m.save()
m = hacer_mano(Carta(palo=BASTO,valor=4),Carta(palo=ORO,valor=5),Carta(palo=COPA,valor=6))
m.jugador = self.j_b
m.save()
        # Play the cards
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(valor=4).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(valor=4).id))
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(valor=5).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(valor=5).id))
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(valor=6).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(valor=6).id))
j_a_actualizado = self.p.jugadores.get(id=self.j_a.id)
self.assertEqual(j_a_actualizado.equipo.puntos_partida, 1)
def test_ronda_buscar_ganador(self):
        # A single play
self.definir_jugadas(self.j_a,(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
self.definir_jugadas(self.j_a,(1, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
self.definir_jugadas(self.j_a,(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
        # Two plays
self.definir_jugadas(self.j_a,(1, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(1, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(1, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(1, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(1, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
self.definir_jugadas(self.j_a,(4, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(1, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
        # Three plays
self.definir_jugadas(self.j_a,(1, BASTO),(4, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(1, BASTO),(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(1, BASTO),(1, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(1, BASTO),(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(1, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(1, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(1, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO),(1, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(1, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO),(4, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO),(1, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO),(4, BASTO))
self.assertTrue(self.j_a in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO),(1, BASTO))
self.assertTrue(self.j_b in self.r.buscar_ganador().jugadores.all())
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(4, BASTO),(4, BASTO))
self.assertRaises(IndexError, self.r.buscar_ganador)
def test_ronda_ganar_ronda(self):
        # Deal the cards
m = hacer_mano(Carta(palo=ORO,valor=7),Carta(palo=BASTO,valor=1),Carta(palo=ESPADA,valor=1))
m.jugador = self.j_a
m.save()
m = hacer_mano(Carta(palo=ORO,valor=4),Carta(palo=COPA,valor=4),Carta(palo=ESPADA,valor=4))
m.jugador = self.j_b
m.save()
        # Play the cards
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(palo=ORO).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(palo=ORO).id))
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(palo=ESPADA).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(palo=ESPADA).id))
j_a_actualizado = self.p.jugadores.get(id=self.j_a.id)
self.assertEqual(j_a_actualizado.equipo.puntos_partida, 1)
def test_ronda_repartir(self):
self.r.repartir()
        # We can only check that the hands are not empty
self.assertEqual(len(self.j_a.mano.cartas.all()),3)
self.assertEqual(len(self.j_b.mano.cartas.all()),3)
def test_ronda_tres_rondas(self):
        # Deal the cards
m = hacer_mano(Carta(palo=ORO,valor=7),Carta(palo=BASTO,valor=4),Carta(palo=ESPADA,valor=4))
m.jugador = self.j_a
m.save()
m = hacer_mano(Carta(palo=ORO,valor=4),Carta(palo=BASTO,valor=7),Carta(palo=ESPADA,valor=7))
m.jugador = self.j_b
m.save()
        # Play the cards
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(palo=ORO).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(palo=ORO).id))
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(palo=BASTO).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(palo=BASTO).id))
self.r.accion(self.j_a,str(self.j_a.mano.cartas.get(palo=ESPADA).id))
self.r.accion(self.j_b,str(self.j_b.mano.cartas.get(palo=ESPADA).id))
j_b_actualizado = self.p.jugadores.get(id=self.j_b.id)
self.assertEqual(j_b_actualizado.equipo.puntos_partida, 1)
def test_ronda_jugar_carta(self):
self.r.repartir()
self.r.save()
self.r.accion(self.j_a,str(self.j_a.mano.cartas.latest('id').id))
self.assertTrue(len(self.j_a.mano.cartas_jugadas())>0)
        self.assertEqual(self.j_a.mano.cartas_jugadas()[0].id, self.j_a.mano.cartas.latest('id').id)
self.assertEqual(Ronda.objects.get(id=self.r.id).turno_de(),1)
def test_ronda_ultimo_ganador(self):
        # Tied cards (pardas)
self.definir_jugadas(self.j_a,(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO))
self.assertEqual(self.r.ultimo_ganador(),self.j_a)
        # One play each
self.definir_jugadas(self.j_a,(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO))
self.assertEqual(self.r.ultimo_ganador(),self.j_b)
        # Two plays each
self.definir_jugadas(self.j_a,(4, BASTO),(4, BASTO))
self.definir_jugadas(self.j_b,(4, BASTO),(1, BASTO))
self.assertEqual(self.r.ultimo_ganador(),self.j_b)
        # Unequal number of plays
self.definir_jugadas(self.j_a,(4, BASTO))
self.definir_jugadas(self.j_b,(1, BASTO),(4, BASTO))
self.assertEqual(self.r.ultimo_ganador(),self.j_b)
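# Tests for the envido betting phase: point escalation and the options offered to each player.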
class EnvidoTests(TestCase):
def setUp(self):
p = Partida(nombre='test',puntos_max=15)
p.save()
self.eq0 = Equipo(partida=p,pares=True)
self.eq0.save()
self.eq1 = Equipo(partida=p,pares=False)
self.eq1.save()
self.j_a = Jugador(user=User.objects.create_user('a','a','a'),partida=p,posicion_en_mesa=0,equipo=self.eq0)
self.j_a.save()
self.j_b = Jugador(user=User.objects.create_user('b','b','b'),partida=p,posicion_en_mesa=1,equipo=self.eq1)
self.j_b.save()
Ronda.crear(p,0)
r = Ronda.objects.latest('id')
self.e = r.envido
def test_envido_puntos_quiero(self):
self.assertEqual(self.e.puntos_quiero(),0)
self.e.input(self.j_a, 'envido')
self.assertEqual(self.e.puntos_quiero(),2)
self.e.input(self.j_a, 'envido')
self.assertEqual(self.e.puntos_quiero(),4)
self.e.input(self.j_a, 'real-envido')
self.assertEqual(self.e.puntos_quiero(),7)
self.e.input(self.j_a, 'falta-envido')
self.assertEqual(self.e.puntos_quiero(),15)
def test_envido_botones(self):
        self.assertEqual(sorted(self.e.opciones(self.j_a)), sorted(['Envido','Real Envido','Falta Envido']))
        self.e.input(self.j_a, 'envido')
        self.assertEqual(sorted(self.e.opciones(self.j_b)), sorted(['Envido','Real Envido','Falta Envido','Quiero','No Quiero']))
        self.e.input(self.j_b, 'envido')
        self.assertEqual(sorted(self.e.opciones(self.j_a)), sorted(['Real Envido','Falta Envido','Quiero','No Quiero']))
        self.e.input(self.j_a, 'real-envido')
        self.assertEqual(sorted(self.e.opciones(self.j_b)), sorted(['Falta Envido','Quiero','No Quiero']))
        self.e.input(self.j_b, 'falta-envido')
        self.assertEqual(sorted(self.e.opciones(self.j_a)), sorted(['Quiero','No Quiero']))
def test_envido_puntos_no_quiero(self):
r = Ronda.objects.latest('id')
e = Envido(ronda=r,envido=1,real_envido=False,falta_envido=False)
self.assertEqual(e.puntos_no_quiero(),1)
e = Envido(ronda=r,envido=0,real_envido=True,falta_envido=False)
self.assertEqual(e.puntos_no_quiero(),1)
e = Envido(ronda=r,envido=0,real_envido=False,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),1)
e = Envido(ronda=r,envido=2,real_envido=False,falta_envido=False)
self.assertEqual(e.puntos_no_quiero(),2)
e = Envido(ronda=r,envido=1,real_envido=True,falta_envido=False)
self.assertEqual(e.puntos_no_quiero(),2)
e = Envido(ronda=r,envido=1,real_envido=False,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),2)
e = Envido(ronda=r,envido=0,real_envido=True,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),3)
e = Envido(ronda=r,envido=2,real_envido=False,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),5)
e = Envido(ronda=r,envido=2,real_envido=True,falta_envido=False)
self.assertEqual(e.puntos_no_quiero(),4)
e = Envido(ronda=r,envido=1,real_envido=True,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),5)
e = Envido(ronda=r,envido=2,real_envido=True,falta_envido=True)
self.assertEqual(e.puntos_no_quiero(),7)
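# Tests for the truco betting phase: which options each player sees as the bet escalates.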
class TrucoTests(TestCase):
def setUp(self):
p = Partida(nombre='test',puntos_max=15)
p.save()
self.e0 = Equipo(partida=p,pares=True)
self.e0.save()
self.e1 = Equipo(partida=p,pares=False)
self.e1.save()
self.j_a = Jugador(user=User.objects.create_user('a','a','a'),partida=p,posicion_en_mesa=0,equipo=self.e0)
self.j_a.save()
self.j_b = Jugador(user=User.objects.create_user('b','b','b'),partida=p,posicion_en_mesa=1,equipo=self.e1)
self.j_b.save()
Ronda.crear(p,0)
r = Ronda.objects.latest('id')
self.t = r.truco
def test_truco_botones(self):
        self.assertEqual(sorted(self.t.opciones(self.j_a)), sorted(['Truco']))
        self.t.input(self.j_a, 'truco')
        self.assertEqual(sorted(self.t.opciones(self.j_b)), sorted(['Quiero Retruco','Quiero','No Quiero']))
        # The responding player raises to retruco, after which j_a may raise to vale cuatro.
        self.t.input(self.j_b, 'quiero-retruco')
        self.assertEqual(sorted(self.t.opciones(self.j_a)), sorted(['Quiero Vale Cuatro','Quiero','No Quiero']))
        self.t.input(self.j_a, 'quiero-vale-cuatro')
        self.assertEqual(sorted(self.t.opciones(self.j_b)), sorted(['Quiero','No Quiero']))
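# Module-level helpers shared by the turn-order tests below.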
def mano_jugador(tuplas, jugador):
m = hacer_mano(Carta(valor=tuplas[0][0],palo=tuplas[0][1]),Carta(valor=tuplas[1][0],palo=tuplas[1][1]),Carta(valor=tuplas[2][0],palo=tuplas[2][1]))
m.jugador = jugador
m.save()
def carta_jugador(jugador, num):
return str(jugador.mano.cartas.all()[num].id)
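# Tests for turn order within a round, including the interaction between truco and envido calls.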
class TurnosTests(TestCase):
def setUp(self):
p = Partida(nombre='test',puntos_max=15)
p.save()
self.e0 = Equipo(partida=p,pares=True)
self.e0.save()
self.e1 = Equipo(partida=p,pares=False)
self.e1.save()
self.a = Jugador(user=User.objects.create_user('a','a','a'),partida=p,posicion_en_mesa=0,equipo=self.e0)
self.a.save()
self.b = Jugador(user=User.objects.create_user('b','b','b'),partida=p,posicion_en_mesa=1,equipo=self.e1)
self.b.save()
Ronda.crear(p,0)
self.r = Ronda.objects.latest('id')
def actualizar(self):
self.a = Jugador.objects.get(id=self.a.id)
self.b = Jugador.objects.get(id=self.b.id)
self.r = Ronda.objects.get(id=self.r.id)
def test_turnos_envido_esta_primero(self):
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Truco','Envido','Real Envido','Falta Envido']))
        self.r.accion(self.a, 'truco')
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Quiero Retruco','Quiero','No Quiero','Envido Esta Primero']))
        self.r.accion(self.b, 'envido-esta-primero')
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Envido','Real Envido','Falta Envido','Quiero','No Quiero']))
def test_turnos_truco(self):
mano_jugador([(1,ESPADA),(6,ORO),(5,BASTO)],self.a)
mano_jugador([(7,ESPADA),(4,ORO),(4,BASTO)],self.b); self.actualizar()
        # Player a's turn: calls truco.
        self.assertEqual(self.r.turno_de(),self.a.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Truco','Envido','Real Envido','Falta Envido','Irse al Mazo']))
        self.r.accion(self.a, 'truco'); self.actualizar()
        # Player b's turn: says Quiero.
        self.assertEqual(self.r.turno_de(),self.b.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Quiero','No Quiero','Envido Esta Primero','Quiero Retruco']))
        self.r.accion(self.b, 'quiero'); self.actualizar()
        # Player a's turn: plays their first card.
        self.assertEqual(self.r.turno_de(),self.a.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Irse al Mazo']))
        self.r.accion(self.a, carta_jugador(self.a,0)); self.actualizar()
        # Player b's turn: raises to retruco.
        self.assertEqual(self.r.turno_de(),self.b.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Irse al Mazo','Quiero Retruco']))
        self.r.accion(self.b, 'quiero-retruco'); self.actualizar()
        # Player a's turn: accepts.
        self.assertEqual(self.r.turno_de(),self.a.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Quiero','No Quiero','Quiero Vale Cuatro']))
        self.r.accion(self.a, 'quiero'); self.actualizar()
        # Player b's turn: plays their first card.
        self.assertEqual(self.r.turno_de(),self.b.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Irse al Mazo']))
        self.r.accion(self.b, carta_jugador(self.b,0)); self.actualizar()
        # Player a's turn: raises to vale cuatro.
        self.assertEqual(self.r.turno_de(),self.a.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Irse al Mazo','Quiero Vale Cuatro']))
        self.r.accion(self.a, 'quiero-vale-cuatro'); self.actualizar()
        # Player b's turn: declines, so a's team wins the round.
        self.assertEqual(self.r.turno_de(),self.b.posicion_en_mesa)
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Quiero','No Quiero']))
        self.r.accion(self.b, 'no-quiero'); self.actualizar()
        self.assertEqual(sorted(self.r.opciones(self.a)), sorted(['Continuar']))
        self.assertEqual(sorted(self.r.opciones(self.b)), sorted(['Continuar']))
self.r.accion(self.b, 'continuar'); self.actualizar()
self.assertEqual(self.r.opciones(self.b), [])
self.assertEqual(self.a.equipo.puntos_partida,3)
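# Tests for the Partida (game) model: joining players, table capacity and per-player views of the game.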
class PartidaTests(TestCase):
def setUp(self):
for letra in ["a","b","c"]:
u = User.objects.create_user(letra,"[email protected]",letra)
self.us_a = User.objects.get(username='a')
self.us_b = User.objects.get(username='b')
self.us_c = User.objects.get(username='c')
self.p = Partida(nombre="test")
self.p.save()
self.e0 = Equipo(partida=self.p,pares=True)
self.e0.save()
self.e1 = Equipo(partida=self.p,pares=False)
self.e1.save()
self.j_a = Jugador(user=self.us_a,partida=self.p,posicion_en_mesa=0,equipo=self.e0)
self.j_a.save()
self.j_b = Jugador(user=self.us_b,partida=self.p,posicion_en_mesa=1,equipo=self.e1)
self.j_b.save()
Ronda.crear(self.p,0)
def tearDown(self):
for obj in [self.p,self.j_a,self.j_b]:
obj.delete()
def test_partida_sumar_jugador(self):
p = Partida(nombre="test2")
p.save()
self.assertEqual(len(p.jugadores.all()), 0)
x = User.objects.create_user('x',"[email protected]",'x')
p.sumar_jugador(x)
self.assertEqual(len(p.jugadores.all()), 1)
self.assertEqual(p.jugadores.latest('id').posicion_en_mesa, 0)
y = User.objects.create_user('y',"[email protected]",'y')
p.sumar_jugador(y)
self.assertEqual(len(p.jugadores.all()), 2)
self.assertEqual(p.jugadores.latest('id').posicion_en_mesa, 1)
def test_partida_esta_llena(self):
p = Partida(nombre="test2")
p.save()
self.assertFalse(p.esta_llena())
p.sumar_jugador(User.objects.create_user('rer'))
self.assertFalse(p.esta_llena())
p.sumar_jugador(User.objects.create_user('rer2'))
self.assertTrue(p.esta_llena())
def test_partida_jugador(self):
self.assertEqual(self.j_a, self.p.jugador(self.us_a))
self.assertEqual(None, self.p.jugador(self.us_c))
def test_partida_mostrar(self):
self.p.ultima_ronda.repartir()
dic = self.p.mostrar_partida(self.j_a)
self.assertEqual(dic['mi_mano'],self.j_a.mano)
self.assertEqual(dic['otro_mano'], self.j_b.mano)
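# End-to-end tests that drive the views through Django test clients for two logged-in players.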
class ViewTests(TestCase):
def setUp(self):
for letra in ["a","b","c"]:
u = User.objects.create_user(letra,"[email protected]",letra)
self.client_a = Client()
self.client_a.login(username='a', password='a')
self.client_b = Client()
self.client_b.login(username='b', password='b')
def test_view_sala_create(self):
self.client_a.post('/nueva_sala', {'Nombre':'sasa','Puntos':15,'num_jug':2})
self.assertTrue(Partida.objects.get(nombre='sasa'))
def test_view_sala_join(self):
p = Partida(nombre="sasasa")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
self.assertEqual(len(p.jugadores.all()),2)
def test_view_carta_tirar(self):
p = Partida(nombre="abab")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
j0 = p.jugadores.get(posicion_en_mesa=0)
        # One card, player A
self.client_a.get('/sala/' + str(p.id) + '/' + str(j0.mano.cartas.latest('id').id))
self.assertEqual(len(j0.mano.cartas_jugadas()),1)
p_actualizada = Partida.objects.get(nombre="abab")
self.assertEqual(p_actualizada.ultima_ronda.turno_de(),1)
        # One card, player B
j1 = p.jugadores.get(posicion_en_mesa=1)
self.client_b.get('/sala/' + str(p.id) + '/' + str(j1.mano.cartas.latest('id').id))
self.assertEqual(len(j1.mano.cartas_jugadas()),1)
p_actualizada = Partida.objects.get(nombre="abab")
# self.assertEqual(p_actualizada.ultima_ronda.turno_de(),0)
        # Two cards, player A
self.client_a.get('/sala/' + str(p.id) + '/' + str(j0.mano.cartas.all()[1].id))
# self.assertEqual(len(j0.mano.cartas_jugadas()),2)
p_actualizada = Partida.objects.get(nombre="abab")
# self.assertEqual(p_actualizada.ultima_ronda.turno_de(),1)
def test_view_envido(self):
p = Partida(nombre="abab")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
self.client_a.get('/sala/' + str(p.id) + '/envido')
self.assertTrue(p.ultima_ronda.envido.cantado)
self.client_b.get('/sala/' + str(p.id) + '/quiero')
self.assertFalse(p.ultima_ronda.terminada)
# self.assertEqual(Jugador.objects.get(user=client_a.user).,)
p = Partida(nombre="abab")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
self.client_a.get('/sala/' + str(p.id) + '/envido')
self.assertTrue(p.ultima_ronda.envido.cantado)
self.client_b.get('/sala/' + str(p.id) + '/no-quiero')
self.assertTrue(p.ultima_ronda.envido.terminado)
def test_view_truco(self):
p = Partida(nombre="abab")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
self.client_a.get('/sala/' + str(p.id) + '/truco')
self.assertTrue(p.ultima_ronda.truco.cantado)
self.client_b.get('/sala/' + str(p.id) + '/quiero')
self.assertFalse(p.ultima_ronda.terminada)
# self.assertEqual(Jugador.objects.get(user=client_a.user).,)
p = Partida(nombre="abab")
p.save()
self.client_a.get('/sala/' + str(p.id))
self.client_b.get('/sala/' + str(p.id))
self.client_a.get('/sala/' + str(p.id) + '/truco')
self.assertTrue(p.ultima_ronda.truco.cantado)
self.client_b.get('/sala/' + str(p.id) + '/no-quiero')
self.assertTrue(p.ultima_ronda.terminada) |
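# Production settings overrides. The MySQL connection is parsed from DATABASE_URL,
# which is expected to be defined by the base settings imported below.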
import urlparse
from .base import *
DEBUG = os.getenv("DEBUG", "FALSE") == "TRUE"
TEMPLATE_DEBUG = DEBUG
SHOW_DEBUG_TOOLBAR = False
urlparse.uses_netloc.append("mysql")
url = urlparse.urlparse(DATABASE_URL)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': url.path[1:],  # strip the leading '/' from the URL path
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port or '',  # use the default MySQL port when the URL omits one
    }
}
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
DEFAULT_URL_SCHEME = "https"
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
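# Enable the Sentry (Raven) Django integration only when RAVEN_CONFIG is defined
# (e.g. in the base settings); otherwise the NameError below is silently ignored.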
try:
RAVEN_CONFIG
INSTALLED_APPS += (
'raven.contrib.django.raven_compat',
)
except NameError:
pass
|