max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
perfkitbenchmarker/linux_benchmarks/stress_ng_benchmark.py | inflatador/PerfKitBenchmarker | 0 | 6631251 | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs stress-ng.
From the stress-ng ubuntu documentation:
stress-ng will stress test a computer system in various selectable ways.
It was designed to exercise various physical subsystems of a computer as
well as the various operating system kernel interfaces. stress-ng also has
a wide range of CPU specific stress tests that exercise floating point,
integer, bit manipulation and control flow.
stress-ng manpage:
http://manpages.ubuntu.com/manpages/xenial/man1/stress-ng.1.html
"""
import logging
import numpy
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'stress_ng'
BENCHMARK_CONFIG = """
stress_ng:
description: Runs stress-ng
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_50_gb
"""
STRESS_NG_DIR = '~/stress_ng'
GIT_REPO = 'https://github.com/ColinIanKing/stress-ng'
GIT_TAG_MAP = {
'0.05.23': '54722768329c9f8184c1c98db63435f201377df1', # ubuntu1604
'0.09.25': '2db2812edf99ec80c08edf98ee88806a3662031c', # ubuntu1804
}
VALID_CPU_METHODS = {
'all', 'ackermann', 'bitops', 'callfunc', 'cdouble', 'cfloat',
'clongdouble', 'correlate', 'crc16', 'decimal32', 'decimal64', 'decimal128',
'dither', 'djb2a', 'double', 'euler', 'explog', 'fft', 'fibonacci', 'float',
'fnv1a', 'gamma', 'gcd', 'gray', 'hamming', 'hanoi', 'hyperbolic', 'idct',
'int128', 'int64', 'int32', 'int16', 'int8', 'int128float', 'int128double',
'int128longdouble', 'int128decimal32', 'int128decimal64',
'int128decimal128', 'int64float', 'int64double', 'int64longdouble',
'int32float', 'int32double', 'int32longdouble', 'jenkin', 'jmp', 'ln2',
'longdouble', 'loop', 'matrixprod', 'nsqrt', 'omega', 'parity', 'phi', 'pi',
'pjw', 'prime', 'psi', 'queens', 'rand', 'rand48', 'rgb', 'sdbm', 'sieve',
'sqrt', 'trig', 'union', 'zeta'
}
VALID_STRESSORS = {
'affinity', 'af-alg', 'aio', 'aio-linux', 'apparmor', 'bigheap', 'brk',
'bsearch', 'cache', 'chdir', 'chmod', 'clock', 'clone', 'context', 'cpu',
'cpu-online', 'crypt', 'daemon', 'dentry', 'dir', 'dup', 'epoll', 'eventfd',
'exec', 'fallocate', 'fault', 'fcntl', 'fiemap', 'fifo', 'filename',
'flock', 'fork', 'fp-error', 'fstat', 'futex', 'get', 'getrandom',
'getdent', 'handle', 'hdd', 'heapsort', 'hsearch', 'icache', 'iosync',
'inotify', 'itimer', 'kcmp', 'key', 'kill', 'klog', 'lease', 'link',
'lockbus', 'lockf', 'longjmp', 'lsearch', 'malloc', 'matrix', 'membarrier',
'memcpy', 'memfd', 'mergesort', 'mincore', 'mknod', 'mlock', 'mmap',
'mmapfork', 'mmapmany', 'mremap', 'msg', 'mq', 'nice', 'null', 'numa',
'oom-pipe', 'open', 'personality', 'pipe', 'poll', 'procfs', 'pthread',
'ptrace', 'qsort', 'quota', 'rdrand', 'readahead', 'remap-file-pages',
'rename', 'rlimit', 'seccomp', 'seek', 'sem-posix', 'sem-sysv', 'shm-posix',
'shm-sysv', 'sendfile', 'sigfd', 'sigfpe', 'sigpending', 'sigq', 'sigsegv',
'sigsuspend', 'sleep', 'socket', 'socket-fd', 'socket-pair', 'spawn',
'splice', 'stack', 'str', 'stream', 'switch', 'symlink', 'sync-file',
'sysinfo', 'sysfs', 'tee', 'timer', 'timerfd', 'tsc', 'tsearch', 'udp',
'udp-flood', 'unshare', 'urandom', 'userfaultfd', 'utime', 'vecmath',
'vfork', 'vm', 'vm-rw', 'vm-splice', 'wait', 'wcs', 'xattr', 'yield',
'zero', 'zlib', 'zombie'
}
CPU_SUITE = {
'af-alg', 'bsearch', 'context', 'cpu', 'cpu-online', 'crypt', 'fp-error',
'getrandom', 'heapsort', 'hsearch', 'longjmp', 'lsearch', 'matrix',
'mergesort', 'numa', 'qsort', 'rdrand', 'str', 'stream', 'tsc', 'tsearch',
'vecmath', 'wcs', 'zlib'
}
CPU_CACHE_SUITE = {
'bsearch', 'cache', 'heapsort', 'hsearch', 'icache', 'lockbus', 'lsearch',
'malloc', 'matrix', 'membarrier', 'memcpy', 'mergesort', 'qsort', 'str',
'stream', 'tsearch', 'vecmath', 'wcs', 'zlib'
}
MEMORY_SUITE = {
'bsearch', 'context', 'heapsort', 'hsearch', 'lockbus', 'lsearch', 'malloc',
'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort', 'mincore', 'null',
'numa', 'oom-pipe', 'pipe', 'qsort', 'stack', 'str', 'stream', 'tsearch',
'vm', 'vm-rw', 'wcs', 'zero', 'zlib'
}
# Run the stressors that are each part of all of the compute related stress-ng
# classes: cpu, cpu-cache, and memory.
DEFAULT_STRESSORS = sorted(
CPU_SUITE.intersection(CPU_CACHE_SUITE).intersection(MEMORY_SUITE))
flags.DEFINE_integer('stress_ng_duration', 10,
'Number of seconds to run the test.')
flags.DEFINE_boolean('stress_ng_calc_geomean', True,
'Whether to calculate geomean or not.')
flags.DEFINE_list('stress_ng_custom_stressors', DEFAULT_STRESSORS,
                  'List of stressors to run against. Default combines the cpu, '
                  'cpu-cache, and memory suites.')
flags.DEFINE_list('stress_ng_cpu_methods', [],
                  'List of cpu methods to run with. By default none are run.')
ALL_WORKLOADS = ['small', 'medium', 'large']
flags.DEFINE_list(
'stress_ng_thread_workloads', ['large'],
    'List of thread sizes to run against. Options are '
'small (1 thread total), medium (1 thread per 2 cpus), and '
'large (1 thread per cpu).')
flags.register_validator(
'stress_ng_thread_workloads',
lambda workloads: workloads and set(workloads).issubset(ALL_WORKLOADS))
ALL_VERSIONS = ['0.05.23', '0.09.25']
flags.DEFINE_enum(
'stress_ng_version', '0.09.25', ALL_VERSIONS,
'Stress-ng version to use. Default is 0.09.25 which '
'is the default package on Ubuntu 1804.')
def _GeoMeanOverflow(iterable):
"""Returns the geometric mean.
See https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_logarithms
Args:
iterable: a list of positive floats to take the geometric mean of.
Returns: The geometric mean of the list.
"""
a = numpy.log(iterable)
return numpy.exp(a.sum() / len(a))
def StressngCustomStressorsValidator(stressors):
"""Returns whether or not the list of custom stressors is valid."""
return VALID_STRESSORS.issuperset(set(stressors))
def StressngCpuMethodsValidator(cpu_methods):
"""Returns whether or not the list of cpu methods is valid."""
return ('all_cpu_methods' in cpu_methods or
VALID_CPU_METHODS.issuperset(set(cpu_methods)))
flags.register_validator('stress_ng_custom_stressors',
StressngCustomStressorsValidator)
flags.register_validator('stress_ng_cpu_methods',
StressngCpuMethodsValidator)
def GetConfig(user_config):
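  """Loads the benchmark configuration, merging in any user overrides."""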
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Installs stress-ng on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
vm.InstallPackages(
'build-essential libaio-dev libapparmor-dev libattr1-dev libbsd-dev libcap-dev libgcrypt11-dev libkeyutils-dev libsctp-dev zlib1g-dev'
)
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, STRESS_NG_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(
STRESS_NG_DIR, GIT_TAG_MAP[FLAGS.stress_ng_version]))
vm.RemoteCommand('cd {0} && make && sudo make install'.format(STRESS_NG_DIR))
def _ParseStressngResult(metadata, output, cpu_method=None):
"""Returns stress-ng data as a sample.
Sample output eg:
stress-ng: info: [2566] dispatching hogs: 2 context
stress-ng: info: [2566] successful run completed in 5.00s
stress-ng: info: [2566] stressor bogo ops real time usr time sys
time bogo ops/s bogo ops/s
stress-ng: info: [2566] (secs) (secs) (secs)
(real time) (usr+sys time)
stress-ng: info: [2566] context 22429 5.00 5.49
4.48 4485.82 2249.65
Args:
metadata: metadata of the sample.
output: the output of the stress-ng benchmark.
cpu_method: an optional flag for the cpu method for the cpu stressor.
"""
output_list = output.splitlines()
output_matrix = [i.split() for i in output_list]
if len(output_matrix) != 5:
logging.error('output is missing')
return ''
assert output_matrix[2][-4] == 'bogo' and output_matrix[2][-3] == 'ops/s'
assert output_matrix[3][-4] == '(real' and output_matrix[3][-3] == 'time)'
line = output_matrix[4]
name = line[3]
value = float(line[-2]) # parse bogo ops/s (real time)
if name == 'cpu' and cpu_method:
return sample.Sample(
metric=cpu_method,
value=value,
unit='bogus_ops_sec', # bogus operations per second
metadata=metadata)
return sample.Sample(
metric=name,
value=value,
unit='bogus_ops_sec', # bogus operations per second
metadata=metadata)
def _RunWorkload(vm, num_threads):
"""Runs stress-ng on the target vm.
Args:
vm: The target vm to run on.
num_threads: Number of instances of stressors to launch.
Returns:
A list of sample.Sample objects.
"""
metadata = {
'duration_sec': FLAGS.stress_ng_duration,
'threads': num_threads,
'version': FLAGS.stress_ng_version,
}
samples = []
values_to_geomean_list = []
stressors = FLAGS.stress_ng_custom_stressors
for stressor in stressors:
cmd = ('stress-ng --{stressor} {numthreads} --metrics-brief '
'-t {duration}'.format(
stressor=stressor,
numthreads=num_threads,
duration=FLAGS.stress_ng_duration))
stdout, stderr = vm.RemoteCommand(cmd)
# TODO(user): Find the actual stress-ng version that changes output to
# stderr instead of stdout
if FLAGS.stress_ng_version > '0.05.23':
stdout = stderr
stressng_sample = _ParseStressngResult(metadata, stdout)
if stressng_sample:
samples.append(stressng_sample)
values_to_geomean_list.append(stressng_sample.value)
cpu_methods = (VALID_CPU_METHODS
if 'all_cpu_methods' in FLAGS.stress_ng_cpu_methods
else FLAGS.stress_ng_cpu_methods)
for cpu_method in cpu_methods:
cmd = ('stress-ng --cpu {numthreads} --metrics-brief '
'-t {duration} --cpu-method {cpu_method}'.format(
numthreads=num_threads,
duration=FLAGS.stress_ng_duration,
cpu_method=cpu_method))
stdout, _ = vm.RemoteCommand(cmd)
stressng_sample = _ParseStressngResult(metadata, stdout, cpu_method)
if stressng_sample:
samples.append(stressng_sample)
values_to_geomean_list.append(stressng_sample.value)
if FLAGS.stress_ng_calc_geomean:
geomean_metadata = metadata.copy()
geomean_metadata['stressors'] = stressors
# True only if each stressor provided a value
geomean_metadata['valid_run'] = (
len(values_to_geomean_list) == len(stressors) + len(cpu_methods))
geomean_sample = sample.Sample(
metric='STRESS_NG_GEOMEAN',
value=_GeoMeanOverflow(values_to_geomean_list),
unit='bogus_ops_sec',
metadata=geomean_metadata)
samples.append(geomean_sample)
return samples
def Run(benchmark_spec):
"""Runs stress-ng on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vm = benchmark_spec.vms[0]
samples = []
for workload in FLAGS.stress_ng_thread_workloads:
if workload == 'small':
samples.extend(_RunWorkload(vm, 1))
elif workload == 'medium':
      # Use integer division so the stressor instance count stays a whole number.
      samples.extend(_RunWorkload(vm, vm.NumCpusForBenchmark() // 2))
elif workload == 'large':
samples.extend(_RunWorkload(vm, vm.NumCpusForBenchmark()))
return samples
def Cleanup(benchmark_spec):
"""Cleans up stress-ng from the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(STRESS_NG_DIR))
| en | 0.768055 | 1.784285 | 2
mpms/src/crawler.py | dadosjusbr/coletores | 18 | 6631252 | import pathlib
import os
import sys
from time import sleep
import shutil
from selenium import webdriver
from selenium.webdriver.common.by import By
BASE_URL = 'https://transparencia.mpms.mp.br/QvAJAXZfc/opendoc.htm?document=portaltransparencia%5Cportaltransparencia.qvw&lang=pt-BR&host=QVS%40srv-1645&anonymous=true'
MONTHS = ['Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun',
'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez']
def crawl(year, month, driver_path, output_path):
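    """Downloads the remuneration and, when available, indemnification ('indenizacao') reports.
    Args:
        year: year to collect, as a string (e.g. '2020').
        month: month to collect, as a string ('1' to '12').
        driver_path: chromedriver path relative to the working directory.
        output_path: directory where the downloaded .xlsx files are saved.
    Returns:
        A list with the paths of the downloaded files.
    """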
file = []
pathlib.Path(output_path).mkdir(exist_ok=True)
driver = setup_driver(driver_path, output_path)
find_paycheck(driver)
select_remuneration(driver)
if(year != '2021'):
select_year(year, driver)
    # Use the month passed as a parameter to get its string equivalent
select_month(MONTHS[int(month) - 1], driver)
file.append(download(output_path, driver, year, month, 'remuneracao'))
if year == '2020' or year == '2021' or (year == '2019' and int(month)>=7):
select_indemnization(driver)
if(year != '2021'):
select_year(year, driver)
        # Use the month passed as a parameter to get its string equivalent
select_month(MONTHS[int(month) - 1], driver)
file.append(download(output_path, driver, year, month, 'indenizacao'))
return file
def setup_driver(driver_path, output_path):
    # Setting the directories to be used by Selenium
current_directory = os.getcwd()
path_chrome = current_directory + driver_path
path_prefs = output_path
# Attributing the paths to the webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"')
chrome_options.add_experimental_option('prefs', {
'download.default_directory': path_prefs,
'download.prompt_for_download': False
})
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-setuid-sandbox')
chrome_options.add_argument('start-maximized')
return webdriver.Chrome(executable_path=path_chrome, chrome_options=chrome_options)
def find_paycheck(driver):
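    """Opens the transparency portal and clicks through to the payroll document."""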
driver.get(BASE_URL)
sleep(15)
# find_main_contain = driver.find_element_by_css_selector('.QvPageBody')
find_div_by_id = driver.find_element_by_class_name('Document_TX28')
selected_div_qvcontent = find_div_by_id.find_elements_by_class_name(name='QvContent')[0]
find_div_clickable = selected_div_qvcontent.find_element_by_class_name(name='TextObject')
find_div_clickable.click()
sleep(3)
def select_remuneration(driver):
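    """Clicks the dashboard element that opens the remuneration report."""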
find_div_by_id = driver.find_element_by_id('26')
selected_div_qvcontent = find_div_by_id.find_elements_by_class_name(name='QvContent')[0]
find_div_clickable = selected_div_qvcontent.find_element_by_class_name(name='TextObject')
find_div_clickable.click()
sleep(3)
def select_indemnization(driver):
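    """Clicks the dashboard element that opens the indemnification report."""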
find_div_by_id = driver.find_element_by_id('83')
selected_div_qvcontent = find_div_by_id.find_elements_by_class_name(name='QvContent')[0]
find_div_clickable = selected_div_qvcontent.find_element_by_class_name(name='TextObject')
find_div_clickable.click()
sleep(3)
def select_year(year, driver):
    # Used to select the year dropdown div and then the desired year
div_year = driver.find_element(By.XPATH, '//*[@title="Ano"]/div')
div_year.click()
sleep(1)
year_selected = driver.find_element(By.XPATH, f'//*[@title="{year}"]')
year_selected.click()
sleep(2)
def select_month(month, driver):
    # Selecting a month that is already selected raises an error, so it is simply ignored
try:
        # Used to select the month dropdown div and then the desired month
div_month = driver.find_element(By.XPATH, '//*[@title="Mês"]/div')
div_month.click()
sleep(1)
month_selected = driver.find_element(By.XPATH, f'//*[@title="{month}"]')
month_selected.click()
sleep(2)
except:
pass
def download(output_path, driver, year, month, name):
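    """Exports the current report to Excel and returns the renamed file path."""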
n1 = driver.find_element(By.XPATH, "//*[@title='Enviar para Excel']")
n1.click()
sleep(15)
file_name = format_filename(output_path, year, month, name)
return file_name
def format_filename(output_path, year, month, name):
# Identifying the name of the last downloaded file
filename = max([os.path.join(output_path, f) for f in os.listdir(output_path)],
key=os.path.getctime)
# renaming the file properly, according to the payroll
new_filename = name + "-" + year + '-' + month + ".xlsx"
    shutil.move(filename, os.path.join(output_path, f"{new_filename}"))
    new_output_path = output_path + "/" + new_filename
return new_output_path
| pt | 0.825527 | 2.909616 | 3
updater/__init__.py | daguar/srtracker | 2 | 6631253 | # Copyright (C) 2012, Code for America
# This is open source software, released under a standard 3-clause
# BSD-style license; see the file LICENSE for details.
from update import subscribe, subscription_exists, unsubscribe, subscription_for_key, unsubscribe_with_key
| en | 0.858194 | 0.803589 | 1
model-optimizer/mo/front/common/partial_infer/utils.py | undeadinu/dldt | 1 | 6631254 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
def int64_array(l: list):
return np.array(l, dtype=np.int64)
def float_array(l: list):
    return np.array(l, dtype=np.float64)
def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1):
"""
Preparing necessary attributes for edges at input ports starting from start_port.
    It is applicable for convolution and other operations that have constant inputs which
are intended to be dumped as IE IR bin file.
"""
nports = len(node.in_nodes())
for i, name in enumerate(names):
port = i + start_port
if port >= nports:
break
if node.in_node(port).value is not None:
node.in_edge(port)['bin'] = name
def assign_dims_to_weights(node, spatial, input_channel, output_channel=None, dims_number=None):
if spatial is not None:
node['spatial_dims'] = np.array(spatial, dtype=np.int64)
node['input_channel_dim'] = np.array(input_channel, dtype=np.int64)
node['output_channel_dim'] = np.array(output_channel, dtype=np.int64)
if 'input_channel_dim' not in node['dim_attrs']:
node['dim_attrs'].append('input_channel_dim')
node['dims_number'] = dims_number
def copy_or_none(x):
return x.copy() if x is not None else None
def convert_tf_padding_to_str(padding):
mapping = {b'SAME': 'same_upper', b'VALID': 'valid'}
return mapping[padding.s]
# TODO eliminate this dependency and pass necessary function as an argument
def tf_window_op_pad_infer(input, window, stride, auto_pad):
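    """Infers the output size and padding for a TF-style windowed operation.
    Args:
        input: input spatial dimensions.
        window: window (kernel) sizes per dimension.
        stride: strides per dimension.
        auto_pad: one of 'same_upper', 'same_lower' or 'valid'.
    Returns:
        A (pad, output) tuple with per-dimension padding and output sizes.
    """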
if input is None or window is None or stride is None or auto_pad is None:
return (None, None)
if auto_pad in ['same_lower', 'same_upper']:
if auto_pad == 'same_upper':
output = np.int64(np.ceil(input / stride))
else:
output = np.int64(np.floor(input / stride))
residual = input % stride
mask = residual == 0
full_pad = window.copy()
full_pad[mask] -= stride[mask]
mask = np.logical_not(mask)
full_pad[mask] -= input[mask] % stride[mask]
full_pad = np.maximum(full_pad, 0)
low_pad = np.int64(full_pad / 2)
high_pad = full_pad - low_pad
pad = np.array([low_pad, high_pad]).transpose()
elif auto_pad == 'valid':
output = np.int64(np.ceil((input - window + 1) / stride))
pad = np.zeros((len(output), 2), dtype=np.int64)
else:
log.error("Unsupported padding scheme: {}".format(auto_pad))
pad = None
output = None
return (pad, output)
| """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
def int64_array(l: list):
return np.array(l, dtype=np.int64)
def float_array(l: list):
return np.array(l, dtype=np.int64)
def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1):
"""
Preparing necessary attributes for edges at input ports starting from start_port.
It is applicable for convolution and other operations that has constant inputs which
are intended to be dumped as IE IR bin file.
"""
nports = len(node.in_nodes())
for i, name in enumerate(names):
port = i + start_port
if port >= nports:
break
if node.in_node(port).value is not None:
node.in_edge(port)['bin'] = name
def assign_dims_to_weights(node, spatial, input_channel, output_channel=None, dims_number=None):
if spatial is not None:
node['spatial_dims'] = np.array(spatial, dtype=np.int64)
node['input_channel_dim'] = np.array(input_channel, dtype=np.int64)
node['output_channel_dim'] = np.array(output_channel, dtype=np.int64)
if 'input_channel_dim' not in node['dim_attrs']:
node['dim_attrs'].append('input_channel_dim')
node['dims_number'] = dims_number
def copy_or_none(x):
return x.copy() if x is not None else None
def convert_tf_padding_to_str(padding):
mapping = {b'SAME': 'same_upper', b'VALID': 'valid'}
return mapping[padding.s]
# TODO eliminate this dependency and pass necessary function as an argument
def tf_window_op_pad_infer(input, window, stride, auto_pad):
if input is None or window is None or stride is None or auto_pad is None:
return (None, None)
if auto_pad in ['same_lower', 'same_upper']:
if auto_pad == 'same_upper':
output = np.int64(np.ceil(input / stride))
else:
output = np.int64(np.floor(input / stride))
residual = input % stride
mask = residual == 0
full_pad = window.copy()
full_pad[mask] -= stride[mask]
mask = np.logical_not(mask)
full_pad[mask] -= input[mask] % stride[mask]
full_pad = np.maximum(full_pad, 0)
low_pad = np.int64(full_pad / 2)
high_pad = full_pad - low_pad
pad = np.array([low_pad, high_pad]).transpose()
elif auto_pad == 'valid':
output = np.int64(np.ceil((input - window + 1) / stride))
pad = np.zeros((len(output), 2), dtype=np.int64)
else:
log.error("Unsupported padding scheme: {}".format(auto_pad))
pad = None
output = None
return (pad, output)
| en | 0.884808 | 1.856518 | 2
sgas/authz/ctxinsertchecker.py | kmjonsson/luts3-service | 0 | 6631255 |
"""
Usage Records insertion checker.
Provides functionality for checking if a host should be able to insert a given
usage record.
Author: <NAME> <<EMAIL>>
Copyright: Nordic Data Grid Facility (2009, 2010)
"""
from sgas.authz import rights
class InsertChecker:
CONTEXT_KEY = None
def __init__(self, check_depth):
self.check_depth = check_depth
def contextCheck(self, subject_identity, subject_rights, action_context):
"""
        Given a (x509) subject identity, subject rights and a context for the
insertion, this function decides if the subject is allowed to perform insertion
for the given context.
This is done both with specific checking of specified context, and by checking
"similarity" between the subject identity host name and the action context.
"""
if action_context is None:
return True # compat mode
subject_fqdn = extractFQDNfromX509Identity(subject_identity)
insert_context = [ ctx_value for ctx_key, ctx_value in action_context if ctx_key == self.CONTEXT_KEY ]
        # insert context explicitly allowed
explicit_allowed_contexts = []
for sr in subject_rights:
explicit_allowed_contexts += sr.get(self.CONTEXT_KEY, [])
# subject name parts for depth checking
id_parts = [ p for p in subject_fqdn.split('.') if p != '' ]
cd = min(self.check_depth, len(id_parts))
# go through all requested machine names and check if insert is allowed
allowed = []
for ic in insert_context:
if ic in explicit_allowed_contexts:
allowed.append(True)
continue
# check if x509 identity is close enough to machine name to allow insertion
ic_parts = [ p for p in ic.split('.') if p != '' ]
if len(ic_parts) < cd:
allowed.append(False)
continue
for d in range( - cd, 0):
if ic_parts[d] != id_parts[d]:
allowed.append(False)
break
else:
# for loop terminated without breaking, check depth ok
allowed.append(True)
return all(allowed)
def extractFQDNfromX509Identity(identity):
"""
    Given strings like:
"/O=Grid/O=NorduGrid/CN=benedict.grid.aau.dk"
"/O=Grid/O=NorduGrid/CN=host/fyrkat.grid.aau.dk"
this function returns the FQDN of the identity.
"""
if identity is None:
return '.' # this is technically a hostname
tokens = identity.split('/')
if len(tokens) == 1:
return identity # not an x509 identity
if tokens[-2] == 'CN=host':
fqdn = tokens[-1]
elif tokens[-1].startswith('CN='):
fqdn = tokens[-1].split('=',2)[1]
else:
raise ValueError('Could not extract FQDN from X509 identity (%s)' % identity)
if not '.' in fqdn:
raise ValueError('Extracted FQDN is not an FQDN (%s)' % fqdn)
return fqdn
| """
Usage Records insertion checker.
Provides functionality for checking if a host should be able to insert a given
usage record.
Author: <NAME> <<EMAIL>>
Copyright: Nordic Data Grid Facility (2009, 2010)
"""
from sgas.authz import rights
class InsertChecker:
CONTEXT_KEY = None
def __init__(self, check_depth):
self.check_depth = check_depth
def contextCheck(self, subject_identity, subject_rights, action_context):
"""
Given a (x509) subject identity, subject rights and a context with for the
insertion, this function decides if the subject is allowed to perform insertion
for the given context.
This is done both with specific checking of specified context, and by checking
"similarity" between the subject identity host name and the action context.
"""
if action_context is None:
return True # compat mode
subject_fqdn = extractFQDNfromX509Identity(subject_identity)
insert_context = [ ctx_value for ctx_key, ctx_value in action_context if ctx_key == self.CONTEXT_KEY ]
# insert context explicitely allowed
explicit_allowed_contexts = []
for sr in subject_rights:
explicit_allowed_contexts += sr.get(self.CONTEXT_KEY, [])
# subject name parts for depth checking
id_parts = [ p for p in subject_fqdn.split('.') if p != '' ]
cd = min(self.check_depth, len(id_parts))
# go through all requested machine names and check if insert is allowed
allowed = []
for ic in insert_context:
if ic in explicit_allowed_contexts:
allowed.append(True)
continue
# check if x509 identity is close enough to machine name to allow insertion
ic_parts = [ p for p in ic.split('.') if p != '' ]
if len(ic_parts) < cd:
allowed.append(False)
continue
for d in range( - cd, 0):
if ic_parts[d] != id_parts[d]:
allowed.append(False)
break
else:
# for loop terminated without breaking, check depth ok
allowed.append(True)
return all(allowed)
def extractFQDNfromX509Identity(identity):
"""
Givens strings like:
"/O=Grid/O=NorduGrid/CN=benedict.grid.aau.dk"
"/O=Grid/O=NorduGrid/CN=host/fyrkat.grid.aau.dk"
this function returns the FQDN of the identity.
"""
if identity is None:
return '.' # this is technically a hostname
tokens = identity.split('/')
if len(tokens) == 1:
return identity # not an x509 identity
if tokens[-2] == 'CN=host':
fqdn = tokens[-1]
elif tokens[-1].startswith('CN='):
fqdn = tokens[-1].split('=',2)[1]
else:
raise ValueError('Could not extract FQDN from X509 identity (%s)' % identity)
if not '.' in fqdn:
raise ValueError('Extracted FQDN is not an FQDN (%s)' % fqdn)
return fqdn | en | 0.745934 | Usage Records insertion checker. Provides functionality for checking if a host should be able to insert a given usage record. Author: <NAME> <<EMAIL>> Copyright: Nordic Data Grid Facility (2009, 2010) Given a (x509) subject identity, subject rights and a context with for the insertion, this function decides if the subject is allowed to perform insertion for the given context. This is done both with specific checking of specified context, and by checking "similarity" between the subject identity host name and the action context. # compat mode # insert context explicitely allowed # subject name parts for depth checking # go through all requested machine names and check if insert is allowed # check if x509 identity is close enough to machine name to allow insertion # for loop terminated without breaking, check depth ok Givens strings like: "/O=Grid/O=NorduGrid/CN=benedict.grid.aau.dk" "/O=Grid/O=NorduGrid/CN=host/fyrkat.grid.aau.dk" this function returns the FQDN of the identity. # this is technically a hostname # not an x509 identity | 2.805139 | 3 |
psltdsim/find/findGenOnBus.py | thadhaines/PSLTDSim | 0 | 6631256 | def findGenOnBus(mirror, Busnum, Id=None, timing = True):
"""Find first generator on bus unless Id specified
Note that Ids are typically a strings i.e. '2'
"""
# TODO: remove this import
import time
if timing: tic = time.time()
#if mirror.debug: # prints a lot
# print('***Searching Bus %d for gen with ID %s...' %(Busnum, Id))
if not mirror.searchDict:
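        # No search dictionary available: fall back to a linear scan over all machines.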
for x in range(len(mirror.Machines)):
if mirror.Machines[x].Busnum == Busnum:
# Return first gen on bus if no Id
if mirror.debug:
print('***Found gen on Bus %d with ID %s...' %(mirror.Machines[x].Busnum, mirror.Machines[x].Id))
if Id == None:
if timing: mirror.FindTime += time.time() - tic
return mirror.Machines[x]
if Id == mirror.Machines[x].Id:
mirror.FindTime += time.time() - tic
return mirror.Machines[x]
else:
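        # Use the pre-built search dictionary keyed by bus number string.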
bnum = str(int(Busnum))
if bnum in mirror.searchDict:
# bus found
if 'Machines' in mirror.searchDict[bnum]:
# bus has machines
if Id == None:
# return first gen if No id
if timing: mirror.FindTime += time.time() - tic
return mirror.searchDict[bnum]['Machines'][0]
else:
# find gen with matching ID
for bGen in mirror.searchDict[bnum]['Machines']:
if bGen.Id == Id:
if timing: mirror.FindTime += time.time() - tic
return bGen
if Id:
print("Generator on Bus %d with Id '%s' not Found" % (Busnum,Id))
else:
print("Generator on Bus %d not Found" % Busnum)
if timing: mirror.FindTime += time.time() - tic
return None
| en | 0.68711 | 3.171308 | 3
docs/quickstart.py | shapiromatron/bmds-server | 1 | 6631257 | import json
import os
import time
import requests
# set the URL root to the address where BMDS server is currently running
url_root = os.environ.get("BMDS_SERVER_URL", "http://bmds-server.com")
# Create an example BMDS job. This example uses BMDS v2.7.0 (BMDS270) with two
# dichotomous datasets:
inputs = {
"id": "My first BMDS-server run",
"dataset_type": "D",
"bmds_version": "BMDS270",
"datasets": [
{
"id": "run #1",
"doses": [0, 1.96, 5.69, 29.75],
"ns": [75, 49, 50, 49],
"incidences": [5, 1, 3, 14],
},
{
"id": 2,
"doses": [0, 1.96, 5.69, 29.75],
"ns": [75, 49, 50, 49],
"incidences": [0, 0, 11, 27],
},
],
}
# We submit the dataset to the job API:
url = f"{url_root}/api/job/"
data = {"inputs": json.dumps(inputs)}
r = requests.post(url, data)
# If submission is successful, we'll get a HTTP 201 response (job
# created), along with a new random unique identifier for this job:
if r.status_code == 201:
job_id = r.json()["id"]
# Each job is added to a queue on the server; when there are no other jobs
# running this job will be started. We can poll the results page (in this
# case waiting 15 seconds between requests) until the job is finished:
url = f"{url_root}/api/job/{job_id}/"
while True:
print("Polling outputs... sleeping for 15 seconds...")
time.sleep(15)
r = requests.get(url)
response = r.json()
if response["is_finished"]:
print("Job complete!")
break
# After completion, the job returns model outputs. There's lots of
# information in the outputs, including the created dfile, output file,
# and results from the parsed output. If model-recommendations is enabled,
# then a model will also be recommended in the outputs. Here's a snapshot:
outputs = response["outputs"]
for dataset in outputs["outputs"]:
print("----")
ds = json.dumps(dataset["dataset"], indent=2)
n_models = len(dataset["models"])
print(f"Dataset: {ds}")
print(f"Number of models: {n_models}")
for model in dataset["models"]:
name = model["output"]["model_name"]
bmd = model["output"]["BMD"]
print(f" - {name}: BMD -> {bmd}")
| en | 0.913764 | # set the URL root to the address where BMDS server is currently running # Create an example BMDS job. This example uses BMDS v2.6.0.1 with two # dichotomous datasets: #1", # We submit the dataset to the job API: # If submission is successful, we'll get an HTTP 201 response (job # created), along with a new random unique identifier for this job: # Each job is added to a queue on the server; when there are no other jobs # running this job will be started. We can poll the results page (in this # case waiting 15 seconds between requests) until the job is finished: # After completion, the job returns model outputs. There's lots of # information in the outputs, including the created dfile, output file, # and results from the parsed output. If model-recommendations is enabled, # then a model will also be recommended in the outputs. Here's a snapshot: | 2.916671 | 3 |
tests/postgres_tests/__init__.py | JBKahn/django | 3 | 6631258 | <reponame>JBKahn/django<filename>tests/postgres_tests/__init__.py<gh_stars>1-10
import unittest
from django.db import connection
from django.db.backends.signals import connection_created
from django.test import TestCase
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class PostgreSQLTestCase(TestCase):
@classmethod
def tearDownClass(cls):
# No need to keep that signal overhead for non PostgreSQL-related tests.
from django.contrib.postgres.signals import register_hstore_handler
connection_created.disconnect(register_hstore_handler)
super(PostgreSQLTestCase, cls).tearDownClass()
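# Illustrative sketch (not part of Django's test suite): a minimal concrete
# test case built on the base class above; it only checks the active backend,
# so it stays self-contained.
class PostgreSQLSmokeTest(PostgreSQLTestCase):
    def test_vendor(self):
        self.assertEqual(connection.vendor, 'postgresql')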
| import unittest
from django.db import connection
from django.db.backends.signals import connection_created
from django.test import TestCase
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class PostgreSQLTestCase(TestCase):
@classmethod
def tearDownClass(cls):
# No need to keep that signal overhead for non PostgreSQL-related tests.
from django.contrib.postgres.signals import register_hstore_handler
connection_created.disconnect(register_hstore_handler)
super(PostgreSQLTestCase, cls).tearDownClass() | en | 0.960956 | # No need to keep that signal overhead for non PostgreSQL-related tests. | 1.962467 | 2 |
configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py | rehohoho/mmsegmentation | 1 | 6631259 | _base_ = [
'../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa
model = dict(
backbone=dict(
type='SVT',
init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
embed_dims=[64, 128, 256, 512],
num_heads=[2, 4, 8, 16],
mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 10, 4],
windiow_sizes=[7, 7, 7, 7],
norm_after_stage=True),
neck=dict(in_channels=[64, 128, 256, 512], out_channels=256, num_outs=4),
decode_head=dict(num_classes=150),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
| _base_ = [
'../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa
model = dict(
backbone=dict(
type='SVT',
init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
embed_dims=[64, 128, 256, 512],
num_heads=[2, 4, 8, 16],
mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 10, 4],
windiow_sizes=[7, 7, 7, 7],
norm_after_stage=True),
neck=dict(in_channels=[64, 128, 256, 512], out_channels=256, num_outs=4),
decode_head=dict(num_classes=150),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
| none | 1 | 1.598241 | 2 |
|
tests/files/md_codeblock_idem_test.py | panfill/pandoc-tables | 74 | 6631260 | <gh_stars>10-100
from logging import getLogger
from pathlib import Path
from typing import Tuple
from panflute import convert_text
from pytest import mark
from pantable.ast import PanCodeBlock
from pantable.util import parse_markdown_codeblock
logger = getLogger('pantable')
EXT = 'md'
PWD = Path(__file__).parent
DIR = PWD / 'md_codeblock'
def round_trip(text: str) -> str:
kwargs = parse_markdown_codeblock(text)
pan_codeblock = PanCodeBlock.from_yaml_filter(**kwargs)
doc = pan_codeblock.to_panflute_ast()
return convert_text(doc, input_format='panflute', output_format='markdown')
def read(path: Path) -> Tuple[str, str, str]:
'''test parsing markdown codeblock to PanCodeBlock
'''
logger.info(f'Testing idempotence with {path}...')
with open(path, 'r') as f:
text = f.read()
text_out = round_trip(text)
text_idem = round_trip(text_out)
return text_out, text_idem, text
def read_io(name: str) -> Tuple[str, str, str]:
path = DIR / f'{name}.{EXT}'
return read(path)
@mark.parametrize('name', (path.stem for path in DIR.glob(f'*.{EXT}')))
def test_md_codeblock_idem(name):
res = read_io(name)
assert res[0].strip() == res[1].strip()
| from logging import getLogger
from pathlib import Path
from typing import Tuple
from panflute import convert_text
from pytest import mark
from pantable.ast import PanCodeBlock
from pantable.util import parse_markdown_codeblock
logger = getLogger('pantable')
EXT = 'md'
PWD = Path(__file__).parent
DIR = PWD / 'md_codeblock'
def round_trip(text: str) -> str:
kwargs = parse_markdown_codeblock(text)
pan_codeblock = PanCodeBlock.from_yaml_filter(**kwargs)
doc = pan_codeblock.to_panflute_ast()
return convert_text(doc, input_format='panflute', output_format='markdown')
def read(path: Path) -> Tuple[str, str, str]:
'''test parsing markdown codeblock to PanCodeBlock
'''
logger.info(f'Testing idempotence with {path}...')
with open(path, 'r') as f:
text = f.read()
text_out = round_trip(text)
text_idem = round_trip(text_out)
return text_out, text_idem, text
def read_io(name: str) -> Tuple[str, str, str]:
path = DIR / f'{name}.{EXT}'
return read(path)
@mark.parametrize('name', (path.stem for path in DIR.glob(f'*.{EXT}')))
def test_md_codeblock_idem(name):
res = read_io(name)
assert res[0].strip() == res[1].strip() | en | 0.539133 | test parsing markdown codeblock to PanCodeBlock | 2.241243 | 2 |
project/decorators.py | 18F/cloud-marketplace-prototype | 0 | 6631261 | import logging
from django.contrib.auth import REDIRECT_FIELD_NAME, decorators
from django.core.exceptions import PermissionDenied
logger = logging.getLogger('cmp')
def staff_login_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
'''
Decorator to check that the user accessing the decorated view has their
is_staff flag set to True.
It will first redirect to login_url or the default login url if the user is
not authenticated. If the user is authenticated but is not staff, then
a PermissionDenied exception will be raised.
'''
# Based off code from the Django project
# License: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/LICENSE # NOQA
# Code reference: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/django/contrib/auth/decorators.py#L40 # NOQA
def check_if_staff(user):
if not user.is_authenticated:
# returning False will cause the user_passes_test decorator
# to redirect to the login flow
logger.info(f'Unauthenticated user has attempted to access '
f'is_staff view')
return False
if user.is_staff:
# then all good
logger.info(f'User with id {user.id} ({user.email}) has passed '
f'is_staff check')
return True
# otherwise the user is authenticated but isn't staff, so
# they do not have the correct permissions and should be directed
# to the 403 page
logger.info(f'User with id {user.id} ({user.email}) is '
f'authenticated but has not passed is_staff check')
raise PermissionDenied
actual_decorator = decorators.user_passes_test(
check_if_staff,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
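# Illustrative usage sketch (not part of the original module): `staff_dashboard`
# is a hypothetical view showing how the decorator above would be applied.
@staff_login_required
def staff_dashboard(request):
    return HttpResponse('Staff-only content')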
| import logging
from django.contrib.auth import REDIRECT_FIELD_NAME, decorators
from django.core.exceptions import PermissionDenied
logger = logging.getLogger('cmp')
def staff_login_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
'''
Decorator to check that the user accessing the decorated view has their
is_staff flag set to True.
It will first redirect to login_url or the default login url if the user is
not authenticated. If the user is authenticated but is not staff, then
a PermissionDenied exception will be raised.
'''
# Based off code from the Django project
# License: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/LICENSE # NOQA
# Code reference: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/django/contrib/auth/decorators.py#L40 # NOQA
def check_if_staff(user):
if not user.is_authenticated:
# returning False will cause the user_passes_test decorator
# to redirect to the login flow
logger.info(f'Unauthenticated user has attempted to access '
f'is_staff view')
return False
if user.is_staff:
# then all good
logger.info(f'User with id {user.id} ({user.email}) has passed '
f'is_staff check')
return True
# otherwise the user is authenticated but isn't staff, so
# they do not have the correct permissions and should be directed
# to the 403 page
logger.info(f'User with id {user.id} ({user.email}) is '
f'authenticated but has not passed is_staff check')
raise PermissionDenied
actual_decorator = decorators.user_passes_test(
check_if_staff,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
| en | 0.814372 | Decorator to check that the user accessing the decorated view has their is_staff flag set to True. It will first redirect to login_url or the default login url if the user is not authenticated. If the user is authenticated but is not staff, then a PermissionDenied exception will be raised. # Based off code from the Django project # License: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/LICENSE # NOQA # Code reference: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/django/contrib/auth/decorators.py#L40 # NOQA # returning False will cause the user_passes_test decorator # to redirect to the login flow # then all good # otherwise the user is authenticated but isn't staff, so # they do not have the correct permissions and should be directed # to the 403 page | 2.374164 | 2 |
nvm/pmemobj/__init__.py | isabella232/pynvm | 11 | 6631262 | <filename>nvm/pmemobj/__init__.py
from .pool import open, create, MIN_POOL_SIZE, PersistentObjectPool
from .list import PersistentList
from .dict import PersistentDict
from .object import PersistentObject
from .tuple import PersistentTuple
from .set import PersistentSet, PersistentFrozenSet
| <filename>nvm/pmemobj/__init__.py
from .pool import open, create, MIN_POOL_SIZE, PersistentObjectPool
from .list import PersistentList
from .dict import PersistentDict
from .object import PersistentObject
from .tuple import PersistentTuple
from .set import PersistentSet, PersistentFrozenSet
| none | 1 | 1.44051 | 1 |
|
binder_requirements.py | whoopnip/project-report | 0 | 6631263 | import conf
if __name__ == "__main__":
for package in conf.BINDER_ENVIRONMENT_REQUIRES:
print(package)
| import conf
if __name__ == "__main__":
for package in conf.BINDER_ENVIRONMENT_REQUIRES:
print(package)
| none | 1 | 1.437693 | 1 |
|
aim/sdk/init.py | VkoHov/aim | 1 | 6631264 | from aim.sdk.session import DefaultSession
def init(*args, **kwargs):
DefaultSession(*args, **kwargs)
| from aim.sdk.session import DefaultSession
def init(*args, **kwargs):
DefaultSession(*args, **kwargs)
| none | 1 | 1.472934 | 1 |
|
pillcity/resources/media.py | Crystal-RainSlide/pill-city | 0 | 6631265 | import os
import boto3
import json
import werkzeug
import uuid
from typing import List
from flask_restful import reqparse, Resource, fields
from flask_jwt_extended import jwt_required, get_jwt_identity
from pillcity.models.media import Media
from pillcity.daos.media import get_media, create_media, get_media_page
from pillcity.daos.user import find_user
from pillcity.utils.now_ms import now_ms
from pillcity.utils.profiling import timer
from .cache import r, RMediaUrl
MaxMediaCount = 4
PostMediaUrlExpireSeconds = 3600 * 12 # 12 hours
GetMediaPageCount = 4
# Cache structure within Redis
# "mediaUrl" -> object_name -> "media url"(space)"media url generated time in ms"
@timer
def get_media_url(media: Media):
object_name = media.id
# subtract expiry by 10 seconds for some network overhead
r_media_url = r.hget(RMediaUrl, object_name)
if r_media_url:
r_media_url = r_media_url.decode('utf-8')
if now_ms() < int(r_media_url.split(" ")[1]) + (PostMediaUrlExpireSeconds - 10) * 1000:
return r_media_url.split(" ")[0]
sts_client = boto3.client(
'sts',
endpoint_url=os.environ['STS_ENDPOINT_URL'],
region_name=os.environ.get('AWS_REGION', ''),
aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY']
)
s3_bucket_name = os.environ['S3_BUCKET_NAME']
# obtain temp token
read_media_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:GetObject",
"Resource": [f"arn:aws:s3:::{s3_bucket_name}/{object_name}"],
},
],
}
assume_role_response = sts_client.assume_role(
# for minio this is moot
# for s3 this role allows all media read, but intersects with the inline policy, the temp role
# would still be minimal privilege
RoleArn=os.environ['MEDIA_READER_ROLE_ARN'],
# media-reader is the only principal who can assume the role so this can be fixed
RoleSessionName='media-reader',
Policy=json.dumps(read_media_policy),
DurationSeconds=PostMediaUrlExpireSeconds,
)
temp_s3_client = boto3.client(
's3',
endpoint_url=os.environ['S3_ENDPOINT_URL'],
region_name=os.environ.get('AWS_REGION', ''),
aws_access_key_id=assume_role_response['Credentials']['AccessKeyId'],
aws_secret_access_key=assume_role_response['Credentials']['SecretAccessKey'],
aws_session_token=assume_role_response['Credentials']['SessionToken'],
)
# get pre-signed url
media_url = temp_s3_client.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': s3_bucket_name, 'Key': media.id},
ExpiresIn=PostMediaUrlExpireSeconds
)
r.hset(RMediaUrl, object_name, f"{media_url} {now_ms()}")
return media_url
class MediaUrls(fields.Raw):
def format(self, media_list):
if not media_list:
return []
return list(map(get_media_url, media_list))
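# Illustrative usage sketch (not part of the original module): how the field
# class above might appear in a flask_restful marshalling schema. The
# 'media_list' attribute name is an assumption, not taken from the real model.
example_post_fields = {
    'media_urls': MediaUrls(attribute='media_list'),
}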
post_media_parser = reqparse.RequestParser()
for i in range(MaxMediaCount):
post_media_parser.add_argument('media' + str(i), type=werkzeug.datastructures.FileStorage, location='files',
required=False, default=None)
get_media_parser = reqparse.RequestParser()
get_media_parser.add_argument('page', type=int, required=True, location='args')
class Media(Resource):
@jwt_required()
def post(self):
user_id = get_jwt_identity()
user = find_user(user_id)
if not user:
return {'msg': f'User {user_id} is not found'}, 404
args = post_media_parser.parse_args()
media_files = []
for i in range(MaxMediaCount):
media_file = args['media' + str(i)]
if media_file:
media_files.append(media_file)
media_object_names = []
for media_file in media_files:
object_name_stem = f"media/{uuid.uuid4()}"
media_object = create_media(media_file, object_name_stem, user)
if not media_object:
return {'msg': f"Disallowed image type"}, 400
media_object_names.append(media_object.id)
return media_object_names, 201
@jwt_required()
def get(self):
user_id = get_jwt_identity()
user = find_user(user_id)
if not user:
return {'msg': f'User {user_id} is not found'}, 404
args = get_media_parser.parse_args()
page_number = args['page']
if page_number < 1:
return {'msg': f'Invalid page number'}, 400
def _media(media: Media):
return {
"objectName": media.id,
"mediaUrl": get_media_url(media)
}
return list(map(_media, get_media_page(user, page_number - 1, GetMediaPageCount)))
def check_media_object_names(media_object_names: List[str], limit: int) -> List[Media]:
media_objects = []
for media_object_name in media_object_names[: limit]:
media_object = get_media(media_object_name)
if media_object:
media_objects.append(media_object)
return media_objects
| import os
import boto3
import json
import werkzeug
import uuid
from typing import List
from flask_restful import reqparse, Resource, fields
from flask_jwt_extended import jwt_required, get_jwt_identity
from pillcity.models.media import Media
from pillcity.daos.media import get_media, create_media, get_media_page
from pillcity.daos.user import find_user
from pillcity.utils.now_ms import now_ms
from pillcity.utils.profiling import timer
from .cache import r, RMediaUrl
MaxMediaCount = 4
PostMediaUrlExpireSeconds = 3600 * 12 # 12 hours
GetMediaPageCount = 4
# Cache structure within Redis
# "mediaUrl" -> object_name -> "media url"(space)"media url generated time in ms"
@timer
def get_media_url(media: Media):
object_name = media.id
# subtract expiry by 10 seconds for some network overhead
r_media_url = r.hget(RMediaUrl, object_name)
if r_media_url:
r_media_url = r_media_url.decode('utf-8')
if now_ms() < int(r_media_url.split(" ")[1]) + (PostMediaUrlExpireSeconds - 10) * 1000:
return r_media_url.split(" ")[0]
sts_client = boto3.client(
'sts',
endpoint_url=os.environ['STS_ENDPOINT_URL'],
region_name=os.environ.get('AWS_REGION', ''),
aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY']
)
s3_bucket_name = os.environ['S3_BUCKET_NAME']
# obtain temp token
read_media_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:GetObject",
"Resource": [f"arn:aws:s3:::{s3_bucket_name}/{object_name}"],
},
],
}
assume_role_response = sts_client.assume_role(
# for minio this is moot
# for s3 this role allows all media read, but intersects with the inline policy, the temp role
# would still be minimal privilege
RoleArn=os.environ['MEDIA_READER_ROLE_ARN'],
# media-reader is the only principal who can assume the role so this can be fixed
RoleSessionName='media-reader',
Policy=json.dumps(read_media_policy),
DurationSeconds=PostMediaUrlExpireSeconds,
)
temp_s3_client = boto3.client(
's3',
endpoint_url=os.environ['S3_ENDPOINT_URL'],
region_name=os.environ.get('AWS_REGION', ''),
aws_access_key_id=assume_role_response['Credentials']['AccessKeyId'],
aws_secret_access_key=assume_role_response['Credentials']['SecretAccessKey'],
aws_session_token=assume_role_response['Credentials']['SessionToken'],
)
# get pre-signed url
media_url = temp_s3_client.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': s3_bucket_name, 'Key': media.id},
ExpiresIn=PostMediaUrlExpireSeconds
)
r.hset(RMediaUrl, object_name, f"{media_url} {now_ms()}")
return media_url
class MediaUrls(fields.Raw):
def format(self, media_list):
if not media_list:
return []
return list(map(get_media_url, media_list))
post_media_parser = reqparse.RequestParser()
for i in range(MaxMediaCount):
post_media_parser.add_argument('media' + str(i), type=werkzeug.datastructures.FileStorage, location='files',
required=False, default=None)
get_media_parser = reqparse.RequestParser()
get_media_parser.add_argument('page', type=int, required=True, location='args')
class Media(Resource):
@jwt_required()
def post(self):
user_id = get_jwt_identity()
user = find_user(user_id)
if not user:
return {'msg': f'User {user_id} is not found'}, 404
args = post_media_parser.parse_args()
media_files = []
for i in range(MaxMediaCount):
media_file = args['media' + str(i)]
if media_file:
media_files.append(media_file)
media_object_names = []
for media_file in media_files:
object_name_stem = f"media/{uuid.uuid4()}"
media_object = create_media(media_file, object_name_stem, user)
if not media_object:
return {'msg': f"Disallowed image type"}, 400
media_object_names.append(media_object.id)
return media_object_names, 201
@jwt_required()
def get(self):
user_id = get_jwt_identity()
user = find_user(user_id)
if not user:
return {'msg': f'User {user_id} is not found'}, 404
args = get_media_parser.parse_args()
page_number = args['page']
if page_number < 1:
return {'msg': f'Invalid page number'}, 400
def _media(media: Media):
return {
"objectName": media.id,
"mediaUrl": get_media_url(media)
}
return list(map(_media, get_media_page(user, page_number - 1, GetMediaPageCount)))
def check_media_object_names(media_object_names: List[str], limit: int) -> List[Media]:
media_objects = []
for media_object_name in media_object_names[: limit]:
media_object = get_media(media_object_name)
if media_object:
media_objects.append(media_object)
return media_objects
| en | 0.808685 | # 12 hours # Cache structure within Redis # "mediaUrl" -> object_name -> "media url"(space)"media url generated time in ms" # subtract expiry by 10 seconds for some network overhead # obtain temp token # for minio this is moot # for s3 this role allows all media read, but intersects with the inline policy, the temp role # would still be minimal privilege # media-reader is the only principal who can assume the role so this can be fixed # get pre-signed url | 1.860667 | 2 |
crypto_duck/quack_forum/views.py | sifrovacky-cz/kachna | 0 | 6631266 | <filename>crypto_duck/quack_forum/views.py
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
import datetime
#form and model of normal comment
from quack_forum.forms import CommentForm
from quack_forum.models import QuackForum
#form and model of crypto comment
from quack_forum.forms import CryptoForm
from quack_forum.models import CryptoQuack
# saves comment and returns comment models from database
def Comment(request):
#error (for unregistered users, if they do the check wrong)
error_flag = ''
    #checking if the user is logged in
if request.user.is_authenticated:
html_path = 'quack_forum/forum.html'
form = CommentForm()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
form_with_user = form.save(commit=False)
form_with_user.user = request.user.username
form_with_user.save()
form = CommentForm()
    # this is the part where the user is not logged in
else:
check_password = "<PASSWORD>"
html_path = 'quack_forum/forum_unauthorized.html'
form = CommentForm()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid() and request.POST.get('check') == check_password:
form_with_user = form.save(commit=False)
form_with_user.user = request.POST.get('username')
form_with_user.save()
else:
error_flag = "Please write " + check_password + " into the box above!"
form = CommentForm()
    # Ordering comments from newest to oldest
comment_list = QuackForum.objects.order_by('-date_time')
return render(request,html_path,{'form':form,'comment_list':comment_list,'error_flag':error_flag})
@login_required
def CryptoComment(request):
form = CryptoForm()
if request.method == 'POST':
form = CryptoForm(request.POST,request.FILES)
if form.is_valid():
form = form.save(commit = False)
form.author = request.user
form.save()
return HttpResponseRedirect(reverse('quack_forum:crypto_forum'))
return render(request,'quack_forum/crypto_comment_form.html',{'form':form,})
def CryptoForum(request):
today = datetime.date.today()
error_flag = ""
if request.method == 'POST':
try:
id_value = request.POST.get('id_value')
CryptoQuack.objects.filter(id=id_value).delete()
except:
error_flag = "An error has occured :("
cryptoCommentList = CryptoQuack.objects.order_by('-publish_time')
return render(request,'quack_forum/ciphers.html',{'cryptoComentList':cryptoCommentList,'today':today,'error_flag':error_flag})
| <filename>crypto_duck/quack_forum/views.py
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
import datetime
#form and model of normal comment
from quack_forum.forms import CommentForm
from quack_forum.models import QuackForum
#form and model of crypto comment
from quack_forum.forms import CryptoForm
from quack_forum.models import CryptoQuack
# saves comment and returns comment models from database
def Comment(request):
#error (for unregistered users, if they do the check wrong)
error_flag = ''
    #checking if the user is logged in
if request.user.is_authenticated:
html_path = 'quack_forum/forum.html'
form = CommentForm()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
form_with_user = form.save(commit=False)
form_with_user.user = request.user.username
form_with_user.save()
form = CommentForm()
    # this is the part where the user is not logged in
else:
check_password = "<PASSWORD>"
html_path = 'quack_forum/forum_unauthorized.html'
form = CommentForm()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid() and request.POST.get('check') == check_password:
form_with_user = form.save(commit=False)
form_with_user.user = request.POST.get('username')
form_with_user.save()
else:
error_flag = "Please write " + check_password + " into the box above!"
form = CommentForm()
    # Ordering comments from newest to oldest
comment_list = QuackForum.objects.order_by('-date_time')
return render(request,html_path,{'form':form,'comment_list':comment_list,'error_flag':error_flag})
@login_required
def CryptoComment(request):
form = CryptoForm()
if request.method == 'POST':
form = CryptoForm(request.POST,request.FILES)
if form.is_valid():
form = form.save(commit = False)
form.author = request.user
form.save()
return HttpResponseRedirect(reverse('quack_forum:crypto_forum'))
return render(request,'quack_forum/crypto_comment_form.html',{'form':form,})
def CryptoForum(request):
today = datetime.date.today()
error_flag = ""
if request.method == 'POST':
try:
id_value = request.POST.get('id_value')
CryptoQuack.objects.filter(id=id_value).delete()
except:
error_flag = "An error has occured :("
cryptoCommentList = CryptoQuack.objects.order_by('-publish_time')
return render(request,'quack_forum/ciphers.html',{'cryptoComentList':cryptoCommentList,'today':today,'error_flag':error_flag})
 | en | 0.893052 | #form and model of normal comment #form and model of crypto comment # saves comment and returns comment models from database #error (for unregistered users, if they do the check wrong) #checking if the user is logged in # this is the part where the user is not logged in # Ordering comments from newest to oldest | 2.455746 | 2 |
intcode/handlers/io/std.py | JavierLuna/intcode | 0 | 6631267 | from intcode.interfaces.io_handler import BaseIOHandler
class StdIOHandler(BaseIOHandler):
def print(self, content: str) -> None:
print(content)
def input(self) -> str:
return input()
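# Illustrative usage sketch (not part of the original module): exercising the
# handler directly against stdin/stdout.
if __name__ == "__main__":
    handler = StdIOHandler()
    handler.print("Type something:")
    handler.print(f"You typed: {handler.input()}")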
| from intcode.interfaces.io_handler import BaseIOHandler
class StdIOHandler(BaseIOHandler):
def print(self, content: str) -> None:
print(content)
def input(self) -> str:
return input()
| none | 1 | 2.160846 | 2 |
|
update_frontmatter.py | mivanit/dendron-pandoc | 7 | 6631268 | <reponame>mivanit/dendron-pandoc
from typing import *
import os
import yaml
MY_REFS : List[str] = ['../refs.bib']
def keylist_access_nested_dict(
d : Dict[str,Any],
keys : List[str],
) -> Tuple[dict,str]:
"""given a keylist `keys`, return (x,y) where x[y] is d[keys]
by pretending that `d` can be accessed dotlist-style, with keys in the list being keys to successive nested dicts, we can provide both read and write access to the element of `d` pointed to by `keys`
### Parameters:
- `d : Dict[str,Any]`
dict to access
- `keys : List[str]`
list of keys to nested dict `d`
### Returns:
- `Tuple[dict,str]`
dict is the final layer dict which contains the element pointed to by `keys`, and the string is the last key in `keys`
"""
fin_dict : dict = d
for k in keys[:-1]:
if k in fin_dict:
fin_dict = fin_dict[k]
else:
fin_dict[k] = {}
fin_dict = fin_dict[k]
fin_key = keys[-1]
return (fin_dict,fin_key)
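# Illustrative example (not part of the original module) of the dotlist-style
# access described above, using hypothetical data; defined as a self-check and
# never called by the script itself.
def _example_keylist_access() -> None:
    d = {'a': {'b': {'c': 1}}}
    fin_dict, fin_key = keylist_access_nested_dict(d, ['a', 'b', 'c'])
    assert fin_dict[fin_key] == 1  # read d['a']['b']['c'] through the handle
    fin_dict[fin_key] = 2          # write back through the same handle
    assert d['a']['b']['c'] == 2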
def fm_add_to_list(
data : dict,
keylist : List[str],
insert_data : list,
) -> dict:
"""add things to the frontmatter
given `keylist`, append to `data[keylist[0]][keylist[1]][...]` if it exists and does not contain `insert_data`
if `data[keylist[0]][keylist[1]][...]` does not exist, create it and set it to `insert_data`
"""
fin_dict,fin_key = keylist_access_nested_dict(data,keylist)
if fin_key not in fin_dict:
fin_dict[fin_key] = insert_data
else:
for item in insert_data:
if item not in fin_dict[fin_key]:
fin_dict[fin_key].append(item)
return data
def fm_add_bib(
data : dict,
bibfiles : List[str] = MY_REFS,
) -> dict:
"""add the bib files to the frontmatter
we want it to look like
```yaml
bibliography: [../refs.bib]
```
"""
return fm_add_to_list(
data = data,
keylist = ['bibliography'],
insert_data = bibfiles,
)
def fm_add_filters(
data : dict,
filters : List[str] = ['$FILTERS$/get_markdown_links.py'],
) -> dict:
"""add the filters to the frontmatter
NOTE: this is for a different tool which allows defaults to be set in the frontmatter,
    instead of a separate file. That tool is kind of a mess, but email me if you're interested.
we want it to look like
```yaml
__defaults__:
filters:
- $FILTERS$/get_markdown_links.py
```
"""
return fm_add_to_list(
data = data,
keylist = ['__defaults__', 'filters'],
insert_data = filters,
)
DEFAULT_KEYORDER : List[str] = [
'title',
'desc',
'id',
'created',
'updated',
'bibliography',
'__defaults__',
'traitIds',
]
class PandocMarkdown(object):
def __init__(
self,
delim : str = '---',
loader : Callable[[str],dict] = yaml.safe_load,
keyorder : List[str] = DEFAULT_KEYORDER,
writer : Callable[[dict],str] = lambda x : yaml.dump(x, default_flow_style = None, sort_keys = False),
) -> None:
self.delim = delim
self.loader = loader
self.keyorder = keyorder
self.writer = writer
# get the first section and parse as yaml
self.yaml_data : Dict[str, Any] = None
# get the content
self.content : str = None
def load(self, filename : str) -> None:
"""load a file into the pandoc markdown object
### Parameters:
- `filename : str`
the filename to load
"""
with open(filename, "r") as f:
# split the document by yaml file front matter
sections : List[str] = f.read().split(self.delim)
# check the zeroth section is empty
if sections[0].strip():
raise ValueError(f"file does not start with yaml front matter, found at start of file: {sections[0]}")
if len(sections) < 3:
raise ValueError(f'missing sections in file {filename}, check delims')
# get the first section and parse as yaml
self.yaml_data : Dict[str, Any] = self.loader(sections[1])
# get the content
self.content : str = self.delim.join(sections[2:])
def dumps(self) -> str:
"""dumps both the front matter and content to a string
NOTE: we want this to be on a single line for compatibility with https://github.com/notZaki/PandocCiter, since that tool parses the bibliography in a weird way. hence, `self.writer` has `default_flow_style = None`
"""
if (self.yaml_data is None) or (self.content is None):
raise Exception('')
self.keyorder = self.keyorder + [
k for k in self.yaml_data
if k not in self.keyorder
]
# for k in self.keyorder:
# if not (k in self.yaml_data):
# raise KeyError(f'key {k} found in keyorder but not in yaml_data')
self.yaml_data = {
k : self.yaml_data[k]
for k in self.keyorder
if k in self.yaml_data
}
return '\n'.join([
self.delim,
self.writer(self.yaml_data).strip(),
self.delim,
self.content.lstrip(),
])
def modify_file_fm(file : str, apply_funcs : List[Callable]) -> None:
pdm : PandocMarkdown = PandocMarkdown()
pdm.load(file)
for func in apply_funcs:
pdm.yaml_data = func(pdm.yaml_data)
with open(file, "w") as f:
f.write(pdm.dumps())
def update_all_files_fm(
dir : str,
apply_funcs : List[Callable] = [fm_add_bib, fm_add_filters],
) -> None:
"""update the frontmatter of all files in a directory
### Parameters:
- `dir : str`
the directory to update
- `apply_funcs : List[Callable]`
list of functions to apply to the frontmatter
"""
for file in os.listdir(dir):
if file.endswith(".md"):
modify_file_fm(f'{dir.rstrip("/")}/{file}', apply_funcs)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("Usage: python update_frontmatter.py <filename>")
sys.exit(1)
update_all_files_fm(
dir = sys.argv[1],
apply_funcs = [fm_add_bib],
) | from typing import *
import os
import yaml
MY_REFS : List[str] = ['../refs.bib']
def keylist_access_nested_dict(
d : Dict[str,Any],
keys : List[str],
) -> Tuple[dict,str]:
"""given a keylist `keys`, return (x,y) where x[y] is d[keys]
by pretending that `d` can be accessed dotlist-style, with keys in the list being keys to successive nested dicts, we can provide both read and write access to the element of `d` pointed to by `keys`
### Parameters:
- `d : Dict[str,Any]`
dict to access
- `keys : List[str]`
list of keys to nested dict `d`
### Returns:
- `Tuple[dict,str]`
dict is the final layer dict which contains the element pointed to by `keys`, and the string is the last key in `keys`
"""
fin_dict : dict = d
for k in keys[:-1]:
if k in fin_dict:
fin_dict = fin_dict[k]
else:
fin_dict[k] = {}
fin_dict = fin_dict[k]
fin_key = keys[-1]
return (fin_dict,fin_key)
def fm_add_to_list(
data : dict,
keylist : List[str],
insert_data : list,
) -> dict:
"""add things to the frontmatter
given `keylist`, append to `data[keylist[0]][keylist[1]][...]` if it exists and does not contain `insert_data`
if `data[keylist[0]][keylist[1]][...]` does not exist, create it and set it to `insert_data`
"""
fin_dict,fin_key = keylist_access_nested_dict(data,keylist)
if fin_key not in fin_dict:
fin_dict[fin_key] = insert_data
else:
for item in insert_data:
if item not in fin_dict[fin_key]:
fin_dict[fin_key].append(item)
return data
def fm_add_bib(
data : dict,
bibfiles : List[str] = MY_REFS,
) -> dict:
"""add the bib files to the frontmatter
we want it to look like
```yaml
bibliography: [../refs.bib]
```
"""
return fm_add_to_list(
data = data,
keylist = ['bibliography'],
insert_data = bibfiles,
)
def fm_add_filters(
data : dict,
filters : List[str] = ['$FILTERS$/get_markdown_links.py'],
) -> dict:
"""add the filters to the frontmatter
NOTE: this is for a different tool which allows defaults to be set in the frontmatter,
    instead of a separate file. That tool is kind of a mess, but email me if you're interested.
we want it to look like
```yaml
__defaults__:
filters:
- $FILTERS$/get_markdown_links.py
```
"""
return fm_add_to_list(
data = data,
keylist = ['__defaults__', 'filters'],
insert_data = filters,
)
DEFAULT_KEYORDER : List[str] = [
'title',
'desc',
'id',
'created',
'updated',
'bibliography',
'__defaults__',
'traitIds',
]
class PandocMarkdown(object):
def __init__(
self,
delim : str = '---',
loader : Callable[[str],dict] = yaml.safe_load,
keyorder : List[str] = DEFAULT_KEYORDER,
writer : Callable[[dict],str] = lambda x : yaml.dump(x, default_flow_style = None, sort_keys = False),
) -> None:
self.delim = delim
self.loader = loader
self.keyorder = keyorder
self.writer = writer
# get the first section and parse as yaml
self.yaml_data : Dict[str, Any] = None
# get the content
self.content : str = None
def load(self, filename : str) -> None:
"""load a file into the pandoc markdown object
### Parameters:
- `filename : str`
the filename to load
"""
with open(filename, "r") as f:
# split the document by yaml file front matter
sections : List[str] = f.read().split(self.delim)
# check the zeroth section is empty
if sections[0].strip():
raise ValueError(f"file does not start with yaml front matter, found at start of file: {sections[0]}")
if len(sections) < 3:
raise ValueError(f'missing sections in file {filename}, check delims')
# get the first section and parse as yaml
self.yaml_data : Dict[str, Any] = self.loader(sections[1])
# get the content
self.content : str = self.delim.join(sections[2:])
def dumps(self) -> str:
"""dumps both the front matter and content to a string
NOTE: we want this to be on a single line for compatibility with https://github.com/notZaki/PandocCiter, since that tool parses the bibliography in a weird way. hence, `self.writer` has `default_flow_style = None`
"""
if (self.yaml_data is None) or (self.content is None):
raise Exception('')
self.keyorder = self.keyorder + [
k for k in self.yaml_data
if k not in self.keyorder
]
# for k in self.keyorder:
# if not (k in self.yaml_data):
# raise KeyError(f'key {k} found in keyorder but not in yaml_data')
self.yaml_data = {
k : self.yaml_data[k]
for k in self.keyorder
if k in self.yaml_data
}
return '\n'.join([
self.delim,
self.writer(self.yaml_data).strip(),
self.delim,
self.content.lstrip(),
])
def modify_file_fm(file : str, apply_funcs : List[Callable]) -> None:
pdm : PandocMarkdown = PandocMarkdown()
pdm.load(file)
for func in apply_funcs:
pdm.yaml_data = func(pdm.yaml_data)
with open(file, "w") as f:
f.write(pdm.dumps())
def update_all_files_fm(
dir : str,
apply_funcs : List[Callable] = [fm_add_bib, fm_add_filters],
) -> None:
"""update the frontmatter of all files in a directory
### Parameters:
- `dir : str`
the directory to update
- `apply_funcs : List[Callable]`
list of functions to apply to the frontmatter
"""
for file in os.listdir(dir):
if file.endswith(".md"):
modify_file_fm(f'{dir.rstrip("/")}/{file}', apply_funcs)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("Usage: python update_frontmatter.py <filename>")
sys.exit(1)
update_all_files_fm(
dir = sys.argv[1],
apply_funcs = [fm_add_bib],
) | en | 0.754744 | given a keylist `keys`, return (x,y) where x[y] is d[keys] by pretending that `d` can be accessed dotlist-style, with keys in the list being keys to successive nested dicts, we can provide both read and write access to the element of `d` pointed to by `keys` ### Parameters: - `d : Dict[str,Any]` dict to access - `keys : List[str]` list of keys to nested dict `d` ### Returns: - `Tuple[dict,str]` dict is the final layer dict which contains the element pointed to by `keys`, and the string is the last key in `keys` add things to the frontmatter given `keylist`, append to `data[keylist[0]][keylist[1]][...]` if it exists and does not contain `insert_data` if `data[keylist[0]][keylist[1]][...]` does not exist, create it and set it to `insert_data` add the bib files to the frontmatter we want it to look like ```yaml bibliography: [../refs.bib] ``` add the filters to the frontmatter NOTE: this is for a different tool which allows defaults to be set in the frontmatter, instead of a separate file. That tools is kind of a mess, but email me if you're interested. we want it to look like ```yaml __defaults__: filters: - $FILTERS$/get_markdown_links.py ``` # get the first section and parse as yaml # get the content load a file into the pandoc markdown object ### Parameters: - `filename : str` the filename to load # split the document by yaml file front matter # check the zeroth section is empty # get the first section and parse as yaml # get the content dumps both the front matter and content to a string NOTE: we want this to be on a single line for compatibility with https://github.com/notZaki/PandocCiter, since that tool parses the bibliography in a weird way. hence, `self.writer` has `default_flow_style = None` # for k in self.keyorder: # if not (k in self.yaml_data): # raise KeyError(f'key {k} found in keyorder but not in yaml_data') update the frontmatter of all files in a directory ### Parameters: - `dir : str` the directory to update - `apply_funcs : List[Callable]` list of functions to apply to the frontmatter | 3.539023 | 4 |
nipype/testing/tests/test_utils.py | effigies/nipype | 0 | 6631269 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Test testing utilities
"""
import os
import warnings
import subprocess
from mock import patch, MagicMock
from nipype.testing.utils import TempFATFS
from nose.tools import assert_true, assert_raises
def test_tempfatfs():
try:
fatfs = TempFATFS()
except (IOError, OSError):
warnings.warn("Cannot mount FAT filesystems with FUSE")
else:
with fatfs as tmpdir:
yield assert_true, os.path.exists(tmpdir)
@patch('subprocess.check_call', MagicMock(
side_effect=subprocess.CalledProcessError('','')))
def test_tempfatfs_calledprocesserror():
try:
TempFATFS()
except IOError as e:
assert_true(isinstance(e, IOError))
assert_true(isinstance(e.__cause__, subprocess.CalledProcessError))
else:
assert_true(False)
@patch('subprocess.check_call', MagicMock())
@patch('subprocess.Popen', MagicMock(side_effect=OSError()))
def test_tempfatfs_oserror():
try:
TempFATFS()
except IOError as e:
assert_true(isinstance(e, IOError))
assert_true(isinstance(e.__cause__, OSError))
else:
assert_true(False)
| # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Test testing utilities
"""
import os
import warnings
import subprocess
from mock import patch, MagicMock
from nipype.testing.utils import TempFATFS
from nose.tools import assert_true, assert_raises
def test_tempfatfs():
try:
fatfs = TempFATFS()
except (IOError, OSError):
warnings.warn("Cannot mount FAT filesystems with FUSE")
else:
with fatfs as tmpdir:
yield assert_true, os.path.exists(tmpdir)
@patch('subprocess.check_call', MagicMock(
side_effect=subprocess.CalledProcessError('','')))
def test_tempfatfs_calledprocesserror():
try:
TempFATFS()
except IOError as e:
assert_true(isinstance(e, IOError))
assert_true(isinstance(e.__cause__, subprocess.CalledProcessError))
else:
assert_true(False)
@patch('subprocess.check_call', MagicMock())
@patch('subprocess.Popen', MagicMock(side_effect=OSError()))
def test_tempfatfs_oserror():
try:
TempFATFS()
except IOError as e:
assert_true(isinstance(e, IOError))
assert_true(isinstance(e.__cause__, OSError))
else:
assert_true(False)
| en | 0.328576 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: Test testing utilities | 2.005188 | 2 |
examples/dagster_examples/airline_demo/types.py | vatervonacht/dagster | 0 | 6631270 | """Type definitions for the airline_demo."""
from collections import namedtuple
import sqlalchemy
from dagster import as_dagster_type
from dagster.core.types.dagster_type import create_string_type
AirlineDemoResources = namedtuple(
'AirlineDemoResources',
('spark', 's3', 'db_url', 'db_engine', 'db_dialect', 'redshift_s3_temp_dir', 'db_load'),
)
SqlAlchemyEngineType = as_dagster_type(
sqlalchemy.engine.Connectable,
name='SqlAlchemyEngineType',
description='A SqlAlchemy Connectable',
)
SqlTableName = create_string_type('SqlTableName', description='The name of a database table')
| """Type definitions for the airline_demo."""
from collections import namedtuple
import sqlalchemy
from dagster import as_dagster_type
from dagster.core.types.dagster_type import create_string_type
AirlineDemoResources = namedtuple(
'AirlineDemoResources',
('spark', 's3', 'db_url', 'db_engine', 'db_dialect', 'redshift_s3_temp_dir', 'db_load'),
)
SqlAlchemyEngineType = as_dagster_type(
sqlalchemy.engine.Connectable,
name='SqlAlchemyEngineType',
description='A SqlAlchemy Connectable',
)
SqlTableName = create_string_type('SqlTableName', description='The name of a database table')
| en | 0.624777 | Type definitions for the airline_demo. | 2.6634 | 3 |
dashathon/scraping/scrape_berlin_data.py | wfrierson/dashathon | 1 | 6631271 | from dashathon.scraping.scraping_methods import scrape_berlin_marathon_urls
from dashathon.scraping.scraping_methods import scrape_berlin_marathon
headers_berlin = ['year', 'bib', 'age_group', 'gender', 'country', 'rank_gender', 'rank_age_group', '5k', '10k', '15k',
'20k', 'half', '25k', '30k', '35k', '40k', 'finish']
print('Scraping URLs: 2017 M')
berlin_marathon_urls_2017_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2017/', year=2017,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2017 M')
scrape_berlin_marathon(path_input='berlin_marathon_2017_M_urls.csv', path_output='berlin_marathon_2017_M.csv',
path_error='berlin_marathon_2017_M_error_log.csv', year=2017, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2017_M)
print('Scraping URLs: 2017 W')
berlin_marathon_urls_2017_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2017/', year=2017,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2017 W')
scrape_berlin_marathon(path_input='berlin_marathon_2017_W_urls.csv', path_output='berlin_marathon_2017_W.csv',
path_error='berlin_marathon_2017_W_error_log.csv', year=2017, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2017_W)
print('Scraping URLs: 2016 M')
berlin_marathon_urls_2016_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2016/', year=2016,
event='MAL_99999905C9AF3F0000000945', gender='M',
num_results_per_page=100)
print('Scraping Split Times: 2016 M')
scrape_berlin_marathon(path_input='berlin_marathon_2016_M_urls.csv', path_output='berlin_marathon_2016_M.csv',
path_error='berlin_marathon_2016_M_error_log.csv', year=2016, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2016_M)
print('Scraping URLs: 2016 W')
berlin_marathon_urls_2016_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2016/', year=2016,
event='MAL_99999905C9AF3F0000000945', gender='W',
num_results_per_page=100)
print('Scraping Split Times: 2016 W')
scrape_berlin_marathon(path_input='berlin_marathon_2016_W_urls.csv', path_output='berlin_marathon_2016_W.csv',
path_error='berlin_marathon_2016_W_error_log.csv', year=2016, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2016_W)
print('Scraping URLs: 2015 M')
berlin_marathon_urls_2015_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2015/', year=2015,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2015 M')
scrape_berlin_marathon(path_input='berlin_marathon_2015_M_urls.csv', path_output='berlin_marathon_2015_M.csv',
path_error='berlin_marathon_2015_M_error_log.csv', year=2015, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2015_M)
print('Scraping URLs: 2015 W')
berlin_marathon_urls_2015_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2015/', year=2015,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2015 W')
scrape_berlin_marathon(path_input='berlin_marathon_2015_W_urls.csv', path_output='berlin_marathon_2015_W.csv',
path_error='berlin_marathon_2015_W_error_log.csv', year=2015, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2015_W)
print('Scraping URLs: 2014 M')
berlin_marathon_urls_2014_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2014/', year=2014,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2014 M')
scrape_berlin_marathon(path_input='berlin_marathon_2014_M_urls.csv', path_output='berlin_marathon_2014_M.csv',
path_error='berlin_marathon_2014_M_error_log.csv', year=2014, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2014_M)
print('Scraping URLs: 2014 W')
berlin_marathon_urls_2014_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2014/', year=2014,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2014 W')
scrape_berlin_marathon(path_input='berlin_marathon_2014_W_urls.csv', path_output='berlin_marathon_2014_W.csv',
path_error='berlin_marathon_2014_W_error_log.csv', year=2014, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2014_W)
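# Illustrative refactor sketch (not part of the original script): the repeated
# year/gender blocks above could be driven by one helper. The 2016 event code
# differs from the other years, so callers would pass it explicitly; the file
# name pattern is assumed to match the one used above.
def scrape_year_gender(year, gender, event='MAL'):
    prefix = f'berlin_marathon_{year}_{gender}'
    urls = scrape_berlin_marathon_urls(url=f'http://results.scc-events.com/{year}/', year=year,
                                       event=event, gender=gender, num_results_per_page=100)
    scrape_berlin_marathon(path_input=f'{prefix}_urls.csv', path_output=f'{prefix}.csv',
                           path_error=f'{prefix}_error_log.csv', year=year, gender=gender,
                           headers=headers_berlin, df_urls=urls)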
| from dashathon.scraping.scraping_methods import scrape_berlin_marathon_urls
from dashathon.scraping.scraping_methods import scrape_berlin_marathon
headers_berlin = ['year', 'bib', 'age_group', 'gender', 'country', 'rank_gender', 'rank_age_group', '5k', '10k', '15k',
'20k', 'half', '25k', '30k', '35k', '40k', 'finish']
print('Scraping URLs: 2017 M')
berlin_marathon_urls_2017_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2017/', year=2017,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2017 M')
scrape_berlin_marathon(path_input='berlin_marathon_2017_M_urls.csv', path_output='berlin_marathon_2017_M.csv',
path_error='berlin_marathon_2017_M_error_log.csv', year=2017, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2017_M)
print('Scraping URLs: 2017 W')
berlin_marathon_urls_2017_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2017/', year=2017,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2017 W')
scrape_berlin_marathon(path_input='berlin_marathon_2017_W_urls.csv', path_output='berlin_marathon_2017_W.csv',
path_error='berlin_marathon_2017_W_error_log.csv', year=2017, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2017_W)
print('Scraping URLs: 2016 M')
berlin_marathon_urls_2016_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2016/', year=2016,
event='MAL_99999905C9AF3F0000000945', gender='M',
num_results_per_page=100)
print('Scraping Split Times: 2016 M')
scrape_berlin_marathon(path_input='berlin_marathon_2016_M_urls.csv', path_output='berlin_marathon_2016_M.csv',
path_error='berlin_marathon_2016_M_error_log.csv', year=2016, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2016_M)
print('Scraping URLs: 2016 W')
berlin_marathon_urls_2016_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2016/', year=2016,
event='MAL_99999905C9AF3F0000000945', gender='W',
num_results_per_page=100)
print('Scraping Split Times: 2016 W')
scrape_berlin_marathon(path_input='berlin_marathon_2016_W_urls.csv', path_output='berlin_marathon_2016_W.csv',
path_error='berlin_marathon_2016_W_error_log.csv', year=2016, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2016_W)
print('Scraping URLs: 2015 M')
berlin_marathon_urls_2015_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2015/', year=2015,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2015 M')
scrape_berlin_marathon(path_input='berlin_marathon_2015_M_urls.csv', path_output='berlin_marathon_2015_M.csv',
path_error='berlin_marathon_2015_M_error_log.csv', year=2015, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2015_M)
print('Scraping URLs: 2015 W')
berlin_marathon_urls_2015_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2015/', year=2015,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2015 W')
scrape_berlin_marathon(path_input='berlin_marathon_2015_W_urls.csv', path_output='berlin_marathon_2015_W.csv',
path_error='berlin_marathon_2015_W_error_log.csv', year=2015, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2015_W)
print('Scraping URLs: 2014 M')
berlin_marathon_urls_2014_M = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2014/', year=2014,
event='MAL', gender='M', num_results_per_page=100)
print('Scraping Split Times: 2014 M')
scrape_berlin_marathon(path_input='berlin_marathon_2014_M_urls.csv', path_output='berlin_marathon_2014_M.csv',
path_error='berlin_marathon_2014_M_error_log.csv', year=2014, gender='M', headers=headers_berlin,
df_urls=berlin_marathon_urls_2014_M)
print('Scraping URLs: 2014 W')
berlin_marathon_urls_2014_W = scrape_berlin_marathon_urls(url='http://results.scc-events.com/2014/', year=2014,
event='MAL', gender='W', num_results_per_page=100)
print('Scraping Split Times: 2014 W')
scrape_berlin_marathon(path_input='berlin_marathon_2014_W_urls.csv', path_output='berlin_marathon_2014_W.csv',
path_error='berlin_marathon_2014_W_error_log.csv', year=2014, gender='W', headers=headers_berlin,
df_urls=berlin_marathon_urls_2014_W)
| none | 1 | 3.127021 | 3 |
|
test/test_api/test_api.py | fmagin/jedi | 0 | 6631272 | <reponame>fmagin/jedi
"""
Test all things related to the ``jedi.api`` module.
"""
import os
import sys
from textwrap import dedent
import pytest
from pytest import raises
from parso import cache
from jedi._compatibility import unicode
from jedi import preload_module
from jedi.inference.gradual import typeshed
from test.helpers import test_dir
@pytest.mark.skipif(sys.version_info[0] == 2, reason="Ignore Python 2, EoL")
def test_preload_modules():
def check_loaded(*modules):
for grammar_cache in cache.parser_cache.values():
if None in grammar_cache:
break
# Filter the typeshed parser cache.
typeshed_cache_count = sum(
1 for path in grammar_cache
if path is not None and path.startswith(typeshed.TYPESHED_PATH)
)
# +1 for None module (currently used)
assert len(grammar_cache) - typeshed_cache_count == len(modules) + 1
for i in modules:
assert [i in k for k in grammar_cache.keys() if k is not None]
old_cache = cache.parser_cache.copy()
cache.parser_cache.clear()
try:
preload_module('sys')
check_loaded() # compiled (c_builtin) modules shouldn't be in the cache.
preload_module('types', 'token')
check_loaded('types', 'token')
finally:
cache.parser_cache.update(old_cache)
def test_empty_script(Script):
assert Script('')
def test_line_number_errors(Script):
"""
Script should raise a ValueError if line/column numbers are not in a
valid range.
"""
s = 'hello'
# lines
with raises(ValueError):
Script(s, 2, 0)
with raises(ValueError):
Script(s, 0, 0)
# columns
with raises(ValueError):
Script(s, 1, len(s) + 1)
with raises(ValueError):
Script(s, 1, -1)
# ok
Script(s, 1, 0)
Script(s, 1, len(s))
def _check_number(Script, source, result='float'):
completions = Script(source).completions()
assert completions[0].parent().name == result
def test_completion_on_number_literals(Script):
# No completions on an int literal (is a float).
assert [c.name for c in Script('1. ').completions()] \
== ['and', 'if', 'in', 'is', 'not', 'or']
# Multiple points after an int literal basically mean that there's a float
# and a call after that.
_check_number(Script, '1..')
_check_number(Script, '1.0.')
# power notation
_check_number(Script, '1.e14.')
_check_number(Script, '1.e-3.')
_check_number(Script, '9e3.')
assert Script('1.e3..').completions() == []
assert Script('1.e-13..').completions() == []
def test_completion_on_hex_literals(Script):
assert Script('0x1..').completions() == []
    _check_number(Script, '0x1.', 'int')  # hexadecimal
# Completing binary literals doesn't work if they are not actually binary
# (invalid statements).
assert Script('0b2.b').completions() == []
_check_number(Script, '0b1.', 'int') # binary
_check_number(Script, '0x2e.', 'int')
_check_number(Script, '0xE7.', 'int')
_check_number(Script, '0xEa.', 'int')
# theoretically, but people can just check for syntax errors:
assert Script('0x.').completions() == []
def test_completion_on_complex_literals(Script):
assert Script('1j..').completions() == []
_check_number(Script, '1j.', 'complex')
_check_number(Script, '44.j.', 'complex')
_check_number(Script, '4.0j.', 'complex')
# No dot no completion - I thought, but 4j is actually a literal after
# which a keyword like or is allowed. Good times, haha!
# However this has been disabled again, because it apparently annoyed
# users. So no completion after j without a space :)
assert not Script('4j').completions()
assert ({c.name for c in Script('4j ').completions()} ==
{'if', 'and', 'in', 'is', 'not', 'or'})
def test_goto_assignments_on_non_name(Script, environment):
assert Script('for').goto_assignments() == []
assert Script('assert').goto_assignments() == []
assert Script('True').goto_assignments() == []
def test_goto_definitions_on_non_name(Script):
assert Script('import x', column=0).goto_definitions() == []
def test_goto_definitions_on_generator(Script):
def_, = Script('def x(): yield 1\ny=x()\ny').goto_definitions()
assert def_.name == 'Generator'
def test_goto_definition_not_multiple(Script):
"""
There should be only one Definition result if it leads back to the same
origin (e.g. instance method)
"""
s = dedent('''\
import random
class A():
def __init__(self, a):
self.a = 3
def foo(self):
pass
if random.randint(0, 1):
a = A(2)
else:
a = A(1)
a''')
assert len(Script(s).goto_definitions()) == 1
def test_usage_description(Script):
descs = [u.description for u in Script("foo = ''; foo").usages()]
assert set(descs) == {"foo = ''", 'foo'}
def test_get_line_code(Script):
def get_line_code(source, line=None, **kwargs):
return Script(source, line=line).completions()[0].get_line_code(**kwargs)
# On builtin
assert get_line_code('abs') == 'def abs(__n: SupportsAbs[_T]) -> _T: ...\n'
# On custom code
first_line = 'def foo():\n'
line = ' foo'
code = first_line + line
assert get_line_code(code) == first_line
# With before/after
code = code + '\nother_line'
assert get_line_code(code, line=2) == first_line
assert get_line_code(code, line=2, after=1) == first_line + line + '\n'
assert get_line_code(code, line=2, after=2, before=1) == code
# Should just be the whole thing, since there are no more lines on both
# sides.
assert get_line_code(code, line=2, after=3, before=3) == code
def test_get_line_code_on_builtin(Script, disable_typeshed):
abs_ = Script('abs').completions()[0]
assert abs_.name == 'abs'
assert abs_.get_line_code() == ''
assert abs_.line is None
def test_goto_assignments_follow_imports(Script):
code = dedent("""
import inspect
inspect.isfunction""")
definition, = Script(code, column=0).goto_assignments(follow_imports=True)
assert 'inspect.py' in definition.module_path
assert (definition.line, definition.column) == (1, 0)
definition, = Script(code).goto_assignments(follow_imports=True)
assert 'inspect.py' in definition.module_path
assert (definition.line, definition.column) > (1, 0)
code = '''def param(p): pass\nparam(1)'''
start_pos = 1, len('def param(')
script = Script(code, *start_pos)
definition, = script.goto_assignments(follow_imports=True)
assert (definition.line, definition.column) == start_pos
assert definition.name == 'p'
result, = definition.goto_assignments()
assert result.name == 'p'
result, = definition.infer()
assert result.name == 'int'
result, = result.infer()
assert result.name == 'int'
definition, = script.goto_assignments()
assert (definition.line, definition.column) == start_pos
d, = Script('a = 1\na').goto_assignments(follow_imports=True)
assert d.name == 'a'
def test_goto_module(Script):
def check(line, expected, follow_imports=False):
script = Script(path=path, line=line)
module, = script.goto_assignments(follow_imports=follow_imports)
assert module.module_path == expected
base_path = os.path.join(os.path.dirname(__file__), 'simple_import')
path = os.path.join(base_path, '__init__.py')
check(1, os.path.join(base_path, 'module.py'))
check(1, os.path.join(base_path, 'module.py'), follow_imports=True)
check(5, os.path.join(base_path, 'module2.py'))
def test_goto_definition_cursor(Script):
s = ("class A():\n"
" def _something(self):\n"
" return\n"
" def different_line(self,\n"
" b):\n"
" return\n"
"A._something\n"
"A.different_line"
)
in_name = 2, 9
under_score = 2, 8
cls = 2, 7
should1 = 7, 10
diff_line = 4, 10
should2 = 8, 10
def get_def(pos):
return [d.description for d in Script(s, *pos).goto_definitions()]
in_name = get_def(in_name)
under_score = get_def(under_score)
should1 = get_def(should1)
should2 = get_def(should2)
diff_line = get_def(diff_line)
assert should1 == in_name
assert should1 == under_score
assert should2 == diff_line
assert get_def(cls) == []
def test_no_statement_parent(Script):
source = dedent("""
def f():
pass
class C:
pass
variable = f if random.choice([0, 1]) else C""")
defs = Script(source, column=3).goto_definitions()
defs = sorted(defs, key=lambda d: d.line)
assert [d.description for d in defs] == ['def f', 'class C']
def test_backslash_continuation_and_bracket(Script):
code = dedent(r"""
x = 0
a = \
[1, 2, 3, (x)]""")
lines = code.splitlines()
column = lines[-1].index('(')
def_, = Script(code, line=len(lines), column=column).goto_definitions()
assert def_.name == 'int'
def test_goto_follow_builtin_imports(Script):
s = Script('import sys; sys')
d, = s.goto_assignments(follow_imports=True)
assert d.in_builtin_module() is True
d, = s.goto_assignments(follow_imports=True, follow_builtin_imports=True)
assert d.in_builtin_module() is True
def test_docstrings_for_completions(Script):
for c in Script('').completions():
assert isinstance(c.docstring(), (str, unicode))
def test_fuzzy_completion(Script):
script = Script('string = "hello"\nstring.upper')
assert ['isupper',
'upper'] == [comp.name for comp in script.completions(fuzzy=True)]
def test_math_fuzzy_completion(Script, environment):
script = Script('import math\nmath.og')
expected = ['copysign', 'log', 'log10', 'log1p']
if environment.version_info.major >= 3:
expected.append('log2')
completions = script.completions(fuzzy=True)
assert expected == [comp.name for comp in completions]
for c in completions:
assert c.complete is None
def test_file_fuzzy_completion(Script):
path = os.path.join(test_dir, 'completion')
script = Script('"{}/ep08_i'.format(path))
assert ['pep0484_basic.py"', 'pep0484_typing.py"'] \
== [comp.name for comp in script.completions(fuzzy=True)]
aws-inventory/lambda/report-accounts.py | jchrisfarris/antiope | 0 | 6631273
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from mako.template import Template
from antiope.aws_account import *
from antiope.config import AccountLookupError
from common import *
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
assume_role_link = "<a href=\"https://signin.aws.amazon.com/switchrole?account={}&roleName={}&displayName={}\">{}</a>"
assume_role_url = "https://signin.aws.amazon.com/switchrole?account={}&roleName={}&displayName={}"
RESOURCE_PATH = "organizations/account"
# Lambda main routine
def handler(event, context):
logger.info("Received event: " + json.dumps(event, sort_keys=True))
    # We will make an HTML table and a JSON file with this data
json_data = []
# Cache account_name for all the parent accounts
payers = {}
# Data to be saved to S3 and used to generate the template report
json_data = {"accounts": []}
    # The account_list.txt report is generated from this list
account_list = []
# Get and then sort the list of accounts by name, case insensitive.
active_accounts = get_active_accounts()
active_accounts.sort(key=lambda x: x.account_name.lower())
for a in active_accounts:
logger.info(a.account_name)
# Add the account ID to this array
account_list.append(str(a.account_id))
# We don't want to save the entire object's attributes.
j = a.db_record.copy()
try:
if str(a.payer_id) in payers:
j['payer_name'] = payers[str(a.payer_id)]
else:
payer = AWSAccount(str(a.payer_id))
j['payer_name'] = payer.account_name
payers[payer.account_id] = payer.account_name
except LookupError:
logger.debug("Unable to find the payer in the database. Must be an orphan")
j['payer_name'] = "Unknown Payer"
payers[str(a.payer_id)] = "Unknown Payer"
# Build the cross account role link
if hasattr(a, 'cross_account_role') and a.cross_account_role is not None:
j['assume_role_link'] = assume_role_link.format(a.account_id, os.environ['ROLE_NAME'], a.account_name, os.environ['ROLE_NAME'])
else:
j['assume_role_link'] = "No Cross Account Role"
json_data['accounts'].append(j)
save_account_as_resource(a)
json_data['timestamp'] = datetime.datetime.now()
json_data['account_count'] = len(active_accounts)
json_data['bucket'] = os.environ['INVENTORY_BUCKET']
    with open("html_templates/account_inventory.html", "r") as fh:
        mako_body = fh.read()
    result = Template(mako_body).render(**json_data)
# Save HTML and json to S3
s3_client = boto3.client('s3')
try:
response = s3_client.put_object(
# ACL='public-read',
Body=result,
Bucket=os.environ['INVENTORY_BUCKET'],
ContentType='text/html',
Key='Reports/account_inventory.html',
)
# Save a txt file of all the active account IDs
response = s3_client.put_object(
# ACL='public-read',
Body="\n".join(account_list),
Bucket=os.environ['INVENTORY_BUCKET'],
ContentType='text/plain',
Key='Reports/account_list.txt',
)
# Save the JSON to S3
response = s3_client.put_object(
# ACL='public-read',
Body=json.dumps(json_data, sort_keys=True, indent=2, default=str),
Bucket=os.environ['INVENTORY_BUCKET'],
ContentType='application/json',
Key='Reports/account_inventory.json',
)
except ClientError as e:
logger.error("ClientError saving report: {}".format(e))
raise
return(event)
def save_account_as_resource(target_account):
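    # Normalize the account's database record into Antiope's resource-item format
    # and hand it to save_resource_to_s3() for storage.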
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::Organizations::Account"
resource_item['source'] = "Antiope"
resource_item['ARN'] = target_account.db_record['payer_record']['Arn']
resource_item['resourceCreationTime'] = target_account.db_record['payer_record']['JoinedTimestamp']
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['configuration'] = target_account.db_record.copy()
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = target_account.account_id
resource_item['resourceName'] = target_account.account_name
resource_item['errors'] = {}
if hasattr(target_account, 'cross_account_role') and target_account.cross_account_role is not None:
role_name = target_account.cross_account_role.split("/")[-1]
resource_item['supplementaryConfiguration']['assume_role_url'] = assume_role_url.format(target_account.account_id, role_name, target_account.account_name)
save_resource_to_s3(RESOURCE_PATH, f"{target_account.account_id}", resource_item)
build.py | yjjnls/tesseract | 0 | 6631274
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bincrafters import build_template_default
import os
if __name__ == "__main__":
builder = build_template_default.get_builder()
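    # When EMSCRIPTEN_VERSIONS is set, add an emcc build configuration for every
    # requested compiler version and build type.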
if os.environ.get('EMSCRIPTEN_VERSIONS'):
for version in os.environ['EMSCRIPTEN_VERSIONS'].split(','):
for build_type in os.environ.get('CONAN_BUILD_TYPES','Debug').split(','):
builder.add(settings={
"compiler": "emcc",
"compiler.libcxx":'libcxxabi',
"build_type": build_type,
"compiler.version": version
})
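    # Keep gcc/clang configurations only when CONAN_GCC_VERSIONS / CONAN_CLANG_VERSIONS are set.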
items = []
for item in builder.items:
if not os.environ.get('CONAN_GCC_VERSIONS') and item.settings['compiler'] == 'gcc':
continue
if not os.environ.get('CONAN_CLANG_VERSIONS') and item.settings['compiler'] == 'clang':
continue
items.append(item)
builder.items = items
builder.run()
techminer/co_word_analysis.py | jdvelasq/techminer-new | 1 | 6631275
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas as pd
from sklearn.manifold import MDS
import techminer.core.dashboard as dash
from techminer.core import (
CA,
DASH,
TF_matrix,
TFIDF_matrix,
add_counters_to_axis,
clustering,
corpus_filter,
limit_to_exclude,
normalize_network,
sort_by_axis,
cluster_table_to_list,
cluster_table_to_python_code,
keywords_coverage,
)
from techminer.plots import (
ax_text_node_labels,
counters_to_node_sizes,
expand_ax_limits,
set_spines_invisible,
xy_clusters_plot,
xy_cluster_members_plot,
)
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
def __init__(
self,
data,
limit_to,
exclude,
years_range,
clusters=None,
cluster=None,
):
##
if years_range is not None:
initial_year, final_year = years_range
data = data[(data.Year >= initial_year) & (data.Year <= final_year)]
##
## Filter for cluster members
##
if clusters is not None and cluster is not None:
data = corpus_filter(data=data, clusters=clusters, cluster=cluster)
self.data = data
self.limit_to = limit_to
self.exclude = exclude
self.column = None
self.min_occurrence = None
self.max_items = None
self.normalization = None
self.clustering_method = None
self.n_clusters = None
self.affinity = None
self.linkage = None
self.random_state = None
self.x_axis = None
self.y_axis = None
self.top_n = None
self.colors = None
self.width = None
self.height = None
def apply(self):
##
## Concept mapping
## https://tlab.it/en/allegati/help_en_online/mmappe2.htm
##
##
## Co-occurrence matrix
##
TF_matrix_ = TF_matrix(
data=self.data,
column=self.column,
scheme=None,
min_occurrence=self.min_occurrence,
)
##
## Limit to/Exclude
##
TF_matrix_ = limit_to_exclude(
data=TF_matrix_,
axis=1,
column=self.column,
limit_to=self.limit_to,
exclude=self.exclude,
)
##
## Select max items
##
TF_matrix_ = add_counters_to_axis(
X=TF_matrix_, axis=1, data=self.data, column=self.column
)
TF_matrix_ = sort_by_axis(
data=TF_matrix_, sort_by="Num Documents", ascending=False, axis=1
)
TF_matrix_ = TF_matrix_[TF_matrix_.columns[: self.max_items]]
if len(TF_matrix_.columns) > self.max_items:
top_items = TF_matrix_.sum(axis=0)
top_items = top_items.sort_values(ascending=False)
top_items = top_items.head(self.max_items)
TF_matrix_ = TF_matrix_.loc[:, top_items.index]
rows = TF_matrix_.sum(axis=1)
rows = rows[rows > 0]
TF_matrix_ = TF_matrix_.loc[rows.index, :]
##
## Co-occurrence matrix and association index
##
X = np.matmul(TF_matrix_.transpose().values, TF_matrix_.values)
X = pd.DataFrame(X, columns=TF_matrix_.columns, index=TF_matrix_.columns)
X = normalize_network(X=X, normalization=self.normalization)
##
## Clustering of the dissimilarity matrix
##
(
self.n_clusters,
self.labels_,
self.cluster_members_,
self.cluster_centers_,
self.cluster_names_,
) = clustering(
X=(1 - X),
method=self.clustering_method,
n_clusters=self.n_clusters,
affinity=self.affinity,
linkage=self.linkage,
random_state=self.random_state,
top_n=self.top_n,
name_prefix="Cluster {}",
)
self.X_ = X
##
## Cluster co-occurrence
##
M = X.copy()
M["CLUSTER"] = self.labels_
M = M.groupby("CLUSTER").sum()
#
M = M.transpose()
M["CLUSTER"] = self.labels_
M = M.groupby("CLUSTER").sum()
#
M.columns = ["Cluster {}".format(i) for i in range(self.n_clusters)]
M.index = M.columns
#
self.cluster_co_occurrence_ = M
##
## Strategic Map
##
## clusters name
strategic_map = pd.DataFrame(
self.cluster_names_, columns=["Cluster name"], index=M.columns
)
strategic_map["Density"] = 0.0
strategic_map["Centrality"] = 0.0
## Density -- internal conections
for cluster in M.columns:
strategic_map.at[cluster, "Density"] = M[cluster][cluster]
## Centrality -- outside conections
strategic_map["Centrality"] = M.sum()
strategic_map["Centrality"] = (
strategic_map["Centrality"] - strategic_map["Density"]
)
self.strategic_map_ = strategic_map
def mds_keywords_map(self):
##
## Compute co-occurrence matrix
##
self.apply()
X = self.X_.copy()
##
## MDS
##
embedding = MDS(n_components=2)
X_transformed = embedding.fit_transform(
1 - X,
)
x_axis = X_transformed[:, 0]
y_axis = X_transformed[:, 1]
##
## Plot
##
return xy_cluster_members_plot(
x=x_axis,
y=y_axis,
x_axis_at=0,
y_axis_at=0,
labels=self.labels_,
keywords=X.index,
color_scheme=self.colors,
xlabel="Dim-0",
ylabel="Dim-1",
figsize=(self.width, self.height),
)
def mds_cluster_map(self):
##
## Compute co-occurrence matrix
##
self.apply()
X = self.X_.copy()
##
## MDS
##
embedding = MDS(n_components=2)
X_transformed = embedding.fit_transform(
1 - X,
)
X_transformed = pd.DataFrame(X_transformed, columns=["x_axis", "y_axis"])
X_transformed["CLUSTER"] = self.labels_
X_transformed = X_transformed.groupby(["CLUSTER"], as_index=True).mean()
X_transformed = X_transformed.sort_index(axis=0)
##
## Cluster coordinates
##
x_axis = X_transformed.x_axis.tolist()
y_axis = X_transformed.y_axis.tolist()
##
## Cluster names
##
labels = [
"CLUST_{} {}".format(index, label)
for index, label in enumerate(self.cluster_names_)
]
return xy_clusters_plot(
x=x_axis,
y=y_axis,
x_axis_at=0,
y_axis_at=0,
labels=labels,
node_sizes=counters_to_node_sizes(labels),
color_scheme=self.colors,
xlabel="Dim-{}".format(self.x_axis),
ylabel="Dim-{}".format(self.y_axis),
figsize=(self.width, self.height),
)
def mds_keywords_by_cluster_table(self):
self.apply()
return self.cluster_members_
def mds_keywords_by_cluster_list(self):
self.apply()
return cluster_table_to_list(self.cluster_members_)
def mds_keywords_by_cluster_python_code(self):
self.apply()
return cluster_table_to_python_code(self.column, self.cluster_members_)
def ca_keywords_map(self):
##
## Compute co-occurrence matrix
##
self.apply()
X = self.X_.copy()
##
## CA
##
ca = CA()
ca.fit(1 - X)
X_transformed = ca.principal_coordinates_cols_
x_axis = X_transformed.loc[:, X_transformed.columns[self.x_axis]]
y_axis = X_transformed.loc[:, X_transformed.columns[self.y_axis]]
##
## Plot
##
return xy_cluster_members_plot(
x=x_axis,
y=y_axis,
x_axis_at=0,
y_axis_at=0,
labels=self.labels_,
keywords=X.index,
color_scheme=self.colors,
xlabel="Dim-0",
ylabel="Dim-1",
figsize=(self.width, self.height),
)
def ca_cluster_map(self):
##
## Compute co-occurrence matrix
##
self.apply()
X = self.X_.copy()
##
## CA
##
ca = CA()
ca.fit(1 - X)
X_transformed = ca.principal_coordinates_cols_
x_axis = X_transformed.loc[:, X_transformed.columns[self.x_axis]]
y_axis = X_transformed.loc[:, X_transformed.columns[self.y_axis]]
X_transformed = pd.DataFrame(
{"x_axis": x_axis, "y_axis": y_axis, "CLUSTER": self.labels_}
)
X_transformed = X_transformed.groupby(["CLUSTER"], as_index=True).mean()
X_transformed = X_transformed.sort_index(axis=0)
##
## Cluster coordinates
##
x_axis = X_transformed.x_axis.tolist()
y_axis = X_transformed.y_axis.tolist()
##
## Cluster names
##
labels = [
"CLUST_{} {}".format(index, label)
for index, label in enumerate(self.cluster_names_)
]
return xy_clusters_plot(
x=x_axis,
y=y_axis,
x_axis_at=0,
y_axis_at=0,
labels=labels,
node_sizes=counters_to_node_sizes(labels),
color_scheme=self.colors,
xlabel="Dim-{}".format(self.x_axis),
ylabel="Dim-{}".format(self.y_axis),
figsize=(self.width, self.height),
)
def ca_keywords_by_cluster_table(self):
self.apply()
return self.cluster_members_
def ca_keywords_by_cluster_list(self):
self.apply()
return cluster_table_to_list(self.cluster_members_)
def ca_keywords_by_cluster_python_code(self):
self.apply()
return cluster_table_to_python_code(self.column, self.cluster_members_)
######
def strategic_map(self):
self.apply()
strategic_map = self.strategic_map_.copy()
strategic_map["node_sizes"] = strategic_map["Cluster name"].map(
lambda w: w.split(" ")[-1]
)
strategic_map["node_sizes"] = strategic_map.node_sizes.map(
lambda w: w.split(":")[0]
)
strategic_map["node_sizes"] = strategic_map.node_sizes.map(int)
max_node_size = strategic_map.node_sizes.max()
min_node_size = strategic_map.node_sizes.min()
strategic_map["node_sizes"] = strategic_map.node_sizes.map(
lambda w: 200 + 2800 * (w - min_node_size) / (max_node_size - min_node_size)
)
return xy_clusters_plot(
x=strategic_map.Centrality,
y=strategic_map.Density,
x_axis_at=strategic_map.Centrality.median(),
y_axis_at=strategic_map.Density.median(),
labels=strategic_map["Cluster name"]
.map(lambda w: " ".join(w.split(" ")[:-1]))
.tolist(),
node_sizes=strategic_map.node_sizes,
color_scheme=self.colors,
xlabel="Centrality",
ylabel="Density",
figsize=(self.width, self.height),
)
###############################################################################
##
## DASHBOARD
##
###############################################################################
COLUMNS = sorted(
[
"Abstract_words_CL",
"Abstract_words",
"Author_Keywords_CL",
"Author_Keywords",
"Index_Keywords_CL",
"Index_Keywords",
"Keywords_CL",
"Title_words_CL",
"Title_words",
]
)
class DASHapp(DASH, Model):
def __init__(
self,
data,
limit_to=None,
exclude=None,
years_range=None,
clusters=None,
cluster=None,
):
Model.__init__(
self,
data=data,
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
clusters=clusters,
cluster=cluster,
)
DASH.__init__(self)
self.app_title = "Co-word Analysis"
self.menu_options = [
"MDS Keywords Map",
"MDS Cluster Map",
"MDS Keywords by Cluster (table)",
"MDS Keywords by Cluster (list)",
"MDS Keywords by Cluster (Python code)",
"CA Keywords Map",
"CA Cluster Map",
"CA Keywords by Cluster (table)",
"CA Keywords by Cluster (list)",
"CA Keywords by Cluster (Python code)",
"Strategic Map",
]
self.panel_widgets = [
dash.dropdown(
desc="Column:",
options=[z for z in COLUMNS if z in data.columns],
),
dash.min_occurrence(),
dash.max_items(),
dash.normalization(include_none=False),
dash.separator(text="Clustering"),
dash.clustering_method(),
dash.n_clusters(m=3, n=50, i=1),
dash.affinity(),
dash.linkage(),
dash.random_state(),
dash.separator(text="CA diagram"),
dash.x_axis(),
dash.y_axis(),
dash.separator(text="Visualization"),
dash.top_n(),
dash.dropdown(
desc="Colors:",
options=[
"4 Quadrants",
"Clusters",
"Greys",
"Purples",
"Blues",
"Greens",
"Oranges",
"Reds",
],
),
dash.fig_width(),
dash.fig_height(),
]
self.n_components = 10
super().create_grid()
def interactive_output(self, **kwargs):
DASH.interactive_output(self, **kwargs)
with self.output:
if self.menu in ["MDS Keywords Map", "MDS Cluster Map", "Strategic Map"]:
self.set_disabled("X-axis:")
self.set_disabled("Y-axis:")
self.set_enabled("Colors:")
self.set_enabled("Width:")
self.set_enabled("Height:")
if self.menu in ["CA Keywords Map", "CA Cluster Map"]:
self.set_enabled("X-axis:")
self.set_enabled("Y-axis:")
self.set_enabled("Colors:")
self.set_enabled("Width:")
self.set_enabled("Height:")
if self.menu in [
"MDS Keywords by Cluster (table)",
"MDS Keywords by Cluster (list)",
"MDS Keywords by Cluster (Python code)",
"CA Keywords by Cluster (table)",
"CA Keywords by Cluster (list)",
"CA Keywords by Cluster (Python code)",
]:
self.set_disabled("X-axis:")
self.set_disabled("Y-axis:")
self.set_disabled("Colors:")
self.set_disabled("Width:")
self.set_disabled("Height:")
###############################################################################
##
## EXTERNAL INTERFACE
##
###############################################################################
def co_word_analysis(
input_file="techminer.csv",
limit_to=None,
exclude=None,
years_range=None,
clusters=None,
cluster=None,
):
return DASHapp(
data=pd.read_csv(input_file),
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
clusters=clusters,
cluster=cluster,
).run()
pyplan/pyplan/common/email/classes/eEmailType.py | jorgedouglas71/pyplan-ide | 17 | 6631276 | from enum import Enum
class eEmailType(Enum):
WORKFLOW_ASSIGNED_TASK = 0
WORKFLOW_CHANGE_STATE = 1
WORKFLOW_CHANGE_PERCENT = 2
INTERFACE_COMMENT = 3
INTERFACE_REFRESH_USER_IN_COMMENT = 4 # TODO:Implement this
INTERFACE_SHARED = 5
APPLICATION_SHARED = 6
RESET_PASSWORD = 7
CHANGED_PASSWORD = 8
TEST = 9
WELCOME_USER = 10
CREATED_USER = 11
ACTIVATED_USER = 12
SCHEDULE_TASK_STATUS_CHANGED = 13
DEACTIVATED_USER = 14 # TODO:Implement this
def __str__(self):
        # __str__ must return a string; the enum values here are ints.
        return str(self.value)
video.py | rvk007/Multi-Env-Decision-Making | 2 | 6631277 | import os
import logging
import imageio
class VideoRecorder:
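    # Captures rendered frames from an environment during evaluation and writes
    # them out as a video file under <root_dir>/eval_video.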
def __init__(self, root_dir, fps=5):
logging.getLogger('imageio_ffmpeg').setLevel(logging.ERROR)
self.save_dir = os.path.join(root_dir, 'eval_video') if root_dir else None
if self.save_dir:
os.makedirs(self.save_dir, exist_ok=True)
self.fps = fps
self.frames = []
def init(self, env, enabled=True):
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(env)
def record(self, env):
if self.enabled:
frame = env.render(mode='rgb_array')
self.frames.append(frame)
def save(self, file_name):
if self.enabled:
path = os.path.join(self.save_dir, file_name)
imageio.mimsave(path, self.frames, fps=self.fps, macro_block_size=10, ffmpeg_params=['-loglevel', 'error'])
container-applications/classified/inspect_workout/routes.py | emerginganalytics/ualr-cyber-gym | 3 | 6631278
import cryptocode
from flask import Blueprint, abort, render_template, redirect, request, url_for
from globals import ds_client, publish_status
# Blueprint Configuration
inspect_bp = Blueprint(
'inspect_bp', __name__,
url_prefix='/inspect',
template_folder='templates',
static_folder='static'
)
@inspect_bp.route('/<workout_id>')
def inspect(workout_id):
key = ds_client.key('cybergym-workout', workout_id)
workout = ds_client.get(key)
if workout['type'] == 'inspect':
page_template = 'inspect.html'
return render_template(page_template, workout_id=workout_id)
else:
        abort(404)
@inspect_bp.route('/xsfiedSTRflag/<workout_id>', methods=['GET', 'POST'])
def xsfiedSTRflag(workout_id):
key = ds_client.key('cybergym-workout', workout_id)
workout = ds_client.get(key)
if workout['type'] == 'inspect':
page_template = 'index.html'
return render_template(page_template, workout_id=workout_id)
else:
        abort(404)
@inspect_bp.route('/login/<workout_id>', methods=['POST'])
def login(workout_id):
page_template = 'inspect.html'
key = ds_client.key('cybergym-workout', workout_id)
workout = ds_client.get(key)
if workout['type'] == 'inspect':
if request.method == 'POST':
if request.form['password'] == '<PASSWORD>' and request.form['username'] == 'Maximus':
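                # Credentials accepted: decrypt the hard-coded classified flag with this workout's assessment key.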
decrypt_key = workout['assessment']['key']
classified_flag = 'gecJuFQuv1FhQAfLDvn9f6j6xu/GACm00wqyoWVKUJQ=*gXSP1UFZELV59Qz6yP0Y+w==*' \
'y6cg3ujMtm7eSklW2SX3JQ==*C4GDYpzjfozIsTQWVuUc4A=='
plaintext_flag = cryptocode.decrypt(classified_flag, decrypt_key)
return render_template(page_template, workout_id=workout_id, classified_flag=plaintext_flag)
else:
                return redirect(url_for('inspect_bp.xsfiedSTRflag', workout_id=workout_id))
else:
        abort(404)
@inspect_bp.route('/check_flag/<workout_id>', methods=['POST'])
def check_flag(workout_id):
if request.method == 'POST':
key = ds_client.key('cybergym-workout', workout_id)
page_template = 'inspect.html'
workout = ds_client.get(key)
workout_token = workout['assessment']['questions'][0]['key']
if request.form.get('check_button'):
decrypt_key = workout['assessment']['key']
encrypted_flag = 'gecJuFQuv1FhQAfLDvn9f6j6xu/GACm00wqyoWVKUJQ=*gXSP1UFZELV59Qz6yP0Y+w==*' \
'y6cg3ujMtm7eSklW2SX3JQ==*C4GDYpzjfozIsTQWVuUc4A=='
classified_flag = cryptocode.decrypt(encrypted_flag, decrypt_key)
if classified_flag == request.form['classified_flag']:
publish_status(workout_id, workout_token)
completion = True
return render_template(page_template, workout_id=workout_id, completion=completion)
else:
return render_template(page_template, workout_id=workout_id)
rdc/dic/test/test_container.py | hartym/rdc.dic | 0 | 6631279 | <gh_stars>0
from rdc.dic import Container
from rdc.dic.test import TestCase
class ContainerTestCase(TestCase):
def setUp(self):
self.container = Container()
def test_set_parameter(self):
self.assertRaises(KeyError, self.container.get, 'foo')
self.container.set_parameter('foo', 'bar')
self.assertEqual(self.container.get('foo'), 'bar')
def test_set_parameters(self):
self.container.set_parameters({
'foo': 42,
'bar': 43
})
self.assertEqual(self.container.get('foo'), 42)
self.assertEqual(self.container.get('bar'), 43)
| none | 1 | 2.624074 | 3 |
|
test/test_skim.py | rspencer01/skim | 3,352 | 6631280 | <reponame>rspencer01/skim
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# The integration test of skim
# Modeled after fzf's test: https://github.com/junegunn/fzf/blob/master/test/test_go.rb
import subprocess
import unittest
import os
import time
import re
import inspect
import sys
INPUT_RECORD_SEPARATOR = '\n'
DEFAULT_TIMEOUT = 3000
SCRIPT_PATH = os.path.realpath(__file__)
BASE = os.path.expanduser(os.path.join(os.path.dirname(SCRIPT_PATH), '..'))
os.chdir(BASE)
SK = f"SKIM_DEFAULT_OPTIONS= SKIM_DEFAULT_COMMAND= {BASE}/target/release/sk"
def now_mills():
return int(round(time.time() * 1000))
def wait(func, timeout_handler=None):
since = now_mills()
while now_mills() - since < DEFAULT_TIMEOUT:
time.sleep(0.02)
ret = func()
if ret is not None and ret:
return
if timeout_handler is not None:
timeout_handler()
raise BaseException('Timeout on wait')
class Shell(object):
"""The shell configurations for tmux tests"""
def __init__(self):
super(Shell, self).__init__()
def unsets():
return 'unset SKIM_DEFAULT_COMMAND SKIM_DEFAULT_OPTIONS;'
def bash():
return 'PS1= PROMPT_COMMAND= bash --rcfile None'
def zsh():
return 'PS1= PROMPT_COMMAND= HISTSIZE=100 zsh -f'
class Key(object):
"""Represent a key to send to tmux"""
def __init__(self, key):
super(Key, self).__init__()
self.key = key
def __repr__(self):
return self.key
class Ctrl(Key):
"""Represent a control key"""
def __init__(self, key):
super(Ctrl, self).__init__(key)
def __repr__(self):
return f'C-{self.key.upper()}'
class Alt(Key):
"""Represent an alt key"""
def __init__(self, key):
super(Alt, self).__init__(key)
def __repr__(self):
return f'M-{self.key}'
class TmuxOutput(list):
"""A list that contains the output of tmux"""
# match the status line
# normal: `| 10/219 [2] 8/0.`
# inline: `> query < 10/219 [2] 8/0.`
# preview: `> query < 10/219 [2] 8/0.│...`
RE = re.compile(r'(?:^|[^<-]*). ([0-9]+)/([0-9]+)(?:/[A-Z]*)?(?: \[([0-9]+)\])? *([0-9]+)/(-?[0-9]+)(\.)?(?: │)? *$')
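    # Editor's illustrative note, derived from the pattern above: a status line
    # such as "| 10/219 [2] 8/0." is captured as the groups
    # ('10', '219', '2', '8', '0', '.'), i.e. match_count=10, item_count=219,
    # select_count=2, item_index=8, hscroll=0, with the trailing spinner dot
    # being what matcher_stopped() inspects.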
    def __init__(self, iterable=()):
        super(TmuxOutput, self).__init__(iterable)
self._counts = None
def counts(self):
if self._counts is not None:
return self._counts
        # match_count item_count select_count item_cursor hscroll matcher_stopped
ret = (0, 0, 0, 0, 0, '.')
for line in self:
mat = TmuxOutput.RE.match(line)
if mat is not None:
ret = mat.groups()
                break
self._counts = ret
return ret
def match_count(self):
count = self.counts()[0]
return int(count) if count is not None else None
def item_count(self):
count = self.counts()[1]
return int(count) if count is not None else None
def select_count(self):
count = self.counts()[2]
return int(count) if count is not None else None
def item_index(self):
count = self.counts()[3]
return int(count) if count is not None else None
def hscroll(self):
count = self.counts()[4]
return int(count) if count is not None else None
def matcher_stopped(self):
return self.counts()[5] != '.'
def ready_with_lines(self, lines):
return self.item_count() == lines and self.matcher_stopped()
def ready_with_matches(self, matches):
return self.match_count() == matches and self.matcher_stopped()
def any_include(self, val):
if hasattr(re, '_pattern_type') and isinstance(val, re._pattern_type):
method = lambda l: val.match(l)
        elif hasattr(re, 'Pattern') and isinstance(val, re.Pattern):
method = lambda l: val.match(l)
else:
method = lambda l: l.find(val) >= 0
for line in self:
if method(line):
return True
return False
class Tmux(object):
TEMPNAME = '/tmp/skim-test.txt'
"""Object to manipulate tmux and get result"""
def __init__(self, shell = 'bash'):
super(Tmux, self).__init__()
if shell == 'bash':
shell_cmd = Shell.unsets() + Shell.bash()
elif shell == 'zsh':
shell_cmd = Shell.unsets() + Shell.zsh()
else:
raise BaseException('unknown shell')
self.win = self._go("new-window", "-d", "-P", "-F", "#I", f"{shell_cmd}")[0]
self._go("set-window-option", "-t", f"{self.win}", "pane-base-index", "0")
self.lines = int(subprocess.check_output('tput lines', shell=True).decode('utf8').strip())
def _go(self, *args, **kwargs):
"""Run tmux command and return result in list of strings (lines)
:returns: List<String>
"""
        ret = subprocess.check_output(["tmux"] + list(args), **kwargs)
return ret.decode('utf8').split(INPUT_RECORD_SEPARATOR)
def kill(self):
self._go("kill-window", "-t", f"{self.win}", stderr=subprocess.DEVNULL)
def send_keys(self, *args, pane=None):
if pane is not None:
self._go('select-window', '-t', f'{self.win}')
            target = f'{self.win}.{pane}'
else:
target = self.win
for key in args:
if key is None:
continue
else:
self._go('send-keys', '-t', f'{target}', f'{key}')
time.sleep(0.01)
def paste(self, content):
subprocess.run(["tmux", "setb", f"{content}", ";",
"pasteb", "-t", f"{self.win}", ";",
"send-keys", "-t", f"{self.win}", "Enter"])
def capture(self, pane = 0):
def save_capture():
try:
self._go('capture-pane', '-t', f'{self.win}.{pane}', stderr=subprocess.DEVNULL)
self._go("save-buffer", f"{Tmux.TEMPNAME}", stderr=subprocess.DEVNULL)
return True
except subprocess.CalledProcessError as ex:
return False
if os.path.exists(Tmux.TEMPNAME):
os.remove(Tmux.TEMPNAME)
wait(save_capture)
with open(Tmux.TEMPNAME) as fp:
content = fp.read()
return TmuxOutput(content.rstrip().split(INPUT_RECORD_SEPARATOR))
def until(self, predicate, refresh = False, pane = 0, debug_info = None):
def wait_callback():
lines = self.capture()
pred = predicate(lines)
if pred:
self.send_keys(Ctrl('l') if refresh else None)
return pred
def timeout_handler():
lines = self.capture()
print(lines)
if debug_info:
print(debug_info)
wait(wait_callback, timeout_handler)
def prepare(self):
try:
self.send_keys(Ctrl('u'), Key('hello'))
self.until(lambda lines: lines[-1].endswith('hello'))
except Exception as e:
raise e
self.send_keys(Ctrl('u'))
class TestBase(unittest.TestCase):
TEMPNAME = '/tmp/output'
def __init__(self, *args, **kwargs):
super(TestBase, self).__init__(*args, **kwargs)
self._temp_suffix = 0
def tempname(self):
curframe = inspect.currentframe()
frames = inspect.getouterframes(curframe)
names = [f.function for f in frames if f.function.startswith('test_')]
fun_name = names[0] if len(names) > 0 else 'test'
return '-'.join((TestBase.TEMPNAME, fun_name, str(self._temp_suffix)))
def writelines(self, path, lines):
if os.path.exists(path):
os.remove(path)
with open(path, 'w') as fp:
fp.writelines(lines)
def readonce(self):
path = self.tempname()
try:
wait(lambda: os.path.exists(path))
with open(path) as fp:
return fp.read()
finally:
if os.path.exists(path):
os.remove(path)
self._temp_suffix += 1
self.tmux.prepare()
def sk(self, *opts):
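        # Editor's note: build the shell snippet that pipes into sk with the given
        # options and writes the selection to a temp file; redirecting to
        # "<tmp>.tmp" first and then mv-ing it ensures readonce() only sees the
        # file once sk has finished writing it.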
tmp = self.tempname()
return f'{SK} {" ".join(map(str, opts))} > {tmp}.tmp; mv {tmp}.tmp {tmp}'
def command_until(self, until_predicate, sk_options, stdin="echo -e 'a1\\na2\\na3'"):
command_keys = stdin + " | " + self.sk(*sk_options)
self.tmux.send_keys(command_keys)
self.tmux.send_keys(Key("Enter"))
self.tmux.until(until_predicate, debug_info="SK args: {}".format(sk_options))
self.tmux.send_keys(Key('Enter'))
class TestSkim(TestBase):
def setUp(self):
self.tmux = Tmux()
def tearDown(self):
self.tmux.kill()
pass
def test_vanilla(self):
self.tmux.send_keys(Key(f'seq 1 100000 | {self.sk()}'), Key('Enter'))
self.tmux.until(lambda lines: re.match(r'^>', lines[-1]) and re.match(r'^ 100000', lines[-2]))
lines = self.tmux.capture()
self.assertEqual(' 2', lines[-4])
self.assertEqual('> 1', lines[-3])
self.assertTrue(re.match('^ 100000/100000 *0', lines[-2]))
self.assertEqual('>', lines[-1])
# testing basic key binding
self.tmux.send_keys(Key('99'))
self.tmux.until(lambda ls: ls[-2].startswith(' 8146/100000'))
self.tmux.until(lambda ls: ls[-1].startswith('> 99'))
self.tmux.send_keys(Ctrl('a'), Key('1'))
self.tmux.until(lambda ls: ls[-2].startswith(' 856/100000'))
self.tmux.until(lambda ls: ls[-1].startswith('> 199'))
self.tmux.send_keys(Ctrl('f'), Key('3'))
self.tmux.until(lambda ls: ls[-2].startswith(' 46/100000'))
self.tmux.until(lambda ls: ls[-1].startswith('> 1939'))
self.tmux.send_keys(Ctrl('b'), Ctrl('h'))
self.tmux.until(lambda ls: ls[-2].startswith(' 856/100000'))
self.tmux.until(lambda ls: ls[-1].startswith('> 139'))
self.tmux.send_keys(Ctrl('e'), Ctrl('b'))
self.tmux.send_keys(Ctrl('k'))
self.tmux.until(lambda ls: ls[-4].startswith('> 1390'))
self.tmux.until(lambda ls: ls[-3].startswith(' 139'))
self.tmux.send_keys(Key('Tab'))
self.tmux.until(lambda ls: ls[-4].startswith(' 1390'))
self.tmux.until(lambda ls: ls[-3].startswith('> 139'))
self.tmux.send_keys(Key('BTab'))
self.tmux.until(lambda ls: ls[-4].startswith('> 1390'))
self.tmux.until(lambda ls: ls[-3].startswith(' 139'))
lines = self.tmux.capture()
self.assertEqual('> 1390', lines[-4])
self.assertEqual(' 139', lines[-3])
self.assertTrue(lines[-2].startswith(' 856/100000'))
self.assertEqual('> 139', lines[-1])
self.tmux.send_keys(Key('Enter'))
self.assertEqual('1390', self.readonce().strip())
def test_default_command(self):
self.tmux.send_keys(self.sk().replace('SKIM_DEFAULT_COMMAND=', "SKIM_DEFAULT_COMMAND='echo hello'"))
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Key('Enter'))
self.assertEqual('hello', self.readonce().strip())
def test_key_bindings(self):
self.tmux.send_keys(f"{SK} -q 'foo bar foo-bar'", Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('>'))
# Ctrl-A
self.tmux.send_keys(Ctrl('a'), Key('('))
self.tmux.until(lambda lines: lines[-1] == '> (foo bar foo-bar')
## Meta-F
self.tmux.send_keys(Alt('f'), Key(')'))
self.tmux.until(lambda lines: lines[-1] == '> (foo) bar foo-bar')
# CTRL-B
self.tmux.send_keys(Ctrl('b'), 'var')
self.tmux.until(lambda lines: lines[-1] == '> (foovar) bar foo-bar')
# Left, CTRL-D
self.tmux.send_keys(Key('Left'), Key('Left'), Ctrl('d'))
self.tmux.until(lambda lines: lines[-1] == '> (foovr) bar foo-bar')
# # META-BS
self.tmux.send_keys(Alt('BSpace'))
self.tmux.until(lambda lines: lines[-1] == '> (r) bar foo-bar')
# # # CTRL-Y
self.tmux.send_keys(Ctrl('y'), Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == '> (foovfoovr) bar foo-bar')
# META-B
self.tmux.send_keys(Alt('b'), Key('Space'), Key('Space'))
self.tmux.until(lambda lines: lines[-1] == '> ( foovfoovr) bar foo-bar')
# CTRL-F / Right
self.tmux.send_keys( Ctrl('f'), Key('Right'), '/')
self.tmux.until(lambda lines: lines[-1] == '> ( fo/ovfoovr) bar foo-bar')
# CTRL-H / BS
self.tmux.send_keys( Ctrl('h'), Key('BSpace'))
self.tmux.until(lambda lines: lines[-1] == '> ( fovfoovr) bar foo-bar')
# CTRL-E
self.tmux.send_keys(Ctrl('e'), 'baz')
self.tmux.until(lambda lines: lines[-1] == '> ( fovfoovr) bar foo-barbaz')
# CTRL-U
self.tmux.send_keys( Ctrl('u'))
self.tmux.until(lambda lines: lines[-1] == '>')
# CTRL-Y
self.tmux.send_keys( Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == '> ( fovfoovr) bar foo-barbaz')
# CTRL-W
self.tmux.send_keys( Ctrl('w'), 'bar-foo')
self.tmux.until(lambda lines: lines[-1] == '> ( fovfoovr) bar bar-foo')
# # META-D
self.tmux.send_keys(Alt('b'), Alt('b'), Alt('d'), Ctrl('a'), Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == '> bar( fovfoovr) bar -foo')
# CTRL-M
self.tmux.send_keys(Ctrl('m'))
self.tmux.until(lambda lines: not lines[-1].startswith('>'))
def test_key_bindings_interactive(self):
self.tmux.send_keys(f"{SK} -i --cmd-query 'foo bar foo-bar'", Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('c>'))
# Ctrl-A
self.tmux.send_keys(Ctrl('a'), Key('('))
self.tmux.until(lambda lines: lines[-1] == 'c> (foo bar foo-bar')
## Meta-F
self.tmux.send_keys(Alt('f'), Key(')'))
self.tmux.until(lambda lines: lines[-1] == 'c> (foo) bar foo-bar')
# CTRL-B
self.tmux.send_keys(Ctrl('b'), 'var')
self.tmux.until(lambda lines: lines[-1] == 'c> (foovar) bar foo-bar')
# Left, CTRL-D
self.tmux.send_keys(Key('Left'), Key('Left'), Ctrl('d'))
self.tmux.until(lambda lines: lines[-1] == 'c> (foovr) bar foo-bar')
# # META-BS
self.tmux.send_keys(Alt('BSpace'))
self.tmux.until(lambda lines: lines[-1] == 'c> (r) bar foo-bar')
# # # CTRL-Y
self.tmux.send_keys(Ctrl('y'), Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == 'c> (foovfoovr) bar foo-bar')
# META-B
self.tmux.send_keys(Alt('b'), Key('Space'), Key('Space'))
self.tmux.until(lambda lines: lines[-1] == 'c> ( foovfoovr) bar foo-bar')
# CTRL-F / Right
self.tmux.send_keys( Ctrl('f'), Key('Right'), '/')
self.tmux.until(lambda lines: lines[-1] == 'c> ( fo/ovfoovr) bar foo-bar')
# CTRL-H / BS
self.tmux.send_keys( Ctrl('h'), Key('BSpace'))
self.tmux.until(lambda lines: lines[-1] == 'c> ( fovfoovr) bar foo-bar')
# CTRL-E
self.tmux.send_keys(Ctrl('e'), 'baz')
self.tmux.until(lambda lines: lines[-1] == 'c> ( fovfoovr) bar foo-barbaz')
# CTRL-U
self.tmux.send_keys( Ctrl('u'))
self.tmux.until(lambda lines: lines[-1] == 'c>')
# CTRL-Y
self.tmux.send_keys( Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == 'c> ( fovfoovr) bar foo-barbaz')
# CTRL-W
self.tmux.send_keys( Ctrl('w'), 'bar-foo')
self.tmux.until(lambda lines: lines[-1] == 'c> ( fovfoovr) bar bar-foo')
# # META-D
self.tmux.send_keys(Alt('b'), Alt('b'), Alt('d'), Ctrl('a'), Ctrl('y'))
self.tmux.until(lambda lines: lines[-1] == 'c> bar( fovfoovr) bar -foo')
# CTRL-M
self.tmux.send_keys(Ctrl('m'))
self.tmux.until(lambda lines: not lines[-1].startswith('c>'))
def test_read0(self):
nfiles = subprocess.check_output("find .", shell=True).decode("utf-8").strip().split("\n")
num_of_files = len(nfiles)
self.tmux.send_keys(f"find . | {self.sk()}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(num_of_files))
self.tmux.send_keys(Key('Enter'))
orig = self.readonce().strip()
self.tmux.send_keys(f"find . -print0 | {self.sk('--read0')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(num_of_files))
self.tmux.send_keys(Key('Enter'))
self.assertEqual(orig, self.readonce().strip())
def test_print0(self):
self.tmux.send_keys(f"echo -e 'a\\nb' | {self.sk('-m', '--print0')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(2))
self.tmux.send_keys(Key('BTab'), Key('BTab'), Key('Enter'))
lines = self.readonce().strip()
self.assertEqual(lines, 'a\0b\0')
self.tmux.send_keys(f"echo -e 'a\\naa\\nb' | {self.sk('-f a', '--print0')}", Key('Enter'))
lines = self.readonce().strip()
self.assertEqual(lines, 'a\0aa\0')
def test_with_nth_preview(self):
sk_command = self.sk("--delimiter ','", '--with-nth 2..', '--preview', "'echo X{1}Y'")
self.tmux.send_keys("echo -e 'field1,field2,field3,field4' |" + sk_command, Key('Enter'))
self.tmux.until(lambda lines: lines.any_include("Xfield1Y"))
self.tmux.send_keys(Key('Enter'))
def test_with_nth(self):
# fields, expected
tests = [
('1', 'field1,'),
('2', 'field2,'),
('3', 'field3,'),
('4', 'field4'),
('5', ''),
('-1', 'field4'),
('-2', 'field3,'),
('-3', 'field2,'),
('-4', 'field1,'),
('-5', ''),
('2..', 'field2,field3,field4'),
('..3', 'field1,field2,field3,'),
('2..3', 'field2,field3,'),
('3..2', ''),
]
for field, expected in tests:
sk_command = self.sk("--delimiter ','", f'--with-nth={field}')
self.tmux.send_keys("echo -e 'field1,field2,field3,field4' |" + sk_command, Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
lines = self.tmux.capture()
self.tmux.send_keys(Key('Enter'))
self.assertEqual(f'> {expected}'.strip(), lines[-3])
def test_nth(self):
# fields, query, match_count(0/1)
tests = [
('1', 'field1', 1),
('1', 'field2', 0),
('-1', 'field4', 1),
('-1', 'field3', 0),
('-5', 'f', 0),
('2..', 'field2', 1),
('2..', 'field4', 1),
('..3', 'field1', 1),
('..3', 'field3,', 1),
('2..3', '2,3', 1),
('3..2', 'f', 0),
]
for field, query, count in tests:
sk_command = self.sk(f"--delimiter ',' --nth={field} -q {query}")
self.tmux.send_keys("echo -e 'field1,field2,field3,field4' |" + sk_command, Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('Enter'))
def test_print_query(self):
self.tmux.send_keys(f"seq 1 1000 | {self.sk('-q 10', '--print-query')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1000))
self.tmux.send_keys(Key('Enter'))
lines = self.readonce().strip()
self.assertEqual(lines, '10\n10')
def test_print_cmd(self):
self.tmux.send_keys(f"seq 1 1000 | {self.sk('--cmd-query 10', '--print-cmd')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1000))
self.tmux.send_keys(Key('Enter'))
lines = self.readonce().strip()
self.assertEqual(lines, '10\n1')
def test_print_cmd_and_query(self):
self.tmux.send_keys(f"seq 1 1000 | {self.sk('-q 10', '--cmd-query cmd', '--print-cmd', '--print-query')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1000))
self.tmux.send_keys(Key('Enter'))
lines = self.readonce().strip()
self.assertEqual(lines, '10\ncmd\n10')
def test_hscroll(self):
# XXXXXXXXXXXXXXXXX..
self.tmux.send_keys(f"cat <<EOF | {self.sk('-q b')}", Key('Enter'))
self.tmux.send_keys(f"b{'a'*1000}", Key('Enter'))
self.tmux.send_keys(f"EOF", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].endswith('..'))
self.tmux.send_keys(Key('Enter'))
# ..XXXXXXXXXXXXXXXXXM
self.tmux.send_keys(f"cat <<EOF | {self.sk('-q b')}", Key('Enter'))
self.tmux.send_keys(f"{'a'*1000}b", Key('Enter'))
self.tmux.send_keys(f"EOF", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].endswith('b'))
self.tmux.send_keys(Key('Enter'))
# ..XXXXXXXMXXXXXXX..
self.tmux.send_keys(f"cat <<EOF | {self.sk('-q b')}", Key('Enter'))
self.tmux.send_keys(f"{'a'*1000}b{'a'*1000}", Key('Enter'))
self.tmux.send_keys(f"EOF", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> ..'))
self.tmux.until(lambda lines: lines[-3].endswith('..'))
self.tmux.send_keys(Key('Enter'))
def test_no_hscroll(self):
self.tmux.send_keys(f"cat <<EOF | {self.sk('-q b', '--no-hscroll')}", Key('Enter'))
self.tmux.send_keys(f"{'a'*1000}b", Key('Enter'))
self.tmux.send_keys(f"EOF", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('Enter'))
def test_tabstop(self):
self.tmux.send_keys(f"echo -e 'a\\tb' | {self.sk()}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a b'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"echo -e 'a\\tb' | {self.sk('--tabstop 1')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a b'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"echo -e 'aa\\tb' | {self.sk('--tabstop 2')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> aa b'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"echo -e 'aa\\tb' | {self.sk('--tabstop 3')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> aa b'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"echo -e 'a\\tb' | {self.sk('--tabstop 4')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a b'))
self.tmux.send_keys(Key('Enter'))
def test_inline_info(self):
INLINE_INFO_SEP = " <"
## the dot accounts for spinner
RE = re.compile(r'[^0-9]*([0-9]+)/([0-9]+)(?: \[([0-9]+)\])?')
self.tmux.send_keys(f"echo -e 'a1\\na2\\na3\\na4' | {self.sk('--inline-info')}", Key('Enter'))
self.tmux.until(lambda lines: lines.match_count() == lines.item_count())
self.tmux.send_keys("a")
self.tmux.until(lambda lines: lines[-1].find(INLINE_INFO_SEP) != -1)
lines = self.tmux.capture()
self.tmux.send_keys(Key('Enter'))
query_line = lines[-1]
bef, after = query_line.split(INLINE_INFO_SEP)
mat = RE.match(after)
self.assertTrue(mat is not None)
ret = tuple(map(lambda x: int(x) if x is not None else 0, mat.groups()))
self.assertEqual(len(ret), 3)
self.assertEqual((bef, ret[0], ret[1], ret[2]), ("> a ", 4, 4, 0))
        # test that inline info does not overwrite the query
self.tmux.send_keys(f"echo -e '<KEY>' | {self.sk('--inline-info')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(4))
self.tmux.send_keys("bc", Ctrl("a"), "a")
self.tmux.until(lambda lines: lines[-1].find(INLINE_INFO_SEP) != -1 and
lines[-1].split(INLINE_INFO_SEP)[0] == "> abc ")
self.tmux.send_keys(Key('Enter'))
def test_header(self):
self.command_until(sk_options=['--header', 'hello'],
until_predicate=lambda lines: lines[-3].find("hello") != -1)
self.command_until(sk_options=['--inline-info', '--header', 'hello'],
until_predicate=lambda lines: lines[-2].find("hello") != -1)
self.command_until(sk_options=['--reverse', '--inline-info', '--header', 'hello'],
until_predicate=lambda lines: lines[1].find("hello") != -1)
self.command_until(sk_options=['--reverse', '--header', 'hello'],
until_predicate=lambda lines: lines[2].find("hello") != -1)
def test_header_lines(self):
self.command_until(sk_options=['--header-lines', '1'],
until_predicate=lambda lines: lines[-3].find(" a1") != -1)
self.command_until(sk_options=['--header-lines', '4'],
until_predicate=lambda lines: lines[-5].find(" a3") != -1)
self.command_until(sk_options=['--inline-info', '--header-lines', '1'],
until_predicate=lambda lines: lines[-2].find(" a1") != -1)
self.command_until(sk_options=['--reverse', '--inline-info', '--header-lines', '1'],
until_predicate=lambda lines: lines[1].find(" a1") != -1)
self.command_until(sk_options=['--reverse', '--header-lines', '1'],
until_predicate=lambda lines: lines[2].find(" a1") != -1)
def test_reserved_options(self):
options = [
'--extended',
'--algo=TYPE',
'--literal',
'--no-mouse',
'--cycle',
'--hscroll-off=COL',
'--filepath-word',
'--jump-labels=CHARS',
'--border',
'--inline-info',
'--header=STR',
'--header-lines=N',
'--no-bold',
'--history-size=10',
'--sync',
'--no-sort',
# --select-1
'--select-1',
'-1',
# --exit-0
'--exit-0',
'-0']
for opt in options:
self.command_until(sk_options=[opt], until_predicate=find_prompt)
def test_multiple_option_values_should_be_accepted(self):
# normally we'll put some default options to SKIM_DEFAULT_OPTIONS and override it in command
# line. this test will ensure multiple values are accepted.
options = [
'--bind=ctrl-a:cancel --bind ctrl-b:cancel',
'--expect=ctrl-a --expect=ctrl-v',
'--tiebreak=index --tiebreak=score',
'--cmd asdf --cmd find',
'--query asdf -q xyz',
'--delimiter , --delimiter . -d ,',
'--nth 1,2 --nth=1,3 -n 1,3',
'--with-nth 1,2 --with-nth=1,3',
'-I {} -I XX',
'--color base --color light',
'--margin 30% --margin 0',
'--min-height 30% --min-height 10',
'--height 30% --height 10',
'--preview "ls {}" --preview "cat {}"',
'--preview-window up --preview-window down',
'--multi -m',
'--no-multi --no-multi',
'--tac --tac',
'--ansi --ansi',
'--exact -e',
'--regex --regex',
'--literal --literal',
'--no-mouse --no-mouse',
'--cycle --cycle',
'--no-hscroll --no-hscroll',
'--filepath-word --filepath-word',
'--border --border',
'--inline-info --inline-info',
'--no-bold --no-bold',
'--print-query --print-query',
'--print-cmd --print-cmd',
'--print0 --print0',
'--sync --sync',
'--extended --extended',
'--no-sort --no-sort',
'--select-1 --select-1',
'--exit-0 --exit-0',
]
for opt in options:
self.command_until(sk_options=[opt], until_predicate=find_prompt)
options = [
('--prompt a --prompt b -p c', lambda lines: lines[-1].startswith("c")),
('-i --cmd-prompt a --cmd-prompt b', lambda lines: lines[-1].startswith("b")),
('-i --cmd-query asdf --cmd-query xyz', lambda lines: lines[-1].startswith("c> xyz")),
('--interactive -i', lambda lines: find_prompt(lines, interactive=True)),
('--reverse --reverse', lambda lines: find_prompt(lines, reverse=True))
]
for opt, pred in options:
self.command_until(sk_options=[opt], until_predicate=pred)
self.command_until(stdin="echo -e a\\0b", sk_options=['--read0 --read0'], until_predicate=find_prompt)
def test_single_quote_of_preview_command(self):
# echo "'\"ABC\"'" | sk --preview="echo X{}X" => X'"ABC"'X
echo_command = '''echo "'\\"ABC\\"'" | '''
sk_command = self.sk('--preview=\"echo X{}X\"')
command = echo_command + sk_command
self.tmux.send_keys(command, Key('Enter'))
self.tmux.until(lambda lines: lines.any_include('''X'"ABC"'X'''))
# echo "'\"ABC\"'" | sk --preview="echo X\{}X" => X{}X
echo_command = '''echo "'\\"ABC\\"'" | '''
sk_command = self.sk('--preview=\"echo X\\{}X\"')
command = echo_command + sk_command
self.tmux.send_keys(command, Key('Enter'))
self.tmux.until(lambda lines: lines.any_include('''X{}X'''))
def test_ansi_and_read0(self):
"""should keep the NULL character, see #142"""
self.tmux.send_keys(f"echo -e 'a\\0b' | {self.sk('--ansi')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('Enter'))
output = ":".join("{:02x}".format(ord(c)) for c in self.readonce())
self.assertTrue(output.find("61:00:62:0a") >= 0)
def test_smart_case_fuzzy(self):
"""should behave correctly on case, #219"""
# smart case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('aBc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('ABc'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_smart_case_exact(self):
"""should behave correctly on case, #219"""
# smart case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'aBc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'ABc"))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_ignore_case_fuzzy(self):
"""should behave correctly on case, #219"""
# ignore case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case ignore')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('aBc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('ABc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_ignore_case_exact(self):
"""should behave correctly on case, #219"""
# ignore case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case ignore')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'aBc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'ABc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_respect_case_fuzzy(self):
"""should behave correctly on case, #219"""
# respect case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case respect')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_respect_case_exact(self):
"""should behave correctly on case, #219"""
# respect case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case respect')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_query_history(self):
"""query history should work"""
history_file = f'{self.tempname()}.history'
self.tmux.send_keys(f"echo -e '<KEY> > {history_file}", Key('Enter'))
history_mtime = os.stat(history_file).st_mtime
self.tmux.send_keys(f"echo -e 'a\nb\nc' | {self.sk('--history', history_file)}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> c'))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys('b')
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.until(lambda lines: lines[-1].startswith('> bb'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('> c'))
self.tmux.send_keys('d')
self.tmux.until(lambda lines: lines[-1].startswith('> cd'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f'[[ "$(echo -n $(cat {history_file}))" == "a b c cd" ]] && echo ok')
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('ok'))
def test_cmd_history(self):
"""query history should work"""
history_file = f'{self.tempname()}.cmd-history'
self.tmux.send_keys(f"echo -e 'a\nb\nc' > {history_file}", Key('Enter'))
self.tmux.send_keys(f"""{self.sk("-i -c 'echo {}'", '--cmd-history', history_file)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> c'))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> b'))
self.tmux.send_keys('b')
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> a'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> bb'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> c'))
self.tmux.send_keys('d')
self.tmux.until(lambda lines: lines[-1].startswith('c> cd'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f'[[ "$(echo -n $(cat {history_file}))" == "a b c cd" ]] && echo ok')
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('ok'))
def test_execute_with_zero_result_ref(self):
"""execute should not panic with zero results #276"""
self.tmux.send_keys(f"""echo -n "" | {self.sk("--bind 'enter:execute(less {})'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(Key('q'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.until(lambda lines: lines[-1].startswith('> q')) # less is not executed at all
self.tmux.send_keys(Ctrl('g'))
def test_execute_with_zero_result_no_ref(self):
"""execute should not panic with zero results #276"""
self.tmux.send_keys(f"""echo -n "" | {self.sk("--bind 'enter:execute(less)'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(Key('q'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Ctrl('g'))
def test_if_non_matched(self):
"""commands only effect if no item is matched"""
self.tmux.send_keys(f"""echo "a\nb" | {self.sk("--bind 'enter:if-non-matched(backward-delete-char)'", "-q ab")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Key('Enter')) # not triggered anymore
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_nul_in_execute(self):
"""NUL should work in preview command see #278"""
self.tmux.send_keys(f"""echo -ne 'a\\0b' | {self.sk("--preview='echo -en {} | xxd'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines.any_include('6100 62'))
def test_skip_to_pattern(self):
self.tmux.send_keys(f"""echo -ne 'a/b/c' | {self.sk("--skip-to-pattern '[^/]*$'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines.any_include('..c'))
def test_multi_selection(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('b'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys(Key('TAB'))
self.tmux.until(lambda lines: lines[-3].startswith('>>b'))
self.tmux.send_keys(Key('C-h'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('c'))
self.tmux.until(lambda lines: lines[-3].startswith('> c'))
self.tmux.send_keys(Key('TAB'))
self.tmux.until(lambda lines: lines[-3].startswith('>>c'))
self.tmux.send_keys(Key('C-h'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('Enter'))
self.assertEqual('b\nc', self.readonce().strip())
def test_append_and_select(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --bind 'ctrl-f:append-and-select'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.send_keys(Key('xyz'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Key('C-f'))
self.tmux.until(lambda lines: lines[-3].startswith('>>xyz'))
self.tmux.send_keys(Key('C-u'))
self.tmux.until(lambda lines: lines[-6].startswith(' >xyz'))
self.tmux.until(lambda lines: lines[-5].startswith(' c'))
self.tmux.until(lambda lines: lines[-4].startswith(' b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_n(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --pre-select-n=1")}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' c'))
self.tmux.until(lambda lines: lines[-4].startswith(' b'))
self.tmux.until(lambda lines: lines[-3].startswith('>>a'))
def test_pre_select_items(self):
args = "-m --pre-select-items=$'b\\nc'"
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_pat(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --pre-select-pat='[b|c]'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_file(self):
pre_select_file = f'{self.tempname()}.pre_select'
self.tmux.send_keys(f"echo -e 'b\nc' > {pre_select_file}", Key('Enter'))
args = f'''-m --pre-select-file={pre_select_file}'''
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_no_clear_if_empty(self):
text_file = f'{self.tempname()}.txt'
self.tmux.send_keys(f"echo -e 'b\\nc' > {text_file}", Key('Enter'))
args = "-c 'cat {}'" + f''' -i --cmd-query='{text_file}' --no-clear-if-empty'''
self.tmux.send_keys(f"""{self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-4].startswith(' c'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys(Key('xx'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.until(lambda lines: lines[-4].startswith(' c'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
def test_preview_scroll_const(self):
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk("--preview 'seq 1000' --preview-window left:+123")}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'123.*123/1000', lines[0]))
def test_preview_scroll_expr(self):
args = "--preview 'seq 1000' --preview-window left:+{3}"
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'321.*321/1000', lines[0]))
def test_preview_scroll_and_offset(self):
args = "--preview 'seq 1000' --preview-window left:+{2}-2"
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'121.*121/1000', lines[0]))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"""echo foo :123: 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'121.*121/1000', lines[0]))
self.tmux.send_keys(Key('Enter'))
def test_issue_359_multi_byte_and_regex(self):
self.tmux.send_keys(f"""echo 'ああa' | {self.sk("--regex -q 'a'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> ああa'))
def test_issue_361_literal_space(self):
args = '''-q "'foo\\ bar"'''
self.tmux.send_keys(f"""echo 'foo bar\nfoo bar' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> foo bar'))
self.tmux.send_keys(Key('Enter'))
args = '''-q "!foo\\ bar"'''
self.tmux.send_keys(f"""echo 'foo bar\nfoo bar' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> foo bar'))
self.tmux.send_keys(Key('Enter'))
def find_prompt(lines, interactive=False, reverse=False):
linen = -1
prompt = ">"
if interactive:
prompt = "c>"
if reverse:
linen = 0
return lines[linen].startswith(prompt)
if __name__ == '__main__':
unittest.main()
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> aa b'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"echo -e 'a\\tb' | {self.sk('--tabstop 4')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a b'))
self.tmux.send_keys(Key('Enter'))
def test_inline_info(self):
INLINE_INFO_SEP = " <"
## the dot accounts for spinner
RE = re.compile(r'[^0-9]*([0-9]+)/([0-9]+)(?: \[([0-9]+)\])?')
self.tmux.send_keys(f"echo -e 'a1\\na2\\na3\\na4' | {self.sk('--inline-info')}", Key('Enter'))
self.tmux.until(lambda lines: lines.match_count() == lines.item_count())
self.tmux.send_keys("a")
self.tmux.until(lambda lines: lines[-1].find(INLINE_INFO_SEP) != -1)
lines = self.tmux.capture()
self.tmux.send_keys(Key('Enter'))
query_line = lines[-1]
bef, after = query_line.split(INLINE_INFO_SEP)
mat = RE.match(after)
self.assertTrue(mat is not None)
ret = tuple(map(lambda x: int(x) if x is not None else 0, mat.groups()))
self.assertEqual(len(ret), 3)
self.assertEqual((bef, ret[0], ret[1], ret[2]), ("> a ", 4, 4, 0))
# test that inline info does not overwrite the query
self.tmux.send_keys(f"echo -e '<KEY>' | {self.sk('--inline-info')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(4))
self.tmux.send_keys("bc", Ctrl("a"), "a")
self.tmux.until(lambda lines: lines[-1].find(INLINE_INFO_SEP) != -1 and
lines[-1].split(INLINE_INFO_SEP)[0] == "> abc ")
self.tmux.send_keys(Key('Enter'))
def test_header(self):
self.command_until(sk_options=['--header', 'hello'],
until_predicate=lambda lines: lines[-3].find("hello") != -1)
self.command_until(sk_options=['--inline-info', '--header', 'hello'],
until_predicate=lambda lines: lines[-2].find("hello") != -1)
self.command_until(sk_options=['--reverse', '--inline-info', '--header', 'hello'],
until_predicate=lambda lines: lines[1].find("hello") != -1)
self.command_until(sk_options=['--reverse', '--header', 'hello'],
until_predicate=lambda lines: lines[2].find("hello") != -1)
def test_header_lines(self):
self.command_until(sk_options=['--header-lines', '1'],
until_predicate=lambda lines: lines[-3].find(" a1") != -1)
self.command_until(sk_options=['--header-lines', '4'],
until_predicate=lambda lines: lines[-5].find(" a3") != -1)
self.command_until(sk_options=['--inline-info', '--header-lines', '1'],
until_predicate=lambda lines: lines[-2].find(" a1") != -1)
self.command_until(sk_options=['--reverse', '--inline-info', '--header-lines', '1'],
until_predicate=lambda lines: lines[1].find(" a1") != -1)
self.command_until(sk_options=['--reverse', '--header-lines', '1'],
until_predicate=lambda lines: lines[2].find(" a1") != -1)
def test_reserved_options(self):
options = [
'--extended',
'--algo=TYPE',
'--literal',
'--no-mouse',
'--cycle',
'--hscroll-off=COL',
'--filepath-word',
'--jump-labels=CHARS',
'--border',
'--inline-info',
'--header=STR',
'--header-lines=N',
'--no-bold',
'--history-size=10',
'--sync',
'--no-sort',
# --select-1
'--select-1',
'-1',
# --exit-0
'--exit-0',
'-0']
for opt in options:
self.command_until(sk_options=[opt], until_predicate=find_prompt)
def test_multiple_option_values_should_be_accepted(self):
# normally we'll put some default options in SKIM_DEFAULT_OPTIONS and override them on the
# command line. this test ensures that multiple values are accepted.
options = [
'--bind=ctrl-a:cancel --bind ctrl-b:cancel',
'--expect=ctrl-a --expect=ctrl-v',
'--tiebreak=index --tiebreak=score',
'--cmd asdf --cmd find',
'--query asdf -q xyz',
'--delimiter , --delimiter . -d ,',
'--nth 1,2 --nth=1,3 -n 1,3',
'--with-nth 1,2 --with-nth=1,3',
'-I {} -I XX',
'--color base --color light',
'--margin 30% --margin 0',
'--min-height 30% --min-height 10',
'--height 30% --height 10',
'--preview "ls {}" --preview "cat {}"',
'--preview-window up --preview-window down',
'--multi -m',
'--no-multi --no-multi',
'--tac --tac',
'--ansi --ansi',
'--exact -e',
'--regex --regex',
'--literal --literal',
'--no-mouse --no-mouse',
'--cycle --cycle',
'--no-hscroll --no-hscroll',
'--filepath-word --filepath-word',
'--border --border',
'--inline-info --inline-info',
'--no-bold --no-bold',
'--print-query --print-query',
'--print-cmd --print-cmd',
'--print0 --print0',
'--sync --sync',
'--extended --extended',
'--no-sort --no-sort',
'--select-1 --select-1',
'--exit-0 --exit-0',
]
for opt in options:
self.command_until(sk_options=[opt], until_predicate=find_prompt)
options = [
('--prompt a --prompt b -p c', lambda lines: lines[-1].startswith("c")),
('-i --cmd-prompt a --cmd-prompt b', lambda lines: lines[-1].startswith("b")),
('-i --cmd-query asdf --cmd-query xyz', lambda lines: lines[-1].startswith("c> xyz")),
('--interactive -i', lambda lines: find_prompt(lines, interactive=True)),
('--reverse --reverse', lambda lines: find_prompt(lines, reverse=True))
]
for opt, pred in options:
self.command_until(sk_options=[opt], until_predicate=pred)
self.command_until(stdin="echo -e a\\0b", sk_options=['--read0 --read0'], until_predicate=find_prompt)
def test_single_quote_of_preview_command(self):
# echo "'\"ABC\"'" | sk --preview="echo X{}X" => X'"ABC"'X
echo_command = '''echo "'\\"ABC\\"'" | '''
sk_command = self.sk('--preview=\"echo X{}X\"')
command = echo_command + sk_command
self.tmux.send_keys(command, Key('Enter'))
self.tmux.until(lambda lines: lines.any_include('''X'"ABC"'X'''))
# echo "'\"ABC\"'" | sk --preview="echo X\{}X" => X{}X
echo_command = '''echo "'\\"ABC\\"'" | '''
sk_command = self.sk('--preview=\"echo X\\{}X\"')
command = echo_command + sk_command
self.tmux.send_keys(command, Key('Enter'))
self.tmux.until(lambda lines: lines.any_include('''X{}X'''))
def test_ansi_and_read0(self):
"""should keep the NULL character, see #142"""
self.tmux.send_keys(f"echo -e 'a\\0b' | {self.sk('--ansi')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('Enter'))
output = ":".join("{:02x}".format(ord(c)) for c in self.readonce())
self.assertTrue(output.find("61:00:62:0a") >= 0)
def test_smart_case_fuzzy(self):
"""should behave correctly on case, #219"""
# smart case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('aBc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('ABc'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_smart_case_exact(self):
"""should behave correctly on case, #219"""
# smart case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'aBc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'ABc"))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_ignore_case_fuzzy(self):
"""should behave correctly on case, #219"""
# ignore case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case ignore')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('aBc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key('ABc'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_ignore_case_exact(self):
"""should behave correctly on case, #219"""
# ignore case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case ignore')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'aBc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('u'), Key("'ABc"))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_respect_case_fuzzy(self):
"""should behave correctly on case, #219"""
# respect case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case respect')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key('abc'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_respect_case_exact(self):
"""should behave correctly on case, #219"""
# respect case
self.tmux.send_keys(f"echo -e 'aBcXyZ' | {self.sk('--case respect')}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Key("'abc"))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
def test_query_history(self):
"""query history should work"""
history_file = f'{self.tempname()}.history'
self.tmux.send_keys(f"echo -e '<KEY> > {history_file}", Key('Enter'))
history_mtime = os.stat(history_file).st_mtime
self.tmux.send_keys(f"echo -e 'a\nb\nc' | {self.sk('--history', history_file)}", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> c'))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys('b')
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.until(lambda lines: lines[-1].startswith('> bb'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('> c'))
self.tmux.send_keys('d')
self.tmux.until(lambda lines: lines[-1].startswith('> cd'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f'[[ "$(echo -n $(cat {history_file}))" == "a b c cd" ]] && echo ok')
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('ok'))
def test_cmd_history(self):
"""query history should work"""
history_file = f'{self.tempname()}.cmd-history'
self.tmux.send_keys(f"echo -e 'a\nb\nc' > {history_file}", Key('Enter'))
self.tmux.send_keys(f"""{self.sk("-i -c 'echo {}'", '--cmd-history', history_file)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> c'))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> b'))
self.tmux.send_keys('b')
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Ctrl('p'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> a'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> bb'))
self.tmux.send_keys(Ctrl('n'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-1].startswith('c> c'))
self.tmux.send_keys('d')
self.tmux.until(lambda lines: lines[-1].startswith('c> cd'))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f'[[ "$(echo -n $(cat {history_file}))" == "a b c cd" ]] && echo ok')
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines[-1].startswith('ok'))
def test_execute_with_zero_result_ref(self):
"""execute should not panic with zero results #276"""
self.tmux.send_keys(f"""echo -n "" | {self.sk("--bind 'enter:execute(less {})'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(Key('q'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.until(lambda lines: lines[-1].startswith('> q')) # less is not executed at all
self.tmux.send_keys(Ctrl('g'))
def test_execute_with_zero_result_no_ref(self):
"""execute should not panic with zero results #276"""
self.tmux.send_keys(f"""echo -n "" | {self.sk("--bind 'enter:execute(less)'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(Key('q'))
self.tmux.until(lambda lines: lines.ready_with_lines(0))
self.tmux.send_keys(Ctrl('g'))
def test_if_non_matched(self):
"""commands only effect if no item is matched"""
self.tmux.send_keys(f"""echo "a\nb" | {self.sk("--bind 'enter:if-non-matched(backward-delete-char)'", "-q ab")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.send_keys(Key('Enter')) # not triggered anymore
self.tmux.until(lambda lines: lines.ready_with_matches(1))
def test_nul_in_execute(self):
"""NUL should work in preview command see #278"""
self.tmux.send_keys(f"""echo -ne 'a\\0b' | {self.sk("--preview='echo -en {} | xxd'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines.any_include('6100 62'))
def test_skip_to_pattern(self):
self.tmux.send_keys(f"""echo -ne 'a/b/c' | {self.sk("--skip-to-pattern '[^/]*$'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(1))
self.tmux.until(lambda lines: lines.any_include('..c'))
def test_multi_selection(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('b'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys(Key('TAB'))
self.tmux.until(lambda lines: lines[-3].startswith('>>b'))
self.tmux.send_keys(Key('C-h'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('c'))
self.tmux.until(lambda lines: lines[-3].startswith('> c'))
self.tmux.send_keys(Key('TAB'))
self.tmux.until(lambda lines: lines[-3].startswith('>>c'))
self.tmux.send_keys(Key('C-h'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
self.tmux.send_keys(Key('Enter'))
self.assertEqual('b\nc', self.readonce().strip())
def test_append_and_select(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --bind 'ctrl-f:append-and-select'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_lines(3))
self.tmux.send_keys(Key('xyz'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.send_keys(Key('C-f'))
self.tmux.until(lambda lines: lines[-3].startswith('>>xyz'))
self.tmux.send_keys(Key('C-u'))
self.tmux.until(lambda lines: lines[-6].startswith(' >xyz'))
self.tmux.until(lambda lines: lines[-5].startswith(' c'))
self.tmux.until(lambda lines: lines[-4].startswith(' b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_n(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --pre-select-n=1")}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' c'))
self.tmux.until(lambda lines: lines[-4].startswith(' b'))
self.tmux.until(lambda lines: lines[-3].startswith('>>a'))
def test_pre_select_items(self):
args = "-m --pre-select-items=$'b\\nc'"
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_pat(self):
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk("-m --pre-select-pat='[b|c]'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_pre_select_file(self):
pre_select_file = f'{self.tempname()}.pre_select'
self.tmux.send_keys(f"echo -e 'b\nc' > {pre_select_file}", Key('Enter'))
args = f'''-m --pre-select-file={pre_select_file}'''
self.tmux.send_keys(f"""echo -n 'a\nb\nc' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-5].startswith(' >c'))
self.tmux.until(lambda lines: lines[-4].startswith(' >b'))
self.tmux.until(lambda lines: lines[-3].startswith('> a'))
def test_no_clear_if_empty(self):
text_file = f'{self.tempname()}.txt'
self.tmux.send_keys(f"echo -e 'b\\nc' > {text_file}", Key('Enter'))
args = "-c 'cat {}'" + f''' -i --cmd-query='{text_file}' --no-clear-if-empty'''
self.tmux.send_keys(f"""{self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines[-4].startswith(' c'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
self.tmux.send_keys(Key('xx'))
self.tmux.until(lambda lines: lines.ready_with_matches(0))
self.tmux.until(lambda lines: lines[-4].startswith(' c'))
self.tmux.until(lambda lines: lines[-3].startswith('> b'))
def test_preview_scroll_const(self):
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk("--preview 'seq 1000' --preview-window left:+123")}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'123.*123/1000', lines[0]))
def test_preview_scroll_expr(self):
args = "--preview 'seq 1000' --preview-window left:+{3}"
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'321.*321/1000', lines[0]))
def test_preview_scroll_and_offset(self):
args = "--preview 'seq 1000' --preview-window left:+{2}-2"
self.tmux.send_keys(f"""echo foo 123 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'121.*121/1000', lines[0]))
self.tmux.send_keys(Key('Enter'))
self.tmux.send_keys(f"""echo foo :123: 321 | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: re.match(r'121.*121/1000', lines[0]))
self.tmux.send_keys(Key('Enter'))
def test_issue_359_multi_byte_and_regex(self):
self.tmux.send_keys(f"""echo 'ああa' | {self.sk("--regex -q 'a'")}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> ああa'))
def test_issue_361_literal_space(self):
args = '''-q "'foo\\ bar"'''
self.tmux.send_keys(f"""echo 'foo bar\nfoo bar' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> foo bar'))
self.tmux.send_keys(Key('Enter'))
args = '''-q "!foo\\ bar"'''
self.tmux.send_keys(f"""echo 'foo bar\nfoo bar' | {self.sk(args)}""", Key('Enter'))
self.tmux.until(lambda lines: lines.ready_with_matches(1))
self.tmux.until(lambda lines: lines[-3].startswith('> foo bar'))
self.tmux.send_keys(Key('Enter'))
def find_prompt(lines, interactive=False, reverse=False):
linen = -1
prompt = ">"
if interactive:
prompt = "c>"
if reverse:
linen = 0
return lines[linen].startswith(prompt)
if __name__ == '__main__':
unittest.main() | en | 0.549882 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # The integration test of skim # Modeled after fzf's test: https://github.com/junegunn/fzf/blob/master/test/test_go.rb The shell configurations for tmux tests Represent a key to send to tmux Represent a control key Represent an alt key A list that contains the output of tmux # match the status line # normal: `| 10/219 [2] 8/0.` # inline: `> query < 10/219 [2] 8/0.` # preview: `> query < 10/219 [2] 8/0.│...` # match_count item_count select_count item_cursor matcher_stopped Object to manipulate tmux and get result Run tmux command and return result in list of strings (lines) :returns: List<String> # testing basic key binding # Ctrl-A ## Meta-F # CTRL-B # Left, CTRL-D # # META-BS # # # CTRL-Y # META-B # CTRL-F / Right # CTRL-H / BS # CTRL-E # CTRL-U # CTRL-Y # CTRL-W # # META-D # CTRL-M # Ctrl-A ## Meta-F # CTRL-B # Left, CTRL-D # # META-BS # # # CTRL-Y # META-B # CTRL-F / Right # CTRL-H / BS # CTRL-E # CTRL-U # CTRL-Y # CTRL-W # # META-D # CTRL-M # fields, expected # fields, query, match_count(0/1) # XXXXXXXXXXXXXXXXX.. # ..XXXXXXXXXXXXXXXXXM # ..XXXXXXXMXXXXXXX.. ## the dot accounts for spinner # test that inline info is does not overwrite query # --select-1 # --exit-0 # normally we'll put some default options to SKIM_DEFAULT_OPTIONS and override it in command # line. this test will ensure multiple values are accepted. # echo "'\"ABC\"'" | sk --preview="echo X{}X" => X'"ABC"'X echo "'\\"ABC\\"'" | X'"ABC"'X # echo "'\"ABC\"'" | sk --preview="echo X\{}X" => X{}X echo "'\\"ABC\\"'" | X{}X should keep the NULL character, see #142 should behave correctly on case, #219 # smart case should behave correctly on case, #219 # smart case should behave correctly on case, #219 # ignore case should behave correctly on case, #219 # ignore case should behave correctly on case, #219 # respect case should behave correctly on case, #219 # respect case query history should work query history should work {self.sk("-i -c 'echo {}'", '--cmd-history', history_file)} execute should not panic with zero results #276 echo -n "" | {self.sk("--bind 'enter:execute(less {})'")} # less is not executed at all execute should not panic with zero results #276 echo -n "" | {self.sk("--bind 'enter:execute(less)'")} commands only effect if no item is matched echo "a\nb" | {self.sk("--bind 'enter:if-non-matched(backward-delete-char)'", "-q ab")} # not triggered anymore NUL should work in preview command see #278 echo -ne 'a\\0b' | {self.sk("--preview='echo -en {} | xxd'")} echo -ne 'a/b/c' | {self.sk("--skip-to-pattern '[^/]*$'")} echo -n 'a\nb\nc' | {self.sk("-m")} echo -n 'a\nb\nc' | {self.sk("-m --bind 'ctrl-f:append-and-select'")} echo -n 'a\nb\nc' | {self.sk("-m --pre-select-n=1")} echo -n 'a\nb\nc' | {self.sk(args)} echo -n 'a\nb\nc' | {self.sk("-m --pre-select-pat='[b|c]'")} -m --pre-select-file={pre_select_file} echo -n 'a\nb\nc' | {self.sk(args)} -i --cmd-query='{text_file}' --no-clear-if-empty {self.sk(args)} echo foo 123 321 | {self.sk("--preview 'seq 1000' --preview-window left:+123")} echo foo 123 321 | {self.sk(args)} echo foo 123 321 | {self.sk(args)} echo foo :123: 321 | {self.sk(args)} echo 'ああa' | {self.sk("--regex -q 'a'")} -q "'foo\\ bar" echo 'foo bar\nfoo bar' | {self.sk(args)} -q "!foo\\ bar" echo 'foo bar\nfoo bar' | {self.sk(args)} | 2.16677 | 2 |
present/markdown.py | kazukazuinaina/present | 4,252 | 6631281 |
# -*- coding: utf-8 -*-
import os
import warnings
import yaml
from mistune import markdown
from .slide import (
Slide,
Heading,
Paragraph,
Text,
Strong,
Codespan,
Emphasis,
Link,
List,
Image,
Codio,
BlockCode,
BlockHtml,
BlockQuote,
)
class Markdown(object):
"""Parse and traverse through the markdown abstract syntax tree."""
def __init__(self, filename):
self.filename = filename
self.dirname = os.path.dirname(os.path.realpath(filename))
def parse(self):
with open(self.filename, "r") as f:
text = f.read()
slides = []
ast = markdown(text, renderer="ast")
sliden = 0
buffer = []
for i, obj in enumerate(ast):
if obj["type"] in ["newline"]:
continue
if obj["type"] == "thematic_break" and buffer:
slides.append(Slide(elements=buffer))
sliden += 1
buffer = []
continue
try:
if obj["type"] == "paragraph":
images = [c for c in obj["children"] if c["type"] == "image"]
not_images = [c for c in obj["children"] if c["type"] != "image"]
for image in images:
image["src"] = os.path.join(self.dirname, os.path.expanduser(image["src"]))
if image["alt"] == "codio":
with open(image["src"], "r") as f:
codio = yaml.load(f, Loader=yaml.Loader)
buffer.append(Codio(obj=codio))
else:
buffer.append(Image(obj=image))
obj["children"] = not_images
buffer.append(Paragraph(obj=obj))
else:
element_name = obj["type"].title().replace("_", "")
Element = eval(element_name)
buffer.append(Element(obj=obj))
except NameError:
warnings.warn(f"(Slide {sliden + 1}) {element_name} is not supported")
if i == len(ast) - 1:
slides.append(Slide(elements=buffer))
sliden += 1
return slides
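# Example (illustrative, not part of the original module): a minimal sketch of how the
# parser above might be driven; "slides.md" is a placeholder path.
#
#   deck = Markdown("slides.md").parse()
#   print(len(deck), "slides parsed")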
| en | 0.764365 | # -*- coding: utf-8 -*- Parse and traverse through the markdown abstract syntax tree. | 2.633495 | 3
antioch/client/antioch.py | HuygensING/antioch-python-client | 0 | 6631282 | """
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from http import HTTPStatus
from urllib.parse import urljoin
import requests
import antioch.client.util as util
from antioch.client.about_endpoint import AboutEndpoint
from antioch.client.annotations_endpoint import AnnotationsEndpoint
from antioch.client.resources_endpoint import ResourcesEndpoint
from antioch.client.rest_requester import RestRequester
class Antioch:
def __init__(self, server, admin_key="", auth="", auto_confirm=True):
self.server = server if server.endswith('/') else server + '/'
self.session = requests.Session()
self.session.headers['x-ssl-client-s-dn-cn'] = auth
self.session.headers['auth'] = 'SimpleAuth ' + admin_key
self.session.headers['content-type'] = 'application/json'
self.auto_confirm = auto_confirm
self.about = AboutEndpoint(self)
self.resources = ResourcesEndpoint(self)
# self.searches = SearchesEndpoint(self)
self.annotations = AnnotationsEndpoint(self)
def get(self, uri):
url = urljoin(self.server, uri)
r = self.session.get(url=url)
r.raise_for_status()
return r
def put(self, uri, data):
url = urljoin(self.server, uri)
r = self.session.put(url=url, json=data)
r.raise_for_status()
return r
def put_data(self, uri, data):
url = urljoin(self.server, uri)
current_content_type = self.session.headers.get('content-type')
self.session.headers['content-type'] = 'text/xml'
r = self.session.put(url=url, data=data)
self.session.headers['content-type'] = current_content_type
r.raise_for_status()
return r
def post(self, uri, data):
url = urljoin(self.server, uri)
r = self.session.post(url=url, json=data)
r.raise_for_status()
return r
def delete(self, uri):
r = self.session.delete(url=urljoin(self.server, uri))
r.raise_for_status()
return r
def do_xpath(self, resource_view_ids, xpath):
entity = {
'resourceIds': resource_view_ids,
'xpath': xpath
}
def poster():
return self.post(util.endpoint_uri('commands', 'xpath'), entity)
def status_getter():
return self.antioch.get(uri=util.endpoint_uri(self.endpoint, self.uuid, 'text', 'status'))
return RestRequester(poster).on_status(HTTPStatus.OK, util.entity_as_json).invoke().json
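# Example (illustrative, not part of the original client): a minimal usage sketch that only
# relies on the methods defined above; the server URL, admin key and URI are placeholders.
#
#   client = Antioch("http://localhost:2016/", admin_key="changeme")
#   response = client.get("about")
#   print(response.status_code, response.text)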
| """
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from http import HTTPStatus
from urllib.parse import urljoin
import requests
import antioch.client.util as util
from antioch.client.about_endpoint import AboutEndpoint
from antioch.client.annotations_endpoint import AnnotationsEndpoint
from antioch.client.resources_endpoint import ResourcesEndpoint
from antioch.client.rest_requester import RestRequester
class Antioch:
def __init__(self, server, admin_key="", auth="", auto_confirm=True):
self.server = server if server.endswith('/') else server + '/'
self.session = requests.Session()
self.session.headers['x-ssl-client-s-dn-cn'] = auth
self.session.headers['auth'] = 'SimpleAuth ' + admin_key
self.session.headers['content-type'] = 'application/json'
self.auto_confirm = auto_confirm
self.about = AboutEndpoint(self)
self.resources = ResourcesEndpoint(self)
# self.searches = SearchesEndpoint(self)
self.annotations = AnnotationsEndpoint(self)
def get(self, uri):
url = urljoin(self.server, uri)
r = self.session.get(url=url)
r.raise_for_status()
return r
def put(self, uri, data):
url = urljoin(self.server, uri)
r = self.session.put(url=url, json=data)
r.raise_for_status()
return r
def put_data(self, uri, data):
url = urljoin(self.server, uri)
current_content_type = self.session.headers.get('content-type')
self.session.headers['content-type'] = 'text/xml'
r = self.session.put(url=url, data=data)
self.session.headers['content-type'] = current_content_type
r.raise_for_status()
return r
def post(self, uri, data):
url = urljoin(self.server, uri)
r = self.session.post(url=url, json=data)
r.raise_for_status()
return r
def delete(self, uri):
r = self.session.delete(url=urljoin(self.server, uri))
r.raise_for_status()
return r
def do_xpath(self, resource_view_ids, xpath):
entity = {
'resourceIds': resource_view_ids,
'xpath': xpath
}
def poster():
return self.post(util.endpoint_uri('commands', 'xpath'), entity)
def status_getter():
return self.antioch.get(uri=util.endpoint_uri(self.endpoint, self.uuid, 'text', 'status'))
return RestRequester(poster).on_status(HTTPStatus.OK, util.entity_as_json).invoke().json
| en | 0.837271 | Copyright 2017 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # self.searches = SearchesEndpoint(self) | 1.974482 | 2 |
LTHW/ex5.py | hectorip/ProjectsLearnPython | 0 | 6631283 |
# -*- coding: utf-8 -*-
my_name = '<NAME>'
my_age = 25 # not a lie
my_height = 68 # inches
my_weight = 180 # lbs
my_eyes = 'Brown'
my_teeth = 'White'
my_hair = 'Black'
print "Let's talk about %s" % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usally %s depending on the coffee." % my_teeth
print "If I add %d, %d and %d I get %d." % (
my_age, my_height, my_weight, my_age + my_height + my_weight)
| en | 0.735219 | # -*- coding: utf-8 -*- # not a lie # inches # lbs | 2.674673 | 3 |
tests/command_acceptor/user_controllerTests.py | andrii-z4i/xmind-telegram | 0 | 6631284 | from unittest import TestCase
from unittest.mock import Mock, MagicMock
from cmp_command_acceptor.app.controllers.user_controller import UserController
from cmp_command_acceptor.app.dependencies import dependencies
class UserControllerTests(TestCase):
def setUp(self):
self.queue = Mock()
dependencies.queue = self.queue
self.message_holder = MagicMock()
def test_get_user_throws_on_add_to_queue(self):
self.queue.add.side_effect = Exception('Can\'t add')
controller = UserController(self.message_holder)
with self.assertRaises(Exception) as ex:
result = controller.get()
self.assertIsNone(result)
self.assertFalse(True)
self.assertEqual(repr(ex.exception), repr(Exception('Can\'t add')))
self.queue.add.assert_called_once_with('someValue')
def test_get_successfully_add_to_queue(self):
self.queue.add.return_value = True
controller = UserController(self.message_holder)
result = controller.get()
expectation = ({'status': True}, 200)
self.assertEqual(result, expectation)
self.queue.add.assert_called_once_with('someValue')
| none | 1 | 3.00309 | 3 |
|
src/opnsense/scripts/unbound/download_blacklists.py | jdeluyck/core | 0 | 6631285 |
#!/usr/local/bin/python3
"""
Copyright (c) 2020 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import re
import syslog
import tempfile
import time
import fcntl
from configparser import ConfigParser
import requests
def uri_reader(uri):
req_opts = {
'url': uri,
'timeout': 120,
'stream': True
}
try:
req = requests.get(**req_opts)
except Exception as e:
syslog.syslog(syslog.LOG_ERR,'blacklist download : unable to download file from %s (error : %s)' % (uri, e))
return
if req.status_code >= 200 and req.status_code <= 299:
req.raw.decode_content = True
prev_chop = ''
while True:
chop = req.raw.read(1024).decode()
if not chop:
if prev_chop:
yield prev_chop
break
else:
parts = (prev_chop + chop).split('\n')
if parts[-1] != "\n":
prev_chop = parts.pop()
else:
prev_chop = ''
for part in parts:
yield part
else:
syslog.syslog(syslog.LOG_ERR,
'blacklist download : unable to download file from %s (status_code: %d)' % (uri, req.status_code)
)
if __name__ == '__main__':
# check for a running download process, this may take a while so it's better to check...
try:
lck = open('/tmp/unbound-download_blacklists.tmp', 'w+')
fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# already running, exit status 99
sys.exit(99)
domain_pattern = re.compile(
r'^(([\da-zA-Z_])([_\w-]{,62})\.){,127}(([\da-zA-Z])[_\w-]{,61})'
r'?([\da-zA-Z]\.((xn\-\-[a-zA-Z\d]+)|([a-zA-Z\d]{2,})))$'
)
startup_time = time.time()
syslog.openlog('unbound', logoption=syslog.LOG_DAEMON, facility=syslog.LOG_LOCAL4)
blacklist_items = set()
if os.path.exists('/var/unbound/etc/blacklists.ini'):
cnf = ConfigParser()
cnf.read('/var/unbound/etc/blacklists.ini')
# exclude (white) lists, compile to regex to be used to filter blacklist entries
if cnf.has_section('exclude'):
exclude_list = set()
for exclude_item in cnf['exclude']:
try:
re.compile(cnf['exclude'][exclude_item], re.IGNORECASE)
exclude_list.add(cnf['exclude'][exclude_item])
except re.error:
syslog.syslog(syslog.LOG_ERR,
'blacklist download : skip invalid whitelist exclude pattern "%s" (%s)' % (
exclude_item, cnf['exclude'][exclude_item]
)
)
if not exclude_list:
exclude_list.add('$^')
wp = '|'.join(exclude_list)
whitelist_pattern = re.compile(wp, re.IGNORECASE)
syslog.syslog(syslog.LOG_NOTICE, 'blacklist download : exclude domains matching %s' % wp)
# fetch all blacklists
if cnf.has_section('blacklists'):
for blacklist in cnf['blacklists']:
file_stats = {'uri': cnf['blacklists'][blacklist], 'skip' : 0, 'blacklist': 0, 'lines' :0}
for line in uri_reader(cnf['blacklists'][blacklist]):
file_stats['lines'] += 1
# cut line into parts before comment marker (if any)
tmp = line.split('#')[0].split()
entry = None
while tmp:
entry = tmp.pop(-1)
if entry not in ['127.0.0.1', '0.0.0.0']:
break
if entry:
domain = entry.lower()
if whitelist_pattern.match(entry):
file_stats['skip'] += 1
else:
if domain_pattern.match(domain):
file_stats['blacklist'] += 1
blacklist_items.add(entry)
else:
file_stats['skip'] += 1
syslog.syslog(
syslog.LOG_NOTICE,
'blacklist download %(uri)s (lines: %(lines)d exclude: %(skip)d black: %(blacklist)d)' % file_stats
)
# write out results
with open("/var/unbound/etc/dnsbl.conf", 'w') as unbound_outf:
if blacklist_items:
unbound_outf.write('server:\n')
for entry in blacklist_items:
unbound_outf.write("local-data: \"%s A 0.0.0.0\"\n" % entry)
syslog.syslog(syslog.LOG_NOTICE, "blacklist download done in %0.2f seconds (%d records)" % (
time.time() - startup_time, len(blacklist_items)
))
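# Illustrative configuration sketch (not part of the original script): the shape of
# /var/unbound/etc/blacklists.ini that the code above consumes. The option names and the
# URL are placeholders; [exclude] values are regexes, [blacklists] values are download URIs.
#
#   [exclude]
#   corp_domains = .*\.example\.com$
#
#   [blacklists]
#   ads = https://example.org/hosts.txt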
)) | en | 0.743978 | #!/usr/local/bin/python3 Copyright (c) 2020 <NAME> <<EMAIL>> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # check for a running download process, this may take a while so it's better to check... # already running, exit status 99 # exclude (white) lists, compile to regex to be used to filter blacklist entries # fetch all blacklists # cut line into parts before comment marker (if any) # write out results | 1.610834 | 2 |
practice/find_pivot.py | haandol/dojo | 0 | 6631286 |
def find_pivot(nums, lo, hi):
# Binary search for the pivot of a rotated sorted list, i.e. the index of its smallest
# element: if the rightmost value is smaller than the middle one, the rotation point
# lies to the right of mid; otherwise it is at mid or to its left.
while lo < hi:
mid = (lo+hi)//2
if nums[hi] < nums[mid]:
lo = mid + 1
else:
hi = mid
return lo
arr = [4, 5, 6, 7, 0, 1, 2, 3]
print(find_pivot(arr, 0, len(arr)-1))
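# Extra checks (illustrative, not in the original snippet): find_pivot returns the index
# of the smallest element of a sorted list that has been rotated.
assert find_pivot([4, 5, 6, 7, 0, 1, 2, 3], 0, 7) == 4 # 0 sits at index 4
assert find_pivot([0, 1, 2, 3], 0, 3) == 0 # not rotated: pivot stays at index 0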
| none | 1 | 3.528704 | 4
|
tests/ssg_test_suite/rule.py | spensireli/content | 1 | 6631287 | from __future__ import print_function
import logging
import os
import shutil
import os.path
import re
import subprocess
import collections
import json
import fnmatch
import tempfile
import contextlib
from ssg.constants import OSCAP_PROFILE, OSCAP_PROFILE_ALL_ID, OSCAP_RULE
from ssg_test_suite import oscap
from ssg_test_suite import xml_operations
from ssg_test_suite import test_env
from ssg_test_suite import common
from ssg_test_suite.log import LogHelper
logging.getLogger(__name__).addHandler(logging.NullHandler())
Scenario = collections.namedtuple(
"Scenario", ["script", "context", "script_params"])
def get_viable_profiles(selected_profiles, datastream, benchmark, script=None):
"""Read datastream, and return set intersection of profiles of given
benchmark and those provided in `selected_profiles` parameter.
"""
valid_profiles = []
all_profiles_elements = xml_operations.get_all_profiles_in_benchmark(
datastream, benchmark, logging)
all_profiles = [el.attrib["id"] for el in all_profiles_elements]
all_profiles.append(OSCAP_PROFILE_ALL_ID)
for ds_profile in all_profiles:
if 'ALL' in selected_profiles:
valid_profiles += [ds_profile]
continue
for sel_profile in selected_profiles:
if ds_profile.endswith(sel_profile):
valid_profiles += [ds_profile]
if not valid_profiles:
if script:
logging.warning('Script {0} - profile {1} not found in datastream'
.format(script, ", ".join(selected_profiles)))
else:
logging.warning('Profile {0} not found in datastream'
.format(", ".join(selected_profiles)))
return valid_profiles
def generate_xslt_change_value_template(value_short_id, new_value):
XSLT_TEMPLATE = """<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:ds="http://scap.nist.gov/schema/scap/source/1.2" xmlns:xccdf-1.2="http://checklists.nist.gov/xccdf/1.2">
<xsl:output omit-xml-declaration="yes" indent="yes"/>
<xsl:strip-space elements="*"/>
<xsl:template match="node()|@*">
<xsl:copy>
<xsl:apply-templates select="node()|@*"/>
</xsl:copy>
</xsl:template>
<xsl:template match="ds:component/xccdf-1.2:Benchmark//xccdf-1.2:Value[@id='xccdf_org.ssgproject.content_value_{value_short_id}']/xccdf-1.2:value[not(@selector)]/text()">{new_value}</xsl:template>
</xsl:stylesheet>"""
return XSLT_TEMPLATE.format(value_short_id=value_short_id, new_value=new_value)
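# Example (illustrative, not part of the original module): the helper above only fills the
# two placeholders, so a hypothetical call such as
#
#   generate_xslt_change_value_template("var_accounts_tmout", "600")
#
# returns a stylesheet that rewrites xccdf_org.ssgproject.content_value_var_accounts_tmout to 600.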
def _apply_script(rule_dir, test_env, script):
"""Run particular test script on VM and log it's output."""
logging.debug("Applying script {0}".format(script))
rule_name = os.path.basename(rule_dir)
log_file_name = os.path.join(
LogHelper.LOG_DIR, rule_name + ".prescripts.log")
with open(log_file_name, 'a') as log_file:
log_file.write('##### {0} / {1} #####\n'.format(rule_name, script))
shared_dir = os.path.join(common.REMOTE_TEST_SCENARIOS_DIRECTORY, "shared")
command = "cd {0}; SHARED={1} bash -x {2}".format(rule_dir, shared_dir, script)
try:
test_env.execute_ssh_command(command, log_file)
except subprocess.CalledProcessError as exc:
logging.error("Rule testing script {script} failed with exit code {rc}"
.format(script=script, rc=exc.returncode))
return False
return True
def _get_script_context(script):
"""Return context of the script."""
result = re.search(r'.*\.([^.]*)\.[^.]*$', script)
if result is None:
return None
return result.group(1)
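# Illustrative note (not part of the original module): scenario scripts are named
# "<name>.<context>.<extension>", and the regex above extracts the middle token, so a
# hypothetical "wrong_value.fail.sh" yields the context "fail".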
class RuleChecker(oscap.Checker):
"""
Rule checks generally work like this -
for every profile that supports that rule:
- Alter the system.
- Run the scan, check that the result meets expectations.
If the test scenario passed as requested, return True,
if it failed or passed unexpectedly, return False.
The following sequence applies if the initial scan
has failed as expected:
- If there are no remediations, return True.
- Run remediation, return False if it failed.
- Return result of the final scan of remediated system.
"""
def __init__(self, test_env):
super(RuleChecker, self).__init__(test_env)
self._matching_rule_found = False
self.results = list()
self._current_result = None
self.remote_dir = ""
def _run_test(self, profile, test_data):
scenario = test_data["scenario"]
rule_id = test_data["rule_id"]
remediation_available = test_data["remediation_available"]
LogHelper.preload_log(
logging.INFO, "Script {0} using profile {1} OK".format(scenario.script, profile),
log_target='pass')
LogHelper.preload_log(
logging.WARNING, "Script {0} using profile {1} notapplicable".format(scenario.script, profile),
log_target='notapplicable')
LogHelper.preload_log(
logging.ERROR,
"Script {0} using profile {1} found issue:".format(scenario.script, profile),
log_target='fail')
runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
runner = runner_cls(
self.test_env, oscap.process_profile_id(profile), self.datastream, self.benchmark_id,
rule_id, scenario.script, self.dont_clean, self.manual_debug)
initial_scan_res = self._initial_scan_went_ok(runner, rule_id, scenario.context)
if not initial_scan_res:
return False
if initial_scan_res == 2:
# notapplicable
return True
supported_and_available_remediations = self._get_available_remediations(scenario)
if (scenario.context not in ['fail', 'error']
or not supported_and_available_remediations):
return True
if remediation_available:
if not self._remediation_went_ok(runner, rule_id):
return False
return self._final_scan_went_ok(runner, rule_id)
else:
msg = ("No remediation is available for rule '{}'."
.format(rule_id))
logging.warning(msg)
return False
def _initial_scan_went_ok(self, runner, rule_id, context):
success = runner.run_stage_with_context("initial", context)
self._current_result.record_stage_result("initial_scan", success)
if not success:
msg = ("The initial scan failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _is_remediation_available(self, rule):
if xml_operations.find_fix_in_benchmark(
self.datastream, self.benchmark_id, rule.id, self.remediate_using) is None:
return False
else:
return True
def _get_available_remediations(self, scenario):
is_supported = set(['all'])
is_supported.add(
oscap.REMEDIATION_RUNNER_TO_REMEDIATION_MEANS[self.remediate_using])
supported_and_available_remediations = set(
scenario.script_params['remediation']).intersection(is_supported)
return supported_and_available_remediations
def _remediation_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('remediation', 'fixed')
self._current_result.record_stage_result("remediation", success)
if not success:
msg = ("The remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _final_scan_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('final', 'pass')
self._current_result.record_stage_result("final_scan", success)
if not success:
msg = ("The check after remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _rule_should_be_tested(self, rule, rules_to_be_tested):
if 'ALL' in rules_to_be_tested:
return True
else:
for rule_to_be_tested in rules_to_be_tested:
                # the target may be a full rule ID or just the short rule name; glob patterns are accepted
if rule_to_be_tested.startswith(OSCAP_RULE):
pattern = rule_to_be_tested
else:
pattern = OSCAP_RULE + rule_to_be_tested
if fnmatch.fnmatch(rule.id, pattern):
return True
return False
def _ensure_package_present_for_all_scenarios(self, scenarios_by_rule):
packages_required = set()
for rule, scenarios in scenarios_by_rule.items():
for s in scenarios:
scenario_packages = s.script_params["packages"]
packages_required.update(scenario_packages)
if packages_required:
common.install_packages(self.test_env, packages_required)
def _prepare_environment(self, scenarios_by_rule):
domain_ip = self.test_env.domain_ip
try:
self.remote_dir = common.send_scripts(self.test_env)
except RuntimeError as exc:
msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
raise RuntimeError(msg)
self._ensure_package_present_for_all_scenarios(scenarios_by_rule)
def _get_rules_to_test(self, target):
rules_to_test = []
for rule in common.iterate_over_rules():
if not self._rule_should_be_tested(rule, target):
continue
if not xml_operations.find_rule_in_benchmark(
self.datastream, self.benchmark_id, rule.id):
logging.error(
"Rule '{0}' isn't present in benchmark '{1}' in '{2}'"
.format(rule.id, self.benchmark_id, self.datastream))
continue
rules_to_test.append(rule)
return rules_to_test
def test_rule(self, state, rule, scenarios):
remediation_available = self._is_remediation_available(rule)
self._check_rule(
rule, scenarios,
self.remote_dir, state, remediation_available)
def _test_target(self, target):
rules_to_test = self._get_rules_to_test(target)
if not rules_to_test:
self._matching_rule_found = False
logging.error("No matching rule ID found for '{0}'".format(target))
return
self._matching_rule_found = True
scenarios_by_rule = dict()
for rule in rules_to_test:
rule_scenarios = self._get_scenarios(
rule.directory, rule.files, self.scenarios_regex,
self.benchmark_cpes)
scenarios_by_rule[rule.id] = rule_scenarios
self._prepare_environment(scenarios_by_rule)
with test_env.SavedState.create_from_environment(self.test_env, "tests_uploaded") as state:
for rule in rules_to_test:
self.test_rule(state, rule, scenarios_by_rule[rule.id])
def _modify_parameters(self, script, params):
if self.scenarios_profile:
params['profiles'] = [self.scenarios_profile]
if not params["profiles"]:
params["profiles"].append(OSCAP_PROFILE_ALL_ID)
logging.debug(
"Added the {0} profile to the list of available profiles for {1}"
.format(OSCAP_PROFILE_ALL_ID, script))
return params
def _parse_parameters(self, script):
"""Parse parameters from script header"""
params = {'profiles': [],
'templates': [],
'packages': [],
'platform': ['multi_platform_all'],
'remediation': ['all'],
'variables': [],
}
with open(script, 'r') as script_file:
script_content = script_file.read()
for parameter in params:
found = re.search(r'^# {0} = ([ =,_\.\-\w\(\)]*)$'.format(parameter),
script_content,
re.MULTILINE)
if found is None:
continue
splitted = found.group(1).split(',')
params[parameter] = [value.strip() for value in splitted]
return params
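    # Illustrative note: a scenario script declares its parameters in header comments
    # that the regex above recognizes, for example (hypothetical header):
    #   # packages = openssh-server
    #   # profiles = xccdf_org.ssgproject.content_profile_ospp
    #   # remediation = bash
    #   # variables = var_sshd_set_keepalive=0
    # Each value is split on commas and stripped of surrounding whitespace.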
def _get_scenarios(self, rule_dir, scripts, scenarios_regex, benchmark_cpes):
""" Returns only valid scenario files, rest is ignored (is not meant
to be executed directly.
"""
if scenarios_regex is not None:
scenarios_pattern = re.compile(scenarios_regex)
scenarios = []
for script in scripts:
if scenarios_regex is not None:
if scenarios_pattern.match(script) is None:
logging.debug("Skipping script %s - it did not match "
"--scenarios regex" % script)
continue
script_context = _get_script_context(script)
if script_context is not None:
script_params = self._parse_parameters(os.path.join(rule_dir, script))
script_params = self._modify_parameters(script, script_params)
if common.matches_platform(script_params["platform"], benchmark_cpes):
scenarios += [Scenario(script, script_context, script_params)]
else:
logging.warning("Script %s is not applicable on given platform" % script)
return scenarios
def _check_rule(self, rule, scenarios, remote_dir, state, remediation_available):
remote_rule_dir = os.path.join(remote_dir, rule.short_id)
logging.info(rule.id)
logging.debug("Testing rule directory {0}".format(rule.directory))
args_list = [
(s, remote_rule_dir, rule.id, remediation_available) for s in scenarios
]
state.map_on_top(self._check_and_record_rule_scenario, args_list)
def _check_and_record_rule_scenario(self, scenario, remote_rule_dir, rule_id, remediation_available):
self._current_result = common.RuleResult()
self._current_result.conditions = common.Scenario_conditions(
self.test_env.name, self.test_env.scanning_mode,
self.remediate_using, self.datastream)
self._current_result.scenario = common.Scenario_run(rule_id, scenario.script)
self._current_result.when = self.test_timestamp_str
with self.copy_of_datastream():
self._check_rule_scenario(scenario, remote_rule_dir, rule_id, remediation_available)
self.results.append(self._current_result.save_to_dict())
@contextlib.contextmanager
def copy_of_datastream(self, new_filename=None):
old_filename = self.datastream
if not new_filename:
_, new_filename = tempfile.mkstemp(prefix="ssgts_ds_modified", dir="/tmp")
shutil.copy(old_filename, new_filename)
self.datastream = new_filename
yield new_filename
self.datastream = old_filename
os.unlink(new_filename)
def _change_variable_value(self, varname, value):
_, xslt_filename = tempfile.mkstemp(prefix="xslt-change-value", dir="/tmp")
template = generate_xslt_change_value_template(varname, value)
with open(xslt_filename, "w") as fp:
fp.write(template)
_, temp_datastream = tempfile.mkstemp(prefix="ds-temp", dir="/tmp")
log_file_name = os.path.join(LogHelper.LOG_DIR, "env-preparation.log")
with open(log_file_name, "a") as log_file:
common.run_with_stdout_logging(
"xsltproc", ("--output", temp_datastream, xslt_filename, self.datastream),
log_file)
os.rename(temp_datastream, self.datastream)
os.unlink(xslt_filename)
def _check_rule_scenario(self, scenario, remote_rule_dir, rule_id, remediation_available):
if not _apply_script(
remote_rule_dir, self.test_env, scenario.script):
logging.error("Environment failed to prepare, skipping test")
self._current_result.record_stage_result("preparation", False)
return
if scenario.script_params["variables"]:
for assignment in scenario.script_params["variables"]:
varname, value = assignment.split("=", 1)
self._change_variable_value(varname, value)
self._current_result.record_stage_result("preparation", True)
logging.debug('Using test script {0} with context {1}'
.format(scenario.script, scenario.context))
if scenario.script_params['profiles']:
profiles = get_viable_profiles(
scenario.script_params['profiles'], self.datastream, self.benchmark_id, scenario.script)
else:
# Special case for combined mode when scenario.script_params['profiles']
# is empty which means scenario is not applicable on given profile.
logging.warning('Script {0} is not applicable on given profile'
.format(scenario.script))
return
test_data = dict(scenario=scenario,
rule_id=rule_id,
remediation_available=remediation_available)
self.run_test_for_all_profiles(profiles, test_data)
self.executed_tests += 1
def finalize(self):
super(RuleChecker, self).finalize()
with open(os.path.join(LogHelper.LOG_DIR, "results.json"), "w") as f:
json.dump(self.results, f)
def perform_rule_check(options):
checker = RuleChecker(options.test_env)
checker.datastream = options.datastream
checker.benchmark_id = options.benchmark_id
checker.remediate_using = options.remediate_using
checker.dont_clean = options.dont_clean
checker.manual_debug = options.manual_debug
checker.benchmark_cpes = options.benchmark_cpes
checker.scenarios_regex = options.scenarios_regex
checker.scenarios_profile = options.scenarios_profile
# check if target is a complete profile ID, if not prepend profile prefix
if (checker.scenarios_profile is not None and
not checker.scenarios_profile.startswith(OSCAP_PROFILE) and
not oscap.is_virtual_oscap_profile(checker.scenarios_profile)):
checker.scenarios_profile = OSCAP_PROFILE+options.scenarios_profile
checker.test_target(options.target)
| from __future__ import print_function
import logging
import os
import shutil
import os.path
import re
import subprocess
import collections
import json
import fnmatch
import tempfile
import contextlib
from ssg.constants import OSCAP_PROFILE, OSCAP_PROFILE_ALL_ID, OSCAP_RULE
from ssg_test_suite import oscap
from ssg_test_suite import xml_operations
from ssg_test_suite import test_env
from ssg_test_suite import common
from ssg_test_suite.log import LogHelper
logging.getLogger(__name__).addHandler(logging.NullHandler())
Scenario = collections.namedtuple(
"Scenario", ["script", "context", "script_params"])
def get_viable_profiles(selected_profiles, datastream, benchmark, script=None):
"""Read datastream, and return set intersection of profiles of given
benchmark and those provided in `selected_profiles` parameter.
"""
valid_profiles = []
all_profiles_elements = xml_operations.get_all_profiles_in_benchmark(
datastream, benchmark, logging)
all_profiles = [el.attrib["id"] for el in all_profiles_elements]
all_profiles.append(OSCAP_PROFILE_ALL_ID)
for ds_profile in all_profiles:
if 'ALL' in selected_profiles:
valid_profiles += [ds_profile]
continue
for sel_profile in selected_profiles:
if ds_profile.endswith(sel_profile):
valid_profiles += [ds_profile]
if not valid_profiles:
if script:
logging.warning('Script {0} - profile {1} not found in datastream'
.format(script, ", ".join(selected_profiles)))
else:
logging.warning('Profile {0} not found in datastream'
.format(", ".join(selected_profiles)))
return valid_profiles
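# Illustrative note (profile names here are examples only): selected profile names are
# matched as suffixes of the full datastream profile IDs, so a selection like ["ospp"]
# would match a profile ID ending in "_ospp", while ["ALL"] keeps every profile of the
# benchmark plus the OSCAP_PROFILE_ALL_ID constant appended above.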
def generate_xslt_change_value_template(value_short_id, new_value):
XSLT_TEMPLATE = """<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:ds="http://scap.nist.gov/schema/scap/source/1.2" xmlns:xccdf-1.2="http://checklists.nist.gov/xccdf/1.2">
<xsl:output omit-xml-declaration="yes" indent="yes"/>
<xsl:strip-space elements="*"/>
<xsl:template match="node()|@*">
<xsl:copy>
<xsl:apply-templates select="node()|@*"/>
</xsl:copy>
</xsl:template>
<xsl:template match="ds:component/xccdf-1.2:Benchmark//xccdf-1.2:Value[@id='xccdf_org.ssgproject.content_value_{value_short_id}']/xccdf-1.2:value[not(@selector)]/text()">{new_value}</xsl:template>
</xsl:stylesheet>"""
return XSLT_TEMPLATE.format(value_short_id=value_short_id, new_value=new_value)
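# Illustrative note (hypothetical variable name): the generated stylesheet copies the
# datastream unchanged except for the text of the selected XCCDF Value element, e.g.
#   generate_xslt_change_value_template("var_accounts_tmout", "600")
# returns XSLT that rewrites the selector-less value of
# xccdf_org.ssgproject.content_value_var_accounts_tmout to "600"; it is applied to the
# datastream with xsltproc in RuleChecker._change_variable_value.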
def _apply_script(rule_dir, test_env, script):
"""Run particular test script on VM and log it's output."""
logging.debug("Applying script {0}".format(script))
rule_name = os.path.basename(rule_dir)
log_file_name = os.path.join(
LogHelper.LOG_DIR, rule_name + ".prescripts.log")
with open(log_file_name, 'a') as log_file:
log_file.write('##### {0} / {1} #####\n'.format(rule_name, script))
shared_dir = os.path.join(common.REMOTE_TEST_SCENARIOS_DIRECTORY, "shared")
command = "cd {0}; SHARED={1} bash -x {2}".format(rule_dir, shared_dir, script)
try:
test_env.execute_ssh_command(command, log_file)
except subprocess.CalledProcessError as exc:
logging.error("Rule testing script {script} failed with exit code {rc}"
.format(script=script, rc=exc.returncode))
return False
return True
def _get_script_context(script):
"""Return context of the script."""
result = re.search(r'.*\.([^.]*)\.[^.]*$', script)
if result is None:
return None
return result.group(1)
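# Illustrative note (hypothetical file names, not from the upstream docs): the "context"
# is the second-to-last dot-separated component of the scenario file name, e.g.
#   _get_script_context("sshd_disable_root_login.fail.sh")  returns "fail"
#   _get_script_context("correct_value.pass.sh")            returns "pass"
#   _get_script_context("helper.sh")                        returns None
# Any "<name>.<context>.sh" pattern works the same way.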
class RuleChecker(oscap.Checker):
"""
Rule checks generally work like this -
for every profile that supports that rule:
- Alter the system.
- Run the scan, check that the result meets expectations.
If the test scenario passed as requested, return True,
if it failed or passed unexpectedly, return False.
The following sequence applies if the initial scan
has failed as expected:
- If there are no remediations, return True.
- Run remediation, return False if it failed.
- Return result of the final scan of remediated system.
"""
def __init__(self, test_env):
super(RuleChecker, self).__init__(test_env)
self._matching_rule_found = False
self.results = list()
self._current_result = None
self.remote_dir = ""
def _run_test(self, profile, test_data):
scenario = test_data["scenario"]
rule_id = test_data["rule_id"]
remediation_available = test_data["remediation_available"]
LogHelper.preload_log(
logging.INFO, "Script {0} using profile {1} OK".format(scenario.script, profile),
log_target='pass')
LogHelper.preload_log(
logging.WARNING, "Script {0} using profile {1} notapplicable".format(scenario.script, profile),
log_target='notapplicable')
LogHelper.preload_log(
logging.ERROR,
"Script {0} using profile {1} found issue:".format(scenario.script, profile),
log_target='fail')
runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
runner = runner_cls(
self.test_env, oscap.process_profile_id(profile), self.datastream, self.benchmark_id,
rule_id, scenario.script, self.dont_clean, self.manual_debug)
initial_scan_res = self._initial_scan_went_ok(runner, rule_id, scenario.context)
if not initial_scan_res:
return False
if initial_scan_res == 2:
# notapplicable
return True
supported_and_available_remediations = self._get_available_remediations(scenario)
if (scenario.context not in ['fail', 'error']
or not supported_and_available_remediations):
return True
if remediation_available:
if not self._remediation_went_ok(runner, rule_id):
return False
return self._final_scan_went_ok(runner, rule_id)
else:
msg = ("No remediation is available for rule '{}'."
.format(rule_id))
logging.warning(msg)
return False
def _initial_scan_went_ok(self, runner, rule_id, context):
success = runner.run_stage_with_context("initial", context)
self._current_result.record_stage_result("initial_scan", success)
if not success:
msg = ("The initial scan failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _is_remediation_available(self, rule):
if xml_operations.find_fix_in_benchmark(
self.datastream, self.benchmark_id, rule.id, self.remediate_using) is None:
return False
else:
return True
def _get_available_remediations(self, scenario):
is_supported = set(['all'])
is_supported.add(
oscap.REMEDIATION_RUNNER_TO_REMEDIATION_MEANS[self.remediate_using])
supported_and_available_remediations = set(
scenario.script_params['remediation']).intersection(is_supported)
return supported_and_available_remediations
def _remediation_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('remediation', 'fixed')
self._current_result.record_stage_result("remediation", success)
if not success:
msg = ("The remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _final_scan_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('final', 'pass')
self._current_result.record_stage_result("final_scan", success)
if not success:
msg = ("The check after remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _rule_should_be_tested(self, rule, rules_to_be_tested):
if 'ALL' in rules_to_be_tested:
return True
else:
for rule_to_be_tested in rules_to_be_tested:
                # the target may be a full rule ID or just the short rule name; glob patterns are accepted
if rule_to_be_tested.startswith(OSCAP_RULE):
pattern = rule_to_be_tested
else:
pattern = OSCAP_RULE + rule_to_be_tested
if fnmatch.fnmatch(rule.id, pattern):
return True
return False
def _ensure_package_present_for_all_scenarios(self, scenarios_by_rule):
packages_required = set()
for rule, scenarios in scenarios_by_rule.items():
for s in scenarios:
scenario_packages = s.script_params["packages"]
packages_required.update(scenario_packages)
if packages_required:
common.install_packages(self.test_env, packages_required)
def _prepare_environment(self, scenarios_by_rule):
domain_ip = self.test_env.domain_ip
try:
self.remote_dir = common.send_scripts(self.test_env)
except RuntimeError as exc:
msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
raise RuntimeError(msg)
self._ensure_package_present_for_all_scenarios(scenarios_by_rule)
def _get_rules_to_test(self, target):
rules_to_test = []
for rule in common.iterate_over_rules():
if not self._rule_should_be_tested(rule, target):
continue
if not xml_operations.find_rule_in_benchmark(
self.datastream, self.benchmark_id, rule.id):
logging.error(
"Rule '{0}' isn't present in benchmark '{1}' in '{2}'"
.format(rule.id, self.benchmark_id, self.datastream))
continue
rules_to_test.append(rule)
return rules_to_test
def test_rule(self, state, rule, scenarios):
remediation_available = self._is_remediation_available(rule)
self._check_rule(
rule, scenarios,
self.remote_dir, state, remediation_available)
def _test_target(self, target):
rules_to_test = self._get_rules_to_test(target)
if not rules_to_test:
self._matching_rule_found = False
logging.error("No matching rule ID found for '{0}'".format(target))
return
self._matching_rule_found = True
scenarios_by_rule = dict()
for rule in rules_to_test:
rule_scenarios = self._get_scenarios(
rule.directory, rule.files, self.scenarios_regex,
self.benchmark_cpes)
scenarios_by_rule[rule.id] = rule_scenarios
self._prepare_environment(scenarios_by_rule)
with test_env.SavedState.create_from_environment(self.test_env, "tests_uploaded") as state:
for rule in rules_to_test:
self.test_rule(state, rule, scenarios_by_rule[rule.id])
def _modify_parameters(self, script, params):
if self.scenarios_profile:
params['profiles'] = [self.scenarios_profile]
if not params["profiles"]:
params["profiles"].append(OSCAP_PROFILE_ALL_ID)
logging.debug(
"Added the {0} profile to the list of available profiles for {1}"
.format(OSCAP_PROFILE_ALL_ID, script))
return params
def _parse_parameters(self, script):
"""Parse parameters from script header"""
params = {'profiles': [],
'templates': [],
'packages': [],
'platform': ['multi_platform_all'],
'remediation': ['all'],
'variables': [],
}
with open(script, 'r') as script_file:
script_content = script_file.read()
for parameter in params:
found = re.search(r'^# {0} = ([ =,_\.\-\w\(\)]*)$'.format(parameter),
script_content,
re.MULTILINE)
if found is None:
continue
splitted = found.group(1).split(',')
params[parameter] = [value.strip() for value in splitted]
return params
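    # Illustrative note: a scenario script declares its parameters in header comments
    # that the regex above recognizes, for example (hypothetical header):
    #   # packages = openssh-server
    #   # profiles = xccdf_org.ssgproject.content_profile_ospp
    #   # remediation = bash
    #   # variables = var_sshd_set_keepalive=0
    # Each value is split on commas and stripped of surrounding whitespace.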
def _get_scenarios(self, rule_dir, scripts, scenarios_regex, benchmark_cpes):
""" Returns only valid scenario files, rest is ignored (is not meant
to be executed directly.
"""
if scenarios_regex is not None:
scenarios_pattern = re.compile(scenarios_regex)
scenarios = []
for script in scripts:
if scenarios_regex is not None:
if scenarios_pattern.match(script) is None:
logging.debug("Skipping script %s - it did not match "
"--scenarios regex" % script)
continue
script_context = _get_script_context(script)
if script_context is not None:
script_params = self._parse_parameters(os.path.join(rule_dir, script))
script_params = self._modify_parameters(script, script_params)
if common.matches_platform(script_params["platform"], benchmark_cpes):
scenarios += [Scenario(script, script_context, script_params)]
else:
logging.warning("Script %s is not applicable on given platform" % script)
return scenarios
def _check_rule(self, rule, scenarios, remote_dir, state, remediation_available):
remote_rule_dir = os.path.join(remote_dir, rule.short_id)
logging.info(rule.id)
logging.debug("Testing rule directory {0}".format(rule.directory))
args_list = [
(s, remote_rule_dir, rule.id, remediation_available) for s in scenarios
]
state.map_on_top(self._check_and_record_rule_scenario, args_list)
def _check_and_record_rule_scenario(self, scenario, remote_rule_dir, rule_id, remediation_available):
self._current_result = common.RuleResult()
self._current_result.conditions = common.Scenario_conditions(
self.test_env.name, self.test_env.scanning_mode,
self.remediate_using, self.datastream)
self._current_result.scenario = common.Scenario_run(rule_id, scenario.script)
self._current_result.when = self.test_timestamp_str
with self.copy_of_datastream():
self._check_rule_scenario(scenario, remote_rule_dir, rule_id, remediation_available)
self.results.append(self._current_result.save_to_dict())
@contextlib.contextmanager
def copy_of_datastream(self, new_filename=None):
old_filename = self.datastream
if not new_filename:
_, new_filename = tempfile.mkstemp(prefix="ssgts_ds_modified", dir="/tmp")
shutil.copy(old_filename, new_filename)
self.datastream = new_filename
yield new_filename
self.datastream = old_filename
os.unlink(new_filename)
def _change_variable_value(self, varname, value):
_, xslt_filename = tempfile.mkstemp(prefix="xslt-change-value", dir="/tmp")
template = generate_xslt_change_value_template(varname, value)
with open(xslt_filename, "w") as fp:
fp.write(template)
_, temp_datastream = tempfile.mkstemp(prefix="ds-temp", dir="/tmp")
log_file_name = os.path.join(LogHelper.LOG_DIR, "env-preparation.log")
with open(log_file_name, "a") as log_file:
common.run_with_stdout_logging(
"xsltproc", ("--output", temp_datastream, xslt_filename, self.datastream),
log_file)
os.rename(temp_datastream, self.datastream)
os.unlink(xslt_filename)
def _check_rule_scenario(self, scenario, remote_rule_dir, rule_id, remediation_available):
if not _apply_script(
remote_rule_dir, self.test_env, scenario.script):
logging.error("Environment failed to prepare, skipping test")
self._current_result.record_stage_result("preparation", False)
return
if scenario.script_params["variables"]:
for assignment in scenario.script_params["variables"]:
varname, value = assignment.split("=", 1)
self._change_variable_value(varname, value)
self._current_result.record_stage_result("preparation", True)
logging.debug('Using test script {0} with context {1}'
.format(scenario.script, scenario.context))
if scenario.script_params['profiles']:
profiles = get_viable_profiles(
scenario.script_params['profiles'], self.datastream, self.benchmark_id, scenario.script)
else:
# Special case for combined mode when scenario.script_params['profiles']
# is empty which means scenario is not applicable on given profile.
logging.warning('Script {0} is not applicable on given profile'
.format(scenario.script))
return
test_data = dict(scenario=scenario,
rule_id=rule_id,
remediation_available=remediation_available)
self.run_test_for_all_profiles(profiles, test_data)
self.executed_tests += 1
def finalize(self):
super(RuleChecker, self).finalize()
with open(os.path.join(LogHelper.LOG_DIR, "results.json"), "w") as f:
json.dump(self.results, f)
def perform_rule_check(options):
checker = RuleChecker(options.test_env)
checker.datastream = options.datastream
checker.benchmark_id = options.benchmark_id
checker.remediate_using = options.remediate_using
checker.dont_clean = options.dont_clean
checker.manual_debug = options.manual_debug
checker.benchmark_cpes = options.benchmark_cpes
checker.scenarios_regex = options.scenarios_regex
checker.scenarios_profile = options.scenarios_profile
# check if target is a complete profile ID, if not prepend profile prefix
if (checker.scenarios_profile is not None and
not checker.scenarios_profile.startswith(OSCAP_PROFILE) and
not oscap.is_virtual_oscap_profile(checker.scenarios_profile)):
checker.scenarios_profile = OSCAP_PROFILE+options.scenarios_profile
checker.test_target(options.target)
| en | 0.569837 | Read datastream, and return set intersection of profiles of given benchmark and those provided in `selected_profiles` parameter. <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:ds="http://scap.nist.gov/schema/scap/source/1.2" xmlns:xccdf-1.2="http://checklists.nist.gov/xccdf/1.2"> <xsl:output omit-xml-declaration="yes" indent="yes"/> <xsl:strip-space elements="*"/> <xsl:template match="node()|@*"> <xsl:copy> <xsl:apply-templates select="node()|@*"/> </xsl:copy> </xsl:template> <xsl:template match="ds:component/xccdf-1.2:Benchmark//xccdf-1.2:Value[@id='xccdf_org.ssgproject.content_value_{value_short_id}']/xccdf-1.2:value[not(@selector)]/text()">{new_value}</xsl:template> </xsl:stylesheet> Run particular test script on VM and log it's output. #### {0} / {1} #####\n'.format(rule_name, script)) Return context of the script. Rule checks generally work like this - for every profile that supports that rule: - Alter the system. - Run the scan, check that the result meets expectations. If the test scenario passed as requested, return True, if it failed or passed unexpectedly, return False. The following sequence applies if the initial scan has failed as expected: - If there are no remediations, return True. - Run remediation, return False if it failed. - Return result of the final scan of remediated system. # notapplicable # we check for a substring Parse parameters from script header # {0} = ([ =,_\.\-\w\(\)]*)$'.format(parameter), Returns only valid scenario files, rest is ignored (is not meant to be executed directly. # Special case for combined mode when scenario.script_params['profiles'] # is empty which means scenario is not applicable on given profile. # check if target is a complete profile ID, if not prepend profile prefix | 2.015163 | 2 |
set_database.py | ReadySetOdds/Thelinebacker | 0 | 6631288 | <filename>set_database.py
# import dependencies
import pymysql, json
#BESTBETS rotation, league, date, match_details, play, line, odds, play_amount
#GAME league, home_team, away_team, date, home_win, away_win, home_proj_score, away_proj_score, spread_total, home_spread_1, home_spread_2, away_spread_1, away_spread_2, total, home_total, odds_under, away_total, odds_total
#ODDS league, home_team, away_team, date, odds_group, home_odds_1, home_odds_2, away_odds_1, away_odds_2, price_total, odds_over, odds_under
# main
if __name__ == '__main__':
# log into database
database = pymysql.connect(**json.load(open('database.json')))
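    # Note: pymysql.connect() is given the keyword arguments stored in database.json,
    # so that file is assumed to look roughly like (placeholder values, assumed shape):
    #   {"host": "localhost", "user": "scores", "password": "...", "db": "lines"}
    # Key names follow pymysql.connect().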
cursor = database.cursor()
# create tables
for table_name, values in (
('bestbets', 'rotation INTEGER, league TEXT, date TIMESTAMP, match_details TEXT, play TEXT, line FLOAT, odds INTEGER, play_amount INTEGER'),
('games', 'league TEXT, home_team TEXT, away_team TEXT, date TIMESTAMP, home_win FLOAT, away_win FLOAT, home_proj_score FLOAT, away_proj_score FLOAT, spread_total FLOAT, home_spread_1 FLOAT, home_spread_2 FLOAT, away_spread_1 FLOAT, away_spread_2 FLOAT, total FLOAT, home_total FLOAT, odds_under FLOAT, away_total FLOAT, odds_total FLOAT'),
('odds', 'league TEXT, home_team TEXT, away_team TEXT, date TIMESTAMP, odds_group TEXT, home_odds_1 FLOAT, home_odds_2 FLOAT, away_odds_1 FLOAT, away_odds_2 FLOAT, price_total FLOAT, odds_over FLOAT, odds_under FLOAT'),
):
# delete existing
cursor.execute('DROP TABLE IF EXISTS {};'.format(table_name))
database.commit()
# create table
cursor.execute('create table {} ({});'.format(table_name, values))
database.commit()
# finished
database.close() | <filename>set_database.py
# import dependencies
import pymysql, json
#BESTBETS rotation, league, date, match_details, play, line, odds, play_amount
#GAME league, home_team, away_team, date, home_win, away_win, home_proj_score, away_proj_score, spread_total, home_spread_1, home_spread_2, away_spread_1, away_spread_2, total, home_total, odds_under, away_total, odds_total
#ODDS league, home_team, away_team, date, odds_group, home_odds_1, home_odds_2, away_odds_1, away_odds_2, price_total, odds_over, odds_under
# main
if __name__ == '__main__':
# log into database
database = pymysql.connect(**json.load(open('database.json')))
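    # Note: pymysql.connect() is given the keyword arguments stored in database.json,
    # so that file is assumed to look roughly like (placeholder values, assumed shape):
    #   {"host": "localhost", "user": "scores", "password": "...", "db": "lines"}
    # Key names follow pymysql.connect().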
cursor = database.cursor()
# create tables
for table_name, values in (
('bestbets', 'rotation INTEGER, league TEXT, date TIMESTAMP, match_details TEXT, play TEXT, line FLOAT, odds INTEGER, play_amount INTEGER'),
('games', 'league TEXT, home_team TEXT, away_team TEXT, date TIMESTAMP, home_win FLOAT, away_win FLOAT, home_proj_score FLOAT, away_proj_score FLOAT, spread_total FLOAT, home_spread_1 FLOAT, home_spread_2 FLOAT, away_spread_1 FLOAT, away_spread_2 FLOAT, total FLOAT, home_total FLOAT, odds_under FLOAT, away_total FLOAT, odds_total FLOAT'),
('odds', 'league TEXT, home_team TEXT, away_team TEXT, date TIMESTAMP, odds_group TEXT, home_odds_1 FLOAT, home_odds_2 FLOAT, away_odds_1 FLOAT, away_odds_2 FLOAT, price_total FLOAT, odds_over FLOAT, odds_under FLOAT'),
):
# delete existing
cursor.execute('DROP TABLE IF EXISTS {};'.format(table_name))
database.commit()
# create table
cursor.execute('create table {} ({});'.format(table_name, values))
database.commit()
# finished
database.close() | en | 0.887571 | # import dependencies #BESTBETS rotation, league, date, match_details, play, line, odds, play_amount #GAME league, home_team, away_team, date, home_win, away_win, home_proj_score, away_proj_score, spread_total, home_spread_1, home_spread_2, away_spread_1, away_spread_2, total, home_total, odds_under, away_total, odds_total #ODDS league, home_team, away_team, date, odds_group, home_odds_1, home_odds_2, away_odds_1, away_odds_2, price_total, over, under # main # log into database # create tables # delete existing # create table # finished | 2.668667 | 3 |
ncaabb/game.py | aspic2/NCAABB | 1 | 6631289 | <filename>ncaabb/game.py
import random
import statistics
import csv
class Game(object):
"""Game class compares two teams ratings to determine which team is better.
The higher rated team is declared as winner and returned.
Scoring property also prints a projected score for the game.
Scoring defaults to False, as it is only used for the championship game.
"""
def __init__(self, team1, team2, round_no=0, scoring=False):
self.team1 = team1
self.team2 = team2
self.winner = None
self.scoring = scoring
self.team1_score = None
self.team2_score = None
self.round_no = round_no
def play(self):
if self.team1.rating > self.team2.rating:
self.winner = self.team1
elif self.team1.rating < self.team2.rating:
self.winner = self.team2
else:
self.winner = random.choice([self.team1, self.team2])
print("%s\n\t > %s\n%s\n" %
(self.team1.name, self.winner.name, self.team2.name))
return self
def score_game(self):
"""Winner's score is median of their season points scored.
loser's score is median of winner's points allowed.
"""
# TODO: does this solve the 'loser scored more points' problem?
if self.winner == self.team1:
self.team1_score = round(statistics.median(self.team1.get_scores().points_scored))
self.team2_score = round(statistics.median(self.team1.points_allowed))
else:
self.team2_score = round(statistics.median(self.team2.get_scores().points_scored))
self.team1_score = round(statistics.median(self.team2.points_allowed))
print("Projected score: %s: %d - %s: %d" % (
self.team1.name, self.team1_score, self.team2.name, self.team2_score))
return self
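    # Illustrative usage (the team objects are assumed to come from the rest of the
    # package; names here are hypothetical):
    #   game = Game(team_a, team_b, round_no=6, scoring=True)
    #   game.play().score_game().write_csv("results.csv")
    # play() picks the higher-rated team (random choice on a tie), score_game()
    # projects a score from season medians, and write_csv() appends one result row.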
def return_formatted_results(self):
return [str(self.round_no), self.team1.region, self.team1.name, \
str(self.team1_score), self.team2.name, str(self.team2_score), self.winner.name]
def write_csv(self, target):
with open(target, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.return_formatted_results())
| <filename>ncaabb/game.py
import random
import statistics
import csv
class Game(object):
"""Game class compares two teams ratings to determine which team is better.
The higher rated team is declared as winner and returned.
Scoring property also prints a projected score for the game.
Scoring defaults to False, as it is only used for the championship game.
"""
def __init__(self, team1, team2, round_no=0, scoring=False):
self.team1 = team1
self.team2 = team2
self.winner = None
self.scoring = scoring
self.team1_score = None
self.team2_score = None
self.round_no = round_no
def play(self):
if self.team1.rating > self.team2.rating:
self.winner = self.team1
elif self.team1.rating < self.team2.rating:
self.winner = self.team2
else:
self.winner = random.choice([self.team1, self.team2])
print("%s\n\t > %s\n%s\n" %
(self.team1.name, self.winner.name, self.team2.name))
return self
def score_game(self):
"""Winner's score is median of their season points scored.
loser's score is median of winner's points allowed.
"""
# TODO: does this solve the 'loser scored more points' problem?
if self.winner == self.team1:
self.team1_score = round(statistics.median(self.team1.get_scores().points_scored))
self.team2_score = round(statistics.median(self.team1.points_allowed))
else:
self.team2_score = round(statistics.median(self.team2.get_scores().points_scored))
self.team1_score = round(statistics.median(self.team2.points_allowed))
print("Projected score: %s: %d - %s: %d" % (
self.team1.name, self.team1_score, self.team2.name, self.team2_score))
return self
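    # Illustrative usage (the team objects are assumed to come from the rest of the
    # package; names here are hypothetical):
    #   game = Game(team_a, team_b, round_no=6, scoring=True)
    #   game.play().score_game().write_csv("results.csv")
    # play() picks the higher-rated team (random choice on a tie), score_game()
    # projects a score from season medians, and write_csv() appends one result row.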
def return_formatted_results(self):
return [str(self.round_no), self.team1.region, self.team1.name, \
str(self.team1_score), self.team2.name, str(self.team2_score), self.winner.name]
def write_csv(self, target):
with open(target, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.return_formatted_results())
| en | 0.976346 | Game class compares two teams ratings to determine which team is better.
The higher rated team is declared as winner and returned.
Scoring property also prints a projected score for the game.
Scoring defaults to False, as it is only used for the championship game. Winner's score is median of their season points scored.
loser's score is median of winner's points allowed. # TODO: does this solve the 'loser scored more points' problem? | 4.07155 | 4 |
identify_freq_schema.py | snatch59/oecd-data-mining | 6 | 6631290 | <filename>identify_freq_schema.py
import pandas as pd
import xml.etree.ElementTree as ET
import os
# where to load or save
SCHEMA_DIR = 'OECD_schema'
DATA_DIR = 'OECD_keys'
KEY_NAMES_FILE = os.path.join(DATA_DIR, 'OECD_key_names.csv')
DATA_FILE = os.path.join(DATA_DIR, 'FREQ_key_names.csv')
# performance metrics
dataset_files_cnt = 0
has_datasettype_node_cnt = 0
# data to be collected
usable_datasets = []
frequency_keywords = []
# Load a list of data set ids
dataset_ids_df = pd.read_csv(KEY_NAMES_FILE)
dataset_ids = dataset_ids_df['KeyFamilyId'].tolist()
# go through each data set schema file and see if it
# supports the FREQUENCY or FREQ dimension for observations
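# Assumed schema shape (inferred from the element access pattern below, not from OECD
# documentation): the schema root is expected to contain a node with name="DataSetType"
# whose first grandchild holds the dimension nodes as its children; a child named
# "FREQUENCY" or "FREQ" marks the data set as usable, roughly:
#   <... name="DataSetType"> -> first child -> first child -> <... name="FREQUENCY"/>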
for dataset_id in dataset_ids:
try:
tree = ET.parse(os.path.join(SCHEMA_DIR, dataset_id + '.xml'))
except FileNotFoundError:
pass
else:
dataset_files_cnt += 1
root = tree.getroot()
childIndex = 0
for rootChild in root:
rootChildAttrib = rootChild.attrib
if 'name' in rootChildAttrib:
attribName = rootChildAttrib['name']
if attribName == 'DataSetType':
# print(dataset_id, 'has DataSetType')
has_datasettype_node_cnt += 1
dstNode = root[childIndex][0][0]
for dstChild in dstNode:
dstChildAttrib = dstChild.attrib
if 'name' in dstChildAttrib:
dimension = dstChildAttrib['name']
# print(val2)
if dimension == 'FREQUENCY' or dimension == 'FREQ':
usable_datasets.append(dataset_id)
frequency_keywords.append(dimension)
print(dataset_id, 'pandasdmx usable with', dimension)
childIndex += 1
if len(usable_datasets):
usableDF = pd.DataFrame({'KeyFamilyId': usable_datasets, 'Dimension': frequency_keywords})
usableDF.set_index('KeyFamilyId', inplace=True)
usableDF.to_csv(DATA_FILE)
print()
print('completed ...')
print('Out of', dataset_files_cnt, 'data set files,', len(usable_datasets), 'are usable by pandasdmx.')
print(has_datasettype_node_cnt, 'have DataSetType nodes')
| <filename>identify_freq_schema.py
import pandas as pd
import xml.etree.ElementTree as ET
import os
# where to load or save
SCHEMA_DIR = 'OECD_schema'
DATA_DIR = 'OECD_keys'
KEY_NAMES_FILE = os.path.join(DATA_DIR, 'OECD_key_names.csv')
DATA_FILE = os.path.join(DATA_DIR, 'FREQ_key_names.csv')
# performance metrics
dataset_files_cnt = 0
has_datasettype_node_cnt = 0
# data to be collected
usable_datasets = []
frequency_keywords = []
# Load a list of data set ids
dataset_ids_df = pd.read_csv(KEY_NAMES_FILE)
dataset_ids = dataset_ids_df['KeyFamilyId'].tolist()
# go through each data set schema file and see if it
# supports the FREQUENCY or FREQ dimension for observations
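# Assumed schema shape (inferred from the element access pattern below, not from OECD
# documentation): the schema root is expected to contain a node with name="DataSetType"
# whose first grandchild holds the dimension nodes as its children; a child named
# "FREQUENCY" or "FREQ" marks the data set as usable, roughly:
#   <... name="DataSetType"> -> first child -> first child -> <... name="FREQUENCY"/>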
for dataset_id in dataset_ids:
try:
tree = ET.parse(os.path.join(SCHEMA_DIR, dataset_id + '.xml'))
except FileNotFoundError:
pass
else:
dataset_files_cnt += 1
root = tree.getroot()
childIndex = 0
for rootChild in root:
rootChildAttrib = rootChild.attrib
if 'name' in rootChildAttrib:
attribName = rootChildAttrib['name']
if attribName == 'DataSetType':
# print(dataset_id, 'has DataSetType')
has_datasettype_node_cnt += 1
dstNode = root[childIndex][0][0]
for dstChild in dstNode:
dstChildAttrib = dstChild.attrib
if 'name' in dstChildAttrib:
dimension = dstChildAttrib['name']
# print(val2)
if dimension == 'FREQUENCY' or dimension == 'FREQ':
usable_datasets.append(dataset_id)
frequency_keywords.append(dimension)
print(dataset_id, 'pandasdmx usable with', dimension)
childIndex += 1
if len(usable_datasets):
usableDF = pd.DataFrame({'KeyFamilyId': usable_datasets, 'Dimension': frequency_keywords})
usableDF.set_index('KeyFamilyId', inplace=True)
usableDF.to_csv(DATA_FILE)
print()
print('completed ...')
print('Out of', dataset_files_cnt, 'data set files,', len(usable_datasets), 'are usable by pandasdmx.')
print(has_datasettype_node_cnt, 'have DataSetType nodes')
| en | 0.660803 | # where to load or save # performance metrics # data to be collected # Load a list of data set ids # go through each data set schema file and see if it # support the FREQUENCY or FREQ dimension for observations # print(dataset_id, 'has DataSetType') # print(val2) | 2.486446 | 2 |
tests/test_helpers.py | will-jj/arim | 14 | 6631291 | <reponame>will-jj/arim
import enum
import logging
import numpy as np
import pytest
import arim.helpers
from arim.exceptions import InvalidShape, InvalidDimension, NotAnArray
def test_get_name():
metadata = dict(long_name="Nicolas", short_name="Nic")
assert arim.helpers.get_name(metadata) == "Nicolas"
del metadata["long_name"]
assert arim.helpers.get_name(metadata) == "Nic"
del metadata["short_name"]
assert isinstance(arim.helpers.get_name(metadata), str)
def test_parse_enum_constant():
Foo = enum.Enum("Foo", "foo bar")
assert arim.helpers.parse_enum_constant("foo", Foo) is Foo.foo
assert arim.helpers.parse_enum_constant(Foo.foo, Foo) is Foo.foo
assert arim.helpers.parse_enum_constant("bar", Foo) is Foo.bar
assert arim.helpers.parse_enum_constant(Foo.bar, Foo) is Foo.bar
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant("baz", Foo)
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant(Foo, Foo)
def test_timeit(capsys):
logger = logging.getLogger(__name__)
with arim.helpers.timeit(logger=logger):
1 + 1
out, err = capsys.readouterr()
assert out == ""
assert err == ""
with arim.helpers.timeit("Foobar"):
1 + 1
out, err = capsys.readouterr()
assert out.startswith("Foobar")
assert err == ""
def test_cache():
cache = arim.helpers.Cache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi"
assert len(cache) == 1
assert cache.hits == 0
assert cache.misses == 0
a = cache["toto"]
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 1
assert cache.misses == 0
a = cache.get("toto")
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 0
b = cache.get("foo", None)
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 1
with pytest.raises(KeyError):
b = cache["another_miss"]
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
# 'in' statement do not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
def test_nocache():
cache = arim.helpers.NoCache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi" # this should do nothing
assert "toto" not in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
with pytest.raises(KeyError):
a = cache["toto"]
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 1
a = cache.get("toto")
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
# 'in' statement do not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
def test_git_version():
v = arim.helpers.get_git_version()
assert isinstance(v, str)
assert v != ""
v_short = arim.helpers.get_git_version(short=True)
assert v_short == v
v_long = arim.helpers.get_git_version(short=False)
assert isinstance(v_long, str)
assert v_long != ""
assert len(v_long) >= len(v_short)
def test_get_shape_safely():
shape = (3, 4, 5)
x = np.arange(3 * 4 * 5).reshape(shape)
assert arim.helpers.get_shape_safely(x, "x", shape) == shape
assert arim.helpers.get_shape_safely(x, "x", (3, None, 5)) == shape
assert arim.helpers.get_shape_safely(x, "x") == shape
assert arim.helpers.get_shape_safely(x, "x", (None, None, None)) == shape
with pytest.raises(InvalidShape):
arim.helpers.get_shape_safely(x, "x", (3, 4, 666))
with pytest.raises(InvalidDimension):
arim.helpers.get_shape_safely(x, "x", (3, 4, 5, 6))
with pytest.raises(NotAnArray):
arim.helpers.get_shape_safely(x.tolist(), "x", (3, 4, 5))
def test_chunk_array():
# 1D:
x = np.arange(10)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 1D:
x = np.arange(9)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 0:
x = np.arange(20).reshape((10, 2))
size = 3
res = [x[0:3, :], x[3:6, :], x[6:9, :], x[9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 1:
x = np.arange(20).reshape((2, 10))
size = 3
res = [x[:, 0:3], x[:, 3:6], x[:, 6:9], x[:, 9:]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 3D dim 1:
x = np.arange(5 * 10 * 3).reshape((5, 10, 3))
size = 3
res = [x[:, 0:3, :], x[:, 3:6, :], x[:, 6:9, :], x[:, 9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
def test_smallest_uint_that_fits():
assert arim.helpers.smallest_uint_that_fits(2 ** 8 - 1) is np.uint8
assert arim.helpers.smallest_uint_that_fits(2 ** 8) is np.uint16
assert arim.helpers.smallest_uint_that_fits(2 ** 64 - 1) is np.uint64
def test_sizeof_fmt():
assert arim.helpers.sizeof_fmt(1) == "1.0 B"
assert arim.helpers.sizeof_fmt(1024) == "1.0 KiB"
assert arim.helpers.sizeof_fmt(2 * 1024) == "2.0 KiB"
assert arim.helpers.sizeof_fmt(5 * 1024 ** 2) == "5.0 MiB"
| import enum
import logging
import numpy as np
import pytest
import arim.helpers
from arim.exceptions import InvalidShape, InvalidDimension, NotAnArray
def test_get_name():
metadata = dict(long_name="Nicolas", short_name="Nic")
assert arim.helpers.get_name(metadata) == "Nicolas"
del metadata["long_name"]
assert arim.helpers.get_name(metadata) == "Nic"
del metadata["short_name"]
assert isinstance(arim.helpers.get_name(metadata), str)
def test_parse_enum_constant():
Foo = enum.Enum("Foo", "foo bar")
assert arim.helpers.parse_enum_constant("foo", Foo) is Foo.foo
assert arim.helpers.parse_enum_constant(Foo.foo, Foo) is Foo.foo
assert arim.helpers.parse_enum_constant("bar", Foo) is Foo.bar
assert arim.helpers.parse_enum_constant(Foo.bar, Foo) is Foo.bar
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant("baz", Foo)
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant(Foo, Foo)
def test_timeit(capsys):
logger = logging.getLogger(__name__)
with arim.helpers.timeit(logger=logger):
1 + 1
out, err = capsys.readouterr()
assert out == ""
assert err == ""
with arim.helpers.timeit("Foobar"):
1 + 1
out, err = capsys.readouterr()
assert out.startswith("Foobar")
assert err == ""
def test_cache():
cache = arim.helpers.Cache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi"
assert len(cache) == 1
assert cache.hits == 0
assert cache.misses == 0
a = cache["toto"]
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 1
assert cache.misses == 0
a = cache.get("toto")
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 0
b = cache.get("foo", None)
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 1
with pytest.raises(KeyError):
b = cache["another_miss"]
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
# 'in' statement do not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
def test_nocache():
cache = arim.helpers.NoCache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi" # this should do nothing
assert "toto" not in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
with pytest.raises(KeyError):
a = cache["toto"]
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 1
a = cache.get("toto")
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
# 'in' statement do not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
def test_git_version():
v = arim.helpers.get_git_version()
assert isinstance(v, str)
assert v != ""
v_short = arim.helpers.get_git_version(short=True)
assert v_short == v
v_long = arim.helpers.get_git_version(short=False)
assert isinstance(v_long, str)
assert v_long != ""
assert len(v_long) >= len(v_short)
def test_get_shape_safely():
shape = (3, 4, 5)
x = np.arange(3 * 4 * 5).reshape(shape)
assert arim.helpers.get_shape_safely(x, "x", shape) == shape
assert arim.helpers.get_shape_safely(x, "x", (3, None, 5)) == shape
assert arim.helpers.get_shape_safely(x, "x") == shape
assert arim.helpers.get_shape_safely(x, "x", (None, None, None)) == shape
with pytest.raises(InvalidShape):
arim.helpers.get_shape_safely(x, "x", (3, 4, 666))
with pytest.raises(InvalidDimension):
arim.helpers.get_shape_safely(x, "x", (3, 4, 5, 6))
with pytest.raises(NotAnArray):
arim.helpers.get_shape_safely(x.tolist(), "x", (3, 4, 5))
def test_chunk_array():
# 1D:
x = np.arange(10)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 1D:
x = np.arange(9)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 0:
x = np.arange(20).reshape((10, 2))
size = 3
res = [x[0:3, :], x[3:6, :], x[6:9, :], x[9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 1:
x = np.arange(20).reshape((2, 10))
size = 3
res = [x[:, 0:3], x[:, 3:6], x[:, 6:9], x[:, 9:]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 3D dim 1:
x = np.arange(5 * 10 * 3).reshape((5, 10, 3))
size = 3
res = [x[:, 0:3, :], x[:, 3:6, :], x[:, 6:9, :], x[:, 9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
def test_smallest_uint_that_fits():
assert arim.helpers.smallest_uint_that_fits(2 ** 8 - 1) is np.uint8
assert arim.helpers.smallest_uint_that_fits(2 ** 8) is np.uint16
assert arim.helpers.smallest_uint_that_fits(2 ** 64 - 1) is np.uint64
def test_sizeof_fmt():
assert arim.helpers.sizeof_fmt(1) == "1.0 B"
assert arim.helpers.sizeof_fmt(1024) == "1.0 KiB"
assert arim.helpers.sizeof_fmt(2 * 1024) == "2.0 KiB"
assert arim.helpers.sizeof_fmt(5 * 1024 ** 2) == "5.0 MiB" | en | 0.724837 | # 'in' statement do not change the hits/misses count: # this should do nothing # 'in' statement do not change the hits/misses count: # 1D: # 1D: # 2D dim 0: # 2D dim 1: # 3D dim 1: | 2.144009 | 2 |
ReProcess.py | ziyangz5/UCIWebSocDataAnalysis | 1 | 6631292 | <filename>ReProcess.py
import re
from collections import defaultdict
import course_database
class reprocess:
def __init__(self,text,deptname):
self.deptname = deptname
db = self.data_rough_process(text)
self.re_match(db)
self.data_process()
    def data_rough_process(self,text)->{str:[str]}:# roughly process the data without filtering out discussions (dis); the returned dict holds all lines under each course heading
count = 0
inner_text = []
db = defaultdict(list)
for line in text:
line = line.rstrip().lstrip()
if '_________________________________________________________________' in line:
count+=1
continue
if 'Total Classes Displayed:' in line:
break
if count >= 2:
if '#' in line:
break
inner_text.append(line)
inner_text_iter = iter(inner_text)
temp_database = {}
key_name = ''
begin_write = False
while True:
try:
line = next(inner_text_iter)
except StopIteration:
break
line_text = line.rstrip().lstrip()
if line_text.startswith('CCode'):continue
if 'Same as' in line_text:continue
if 'ON LINE' in line_text:continue
if begin_write == True:
if line_text == '':
begin_write = False
continue
temp_database[key_name].append(line_text)
if (self.deptname.lower() in line_text.lower())and not(',' in line_text.lower()):
key_name = line_text
temp_database[key_name] = []
begin_write = True
return temp_database
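    # Illustrative note (hypothetical WebSoc excerpt): the returned dict maps each course
    # heading line (a line containing the department name and no comma) to the listing
    # lines that follow it, e.g.
    #   {"CompSci 161 DES&ANALYS OF ALGOR": ["12345  LEC A  4  STAFF ...", ...]}
    # Discussion/lab rows are dropped later in re_match(), whose regex only keeps
    # lecture (LEC) rows.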
def re_match(self,db):
        # finer processing: filter out discussions (dis) and create a Course Object which contains all the lectures of this course; the field order is [CCode,Unt,Instructor,Week,Time,Place,Final,Max,Enr,Req,Rstr,Status]
rp = re.compile(r"([\d]{5})[\ ]*LEC[^\d]*([\d]*)[\ ]*([A-Za-z\,\ ]*.)[\ ]*([MWFTuTh]*)[\ ]*((?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}-[\ ]*(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}p?\ ?)[\ ]*([\w]*[\ ]*[\d]*)[\ ]*((?:(?:[^,]*,[^,]*,)[\ ]*(?:(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}-[\ ]*(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}\ ?(?:(?:am|pm)?))\ ?(?:\@[A-Z\ ]*[\w]*)?)|TBA)[\ ]*(\d*)[\ ]*(\d*(?:\/?\d*))[\ ]*([\d]*)[\ ]*([A-Z&]*)[\ ]*([\w]*)")
self.courses = []
for item in db.items():
course = course_database.Course(item[0])
for info_str in item[1]:
#Can you do it in 1 line?
info_list = []
info_re = rp.match(info_str)
                if info_re is None:
continue
for info in info_re.groups():
info_list.append(info)
course.add_lec(info_list)
self.courses.append(course)
def data_process(self):
pass
def get_data(self)->course_database:
return self.courses | <filename>ReProcess.py
import re
from collections import defaultdict
import course_database
class reprocess:
def __init__(self,text,deptname):
self.deptname = deptname
db = self.data_rough_process(text)
self.re_match(db)
self.data_process()
    def data_rough_process(self,text)->{str:[str]}:# roughly process the data without filtering out discussions (dis); the returned dict holds all lines under each course heading
count = 0
inner_text = []
db = defaultdict(list)
for line in text:
line = line.rstrip().lstrip()
if '_________________________________________________________________' in line:
count+=1
continue
if 'Total Classes Displayed:' in line:
break
if count >= 2:
if '#' in line:
break
inner_text.append(line)
inner_text_iter = iter(inner_text)
temp_database = {}
key_name = ''
begin_write = False
while True:
try:
line = next(inner_text_iter)
except StopIteration:
break
line_text = line.rstrip().lstrip()
if line_text.startswith('CCode'):continue
if 'Same as' in line_text:continue
if 'ON LINE' in line_text:continue
if begin_write == True:
if line_text == '':
begin_write = False
continue
temp_database[key_name].append(line_text)
if (self.deptname.lower() in line_text.lower())and not(',' in line_text.lower()):
key_name = line_text
temp_database[key_name] = []
begin_write = True
return temp_database
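    # Illustrative note (hypothetical WebSoc excerpt): the returned dict maps each course
    # heading line (a line containing the department name and no comma) to the listing
    # lines that follow it, e.g.
    #   {"CompSci 161 DES&ANALYS OF ALGOR": ["12345  LEC A  4  STAFF ...", ...]}
    # Discussion/lab rows are dropped later in re_match(), whose regex only keeps
    # lecture (LEC) rows.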
def re_match(self,db):
        # finer processing: filter out discussions (dis) and create a Course Object which contains all the lectures of this course; the field order is [CCode,Unt,Instructor,Week,Time,Place,Final,Max,Enr,Req,Rstr,Status]
rp = re.compile(r"([\d]{5})[\ ]*LEC[^\d]*([\d]*)[\ ]*([A-Za-z\,\ ]*.)[\ ]*([MWFTuTh]*)[\ ]*((?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}-[\ ]*(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}p?\ ?)[\ ]*([\w]*[\ ]*[\d]*)[\ ]*((?:(?:[^,]*,[^,]*,)[\ ]*(?:(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}-[\ ]*(?:1[0-2]|[0-9])(?:\:[0-5][0-9]){0,2}\ ?(?:(?:am|pm)?))\ ?(?:\@[A-Z\ ]*[\w]*)?)|TBA)[\ ]*(\d*)[\ ]*(\d*(?:\/?\d*))[\ ]*([\d]*)[\ ]*([A-Z&]*)[\ ]*([\w]*)")
self.courses = []
for item in db.items():
course = course_database.Course(item[0])
for info_str in item[1]:
#Can you do it in 1 line?
info_list = []
info_re = rp.match(info_str)
                if info_re is None:
continue
for info in info_re.groups():
info_list.append(info)
course.add_lec(info_list)
self.courses.append(course)
def data_process(self):
pass
def get_data(self)->course_database:
        return self.courses | en | 0.526504 | # roughly process the data without filtering out discussions (dis); the returned dict holds all lines under each course heading # finer processing: filter out discussions (dis) and create a Course Object which contains all the lectures of this course; the field order is [CCode,Unt,Instructor,Week,Time,Place,Final,Max,Enr,Req,Rstr,Status] #Can you do it in 1 line? | 3.241051 | 3 |
scripts/export_to_gcloud.py | khromiumos/chromiumos-chromite | 0 | 6631293 | <filename>scripts/export_to_gcloud.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Export entities to gcloud datastore."""
from __future__ import print_function
import ast
import json
import sys
from chromite.lib import commandline
from chromite.lib import dslib
try:
import pytest # pylint: disable=import-error
datastore = pytest.importorskip('gcloud.datastore')
except ImportError:
from gcloud import datastore # pylint: disable=import-error
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def GetParser():
"""Creates the argparse parser."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('service_acct_json', type=str, action='store',
help='Path to service account credentials JSON file.')
parser.add_argument('entities', type=str, action='store',
help=('Path to file with entities to export. '
'File should be newline-separated JSON entries.'))
parser.add_argument('--project_id', '-i', type=str, action='store',
default=None,
help=('Optional project_id of datastore to write to. If '
'not supplied, will be taken from credentials '
'file.'))
parser.add_argument('--namespace', '-n', type=str, action='store',
default=None,
help='Optional namespace in which to store entities.')
parser.add_argument('--parent_key', '-p', type=str, action='store',
default=None,
help='Key of parent entity to insert into. This should '
'be in python tuple-literal form, e.g. ("Foo", 1)')
return parser
class DuplicateKeyError(ValueError):
"""Raised when two Entities have the same key."""
def GetEntities(project_id, json_lines, outer_parent_key=None, namespace=None):
"""Create gcloud entities from json string entries.
project_id: String gcloud project id that entities are for.
json_lines: File or other line-by-line iterator of json strings to turn into
entities.
outer_parent_key: Optional datastore.Key instance to act as the parent_key
of all top level entities.
namespace: Optional string namespace for entities.
"""
entity_keys = {}
for line in json_lines:
item = json.loads(line)
kind, idx = item.pop('id')
parent = item.pop('parent', None)
if (kind, idx) in entity_keys:
raise DuplicateKeyError(
'Duplicate entities with id (%s, %s)' % (kind, idx))
if parent:
parent_key = entity_keys[tuple(parent)]
else:
parent_key = outer_parent_key
key = datastore.Key(
kind, idx, project=project_id, parent=parent_key, namespace=namespace)
e = datastore.Entity(key=key)
e.update(item)
entity_keys[(kind, idx)] = key
entity_keys[idx] = key
yield e
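# Illustrative input (kind names are hypothetical, not taken from chromite docs): each
# line of the entities file is a JSON object whose "id" is a [kind, id] pair and whose
# optional "parent" refers to the "id" of an earlier line, e.g.
#   {"id": ["Build", 100], "status": "pass"}
#   {"id": ["Stage", 1], "parent": ["Build", 100], "name": "Sync"}
# All remaining key/value pairs become properties of the datastore Entity.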
def main(argv):
parser = GetParser()
options = parser.parse_args(argv)
entities_path = options.entities
creds_file = options.service_acct_json
project_id = options.project_id
namespace = options.namespace
entities = []
c, project_id = dslib.GetClient(creds_file, project_id, namespace)
if options.parent_key:
upper_parent_key = c.key(*ast.literal_eval(options.parent_key))
else:
upper_parent_key = None
with open(entities_path, 'r') as f:
entities = GetEntities(project_id, f, upper_parent_key, namespace)
dslib.ChunkedBatchWrite(entities, c)
| en | 0.761217 | # -*- coding: utf-8 -*- # Copyright 2016 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Export entities to gcloud datastore. # pylint: disable=import-error # pylint: disable=import-error Creates the argparse parser. Raised when two Entities have the same key. Create gcloud entities from json string entries. project_id: String gcloud project id that entities are for. json_lines: File or other line-by-line iterator of json strings to turn into entities. outer_parent_key: Optional datastore.Key instance to act as the parent_key of all top level entities. namespace: Optional string namespace for entities. | 2.445979 | 2 |
app/routers/__init__.py | BlueJillYang/blog | 0 | 6631294 | from .contorl import contorller # this is the final controller which is controlling other apps
| en | 0.957275 | # this is the final controller which is controlling other apps | 1.25359 | 1 |
applied_python/applied_python/lib/python2.7/site-packages/ncclient/devices/iosxe.py | mith1979/ansible_automation | 0 | 6631295 | """
Handler for Cisco IOS-XE device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
def iosxe_unknown_host_cb(host, fingerprint):
#This will ignore the unknown host check when connecting to CSR devices
return True
class IosxeDeviceHandler(DefaultDeviceHandler):
"""
Cisco IOS-XE handler for device specific information.
"""
def __init__(self, device_params):
super(IosxeDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
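# NOTE: SaveConfig is not defined or imported in this snippet; it is assumed
# to be supplied elsewhere (e.g. an ncclient third-party IOS-XE RPC module).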
dict["save_config"] = SaveConfig
return dict
def add_additional_ssh_connect_params(self, kwargs):
kwargs['allow_agent'] = False
kwargs['look_for_keys'] = False
kwargs['unknown_host_cb'] = iosxe_unknown_host_cb
def perform_qualify_check(self):
return False
| en | 0.739228 | Handler for Cisco IOS-XE device specific information. Note that for proper import, the classname has to be: "<Devicename>DeviceHandler" ...where <Devicename> is something like "Default", "Nexus", etc. All device-specific handlers derive from the DefaultDeviceHandler, which implements the generic information needed for interaction with a Netconf server. #This will ignore the unknown host check when connecting to CSR devices Cisco IOS-XE handler for device specific information. | 2.511989 | 3 |
models.py | ECruz25/music-app-backend | 0 | 6631296 | from application import db
from sqlalchemy.dialects.postgresql import JSON
from datetime import date
class SpotifyUserSongInPlaylist(db.Model):
__tablename__ = 'spotifyusersonginplaylist'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.String())
date_added = db.Column(db.DateTime())
track_id = db.Column(db.String())
popularity = db.Column(db.Integer())
explicit = db.Column(db.Boolean())
def __init__(self, user_id, date_added, track_id, popularity, explicit):
self.user_id = user_id
self.date_added = date_added
self.track_id = track_id
self.popularity = popularity
self.explicit = explicit
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.String())
mind_aspect = db.Column(db.String(), nullable=True)
energy_aspect = db.Column(db.String(), nullable=True)
nature_aspect = db.Column(db.String(), nullable=True)
tactics_aspect = db.Column(db.String(), nullable=True)
identity_aspect = db.Column(db.String(), nullable=True)
country = db.Column(db.String())
def __init__(self, user_id, mind_aspect, energy_aspect, nature_aspect, tactics_aspect, identity_aspect, country):
self.user_id = user_id
self.identity_aspect = identity_aspect
self.tactics_aspect = tactics_aspect
self.nature_aspect = nature_aspect
self.mind_aspect = mind_aspect
self.energy_aspect = energy_aspect
self.country = country
class Playlist(db.Model):
__tablename__ = 'playlist'
id = db.Column(db.Integer, primary_key=True)
playlist_id = db.Column(db.String())
name = db.Column(db.String())
owner = db.Column(db.String())
checked = db.Column(db.Boolean())
def __init__(self, playlist_id, name, owner):
self.playlist_id = playlist_id
self.name = name
self.owner = owner
self.checked = False
class Recommendation(db.Model):
__tablename__ = 'recommendation'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.String())
track_id = db.Column(db.String())
date_recommended_for = db.Column(db.Date())
def __init__(self, user_id, track_id):
self.track_id = track_id
self.user_id = user_id
self.date_recommended_for = date.today()
| none | 1 | 2.664783 | 3 |
|
QRCode_generator/.env/qrcode/lib/python3.8/site-packages/pyqrcode/tables.py | Aayush3thoughtwin/nft_metaplex | 332 | 6631297 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module lists out all of the tables needed to create a QR code.
If you are viewing this in the HTML documentation, I recommend reading the
actual file instead. The formatting for the tables is much more readable.
"""
from __future__ import division, unicode_literals
#: This defines the QR Code's 'mode' which sets what
#: type of code it is along with its size.
modes = {
'numeric': 1,
'alphanumeric': 2,
'binary': 4,
'kanji': 8,
}
#: This defines the amount of error correction. The dictionary
#: allows the user to specify this in several ways.
error_level = {'L': 'L', 'l': 'L', '7%': 'L', .7: 'L',
'M': 'M', 'm': 'M', '15%': 'M', .15: 'M',
'Q': 'Q', 'q': 'Q', '25%': 'Q', .25: 'Q',
'H': 'H', 'h': 'H', '30%': 'H', .30: 'H'}
#: This dictionary holds how long the "data length" field is for
#: each version and mode of the QR Code.
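#: The keys (9, 26, 40) are the largest version number of each range, i.e.
#: the field widths apply to versions 1-9, 10-26, and 27-40 respectively.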
data_length_field = {9: {1: 10, 2: 9, 4: 8, 8: 8},
26: {1: 12, 2: 11, 4: 16, 8: 10},
40: {1: 14, 2: 13, 4: 16, 8: 12}}
#: QR Codes use a unique ASCII-like table for the 'alphanumeric' mode.
#: This is a dictionary representing that unique table, where the
#: keys are the possible characters in the data and the values
#: are the character's numeric representation.
ascii_codes = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14,
'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20, 'L': 21,
'M': 22, 'N': 23, 'O': 24, 'P': 25, 'Q': 26, 'R': 27, 'S': 28,
'T': 29, 'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34, 'Z': 35,
' ': 36, '$': 37, '%': 38, '*': 39, '+': 40, '-': 41, '.': 42,
'/': 43, ':': 44}
#: This array specifies the size of a QR Code in pixels. These numbers are
#: defined in the standard. The indexes correspond to the QR Code's
#: version number. This array was taken from:
#:
#: http://www.denso-wave.com/qrcode/vertable1-e.html
version_size = [None, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57,
61, 65, 69, 73, 77, 81, 85, 89, 93, 97,
101, 105, 109, 113, 117, 121, 125, 129, 133, 137,
141, 145, 149, 153, 157, 161, 165, 169, 173, 177]
#: This dictionary lists the data capacity for all possible QR Codes.
#: This dictionary is organized where the first key corresponds to the
#: QR Code version number. The next key corresponds to the error
#: correction level, see error. The final key corresponds to
#: the mode number, see modes. The zero mode number represents the
#: possible "data bits." This table was taken from:
#:
#: http://www.denso-wave.com/qrcode/vertable1-e.html
data_capacity = {
1: {
"L": {0: 152, 1: 41, 2: 25, 4: 17, 8: 10, },
"M": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8, },
"Q": {0: 104, 1: 27, 2: 16, 4: 11, 8: 7, },
"H": {0: 72, 1: 17, 2: 10, 4: 7, 8: 4, }},
2: {
"L": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20, },
"M": {0: 224, 1: 63, 2: 38, 4: 26, 8: 16, },
"Q": {0: 176, 1: 48, 2: 29, 4: 20, 8: 12, },
"H": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8, }},
3: {
"L": {0: 440, 1: 127, 2: 77, 4: 53, 8: 32, },
"M": {0: 352, 1: 101, 2: 61, 4: 42, 8: 26, },
"Q": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20, },
"H": {0: 208, 1: 58, 2: 35, 4: 24, 8: 15, }},
4: {
"L": {0: 640, 1: 187, 2: 114, 4: 78, 8: 48, },
"M": {0: 512, 1: 149, 2: 90, 4: 62, 8: 38, },
"Q": {0: 384, 1: 111, 2: 67, 4: 46, 8: 28, },
"H": {0: 288, 1: 82, 2: 50, 4: 34, 8: 21, }},
5: {
"L": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65, },
"M": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52, },
"Q": {0: 496, 1: 144, 2: 87, 4: 60, 8: 37, },
"H": {0: 368, 1: 106, 2: 64, 4: 44, 8: 27, }},
6: {
"L": {0: 1088, 1: 322, 2: 195, 4: 134, 8: 82, },
"M": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65, },
"Q": {0: 608, 1: 178, 2: 108, 4: 74, 8: 45, },
"H": {0: 480, 1: 139, 2: 84, 4: 58, 8: 36, }},
7: {
"L": {0: 1248, 1: 370, 2: 224, 4: 154, 8: 95, },
"M": {0: 992, 1: 293, 2: 178, 4: 122, 8: 75, },
"Q": {0: 704, 1: 207, 2: 125, 4: 86, 8: 53, },
"H": {0: 528, 1: 154, 2: 93, 4: 64, 8: 39, }},
8: {
"L": {0: 1552, 1: 461, 2: 279, 4: 192, 8: 118, },
"M": {0: 1232, 1: 365, 2: 221, 4: 152, 8: 93, },
"Q": {0: 880, 1: 259, 2: 157, 4: 108, 8: 66, },
"H": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52, }},
9: {
"L": {0: 1856, 1: 552, 2: 335, 4: 230, 8: 141, },
"M": {0: 1456, 1: 432, 2: 262, 4: 180, 8: 111, },
"Q": {0: 1056, 1: 312, 2: 189, 4: 130, 8: 80, },
"H": {0: 800, 1: 235, 2: 143, 4: 98, 8: 60, }},
10: {
"L": {0: 2192, 1: 652, 2: 395, 4: 271, 8: 167, },
"M": {0: 1728, 1: 513, 2: 311, 4: 213, 8: 131, },
"Q": {0: 1232, 1: 364, 2: 221, 4: 151, 8: 93, },
"H": {0: 976, 1: 288, 2: 174, 4: 119, 8: 74, }},
11: {
"L": {0: 2592, 1: 772, 2: 468, 4: 321, 8: 198, },
"M": {0: 2032, 1: 604, 2: 366, 4: 251, 8: 155, },
"Q": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109, },
"H": {0: 1120, 1: 331, 2: 200, 4: 137, 8: 85, }},
12: {
"L": {0: 2960, 1: 883, 2: 535, 4: 367, 8: 226, },
"M": {0: 2320, 1: 691, 2: 419, 4: 287, 8: 177, },
"Q": {0: 1648, 1: 489, 2: 296, 4: 203, 8: 125, },
"H": {0: 1264, 1: 374, 2: 227, 4: 155, 8: 96, }},
13: {
"L": {0: 3424, 1: 1022, 2: 619, 4: 425, 8: 262, },
"M": {0: 2672, 1: 796, 2: 483, 4: 331, 8: 204, },
"Q": {0: 1952, 1: 580, 2: 352, 4: 241, 8: 149, },
"H": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109, }},
14: {
"L": {0: 3688, 1: 1101, 2: 667, 4: 458, 8: 282, },
"M": {0: 2920, 1: 871, 2: 528, 4: 362, 8: 223, },
"Q": {0: 2088, 1: 621, 2: 376, 4: 258, 8: 159, },
"H": {0: 1576, 1: 468, 2: 283, 4: 194, 8: 120, }},
15: {
"L": {0: 4184, 1: 1250, 2: 758, 4: 520, 8: 320, },
"M": {0: 3320, 1: 991, 2: 600, 4: 412, 8: 254, },
"Q": {0: 2360, 1: 703, 2: 426, 4: 292, 8: 180, },
"H": {0: 1784, 1: 530, 2: 321, 4: 220, 8: 136, }},
16: {
"L": {0: 4712, 1: 1408, 2: 854, 4: 586, 8: 361, },
"M": {0: 3624, 1: 1082, 2: 656, 4: 450, 8: 277, },
"Q": {0: 2600, 1: 775, 2: 470, 4: 322, 8: 198, },
"H": {0: 2024, 1: 602, 2: 365, 4: 250, 8: 154, }},
17: {
"L": {0: 5176, 1: 1548, 2: 938, 4: 644, 8: 397, },
"M": {0: 4056, 1: 1212, 2: 734, 4: 504, 8: 310, },
"Q": {0: 2936, 1: 876, 2: 531, 4: 364, 8: 224, },
"H": {0: 2264, 1: 674, 2: 408, 4: 280, 8: 173, }},
18: {
"L": {0: 5768, 1: 1725, 2: 1046, 4: 718, 8: 442, },
"M": {0: 4504, 1: 1346, 2: 816, 4: 560, 8: 345, },
"Q": {0: 3176, 1: 948, 2: 574, 4: 394, 8: 243, },
"H": {0: 2504, 1: 746, 2: 452, 4: 310, 8: 191, }},
19: {
"L": {0: 6360, 1: 1903, 2: 1153, 4: 792, 8: 488, },
"M": {0: 5016, 1: 1500, 2: 909, 4: 624, 8: 384, },
"Q": {0: 3560, 1: 1063, 2: 644, 4: 442, 8: 272, },
"H": {0: 2728, 1: 813, 2: 493, 4: 338, 8: 208, }},
20: {
"L": {0: 6888, 1: 2061, 2: 1249, 4: 858, 8: 528, },
"M": {0: 5352, 1: 1600, 2: 970, 4: 666, 8: 410, },
"Q": {0: 3880, 1: 1159, 2: 702, 4: 482, 8: 297, },
"H": {0: 3080, 1: 919, 2: 557, 4: 382, 8: 235, }},
21: {
"L": {0: 7456, 1: 2232, 2: 1352, 4: 929, 8: 572, },
"M": {0: 5712, 1: 1708, 2: 1035, 4: 711, 8: 438, },
"Q": {0: 4096, 1: 1224, 2: 742, 4: 509, 8: 314, },
"H": {0: 3248, 1: 969, 2: 587, 4: 403, 8: 248, }},
22: {
"L": {0: 8048, 1: 2409, 2: 1460, 4: 1003, 8: 618, },
"M": {0: 6256, 1: 1872, 2: 1134, 4: 779, 8: 480, },
"Q": {0: 4544, 1: 1358, 2: 823, 4: 565, 8: 348, },
"H": {0: 3536, 1: 1056, 2: 640, 4: 439, 8: 270, }},
23: {
"L": {0: 8752, 1: 2620, 2: 1588, 4: 1091, 8: 672, },
"M": {0: 6880, 1: 2059, 2: 1248, 4: 857, 8: 528, },
"Q": {0: 4912, 1: 1468, 2: 890, 4: 611, 8: 376, },
"H": {0: 3712, 1: 1108, 2: 672, 4: 461, 8: 284, }},
24: {
"L": {0: 9392, 1: 2812, 2: 1704, 4: 1171, 8: 721, },
"M": {0: 7312, 1: 2188, 2: 1326, 4: 911, 8: 561, },
"Q": {0: 5312, 1: 1588, 2: 963, 4: 661, 8: 407, },
"H": {0: 4112, 1: 1228, 2: 744, 4: 511, 8: 315, }},
25: {
"L": {0: 10208, 1: 3057, 2: 1853, 4: 1273, 8: 784, },
"M": {0: 8000, 1: 2395, 2: 1451, 4: 997, 8: 614, },
"Q": {0: 5744, 1: 1718, 2: 1041, 4: 715, 8: 440, },
"H": {0: 4304, 1: 1286, 2: 779, 4: 535, 8: 330, }},
26: {
"L": {0: 10960, 1: 3283, 2: 1990, 4: 1367, 8: 842, },
"M": {0: 8496, 1: 2544, 2: 1542, 4: 1059, 8: 652, },
"Q": {0: 6032, 1: 1804, 2: 1094, 4: 751, 8: 462, },
"H": {0: 4768, 1: 1425, 2: 864, 4: 593, 8: 365, }},
27: {
"L": {0: 11744, 1: 3514, 2: 2132, 4: 1465, 8: 902, },
"M": {0: 9024, 1: 2701, 2: 1637, 4: 1125, 8: 692, },
"Q": {0: 6464, 1: 1933, 2: 1172, 4: 805, 8: 496, },
"H": {0: 5024, 1: 1501, 2: 910, 4: 625, 8: 385, }},
28: {
"L": {0: 12248, 1: 3669, 2: 2223, 4: 1528, 8: 940, },
"M": {0: 9544, 1: 2857, 2: 1732, 4: 1190, 8: 732, },
"Q": {0: 6968, 1: 2085, 2: 1263, 4: 868, 8: 534, },
"H": {0: 5288, 1: 1581, 2: 958, 4: 658, 8: 405, }},
29: {
"L": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002, },
"M": {0: 10136, 1: 3035, 2: 1839, 4: 1264, 8: 778, },
"Q": {0: 7288, 1: 2181, 2: 1322, 4: 908, 8: 559, },
"H": {0: 5608, 1: 1677, 2: 1016, 4: 698, 8: 430, }},
30: {
"L": {0: 13880, 1: 4158, 2: 2520, 4: 1732, 8: 1066, },
"M": {0: 10984, 1: 3289, 2: 1994, 4: 1370, 8: 843, },
"Q": {0: 7880, 1: 2358, 2: 1429, 4: 982, 8: 604, },
"H": {0: 5960, 1: 1782, 2: 1080, 4: 742, 8: 457, }},
31: {
"L": {0: 14744, 1: 4417, 2: 2677, 4: 1840, 8: 1132, },
"M": {0: 11640, 1: 3486, 2: 2113, 4: 1452, 8: 894, },
"Q": {0: 8264, 1: 2473, 2: 1499, 4: 1030, 8: 634, },
"H": {0: 6344, 1: 1897, 2: 1150, 4: 790, 8: 486, }},
32: {
"L": {0: 15640, 1: 4686, 2: 2840, 4: 1952, 8: 1201, },
"M": {0: 12328, 1: 3693, 2: 2238, 4: 1538, 8: 947, },
"Q": {0: 8920, 1: 2670, 2: 1618, 4: 1112, 8: 684, },
"H": {0: 6760, 1: 2022, 2: 1226, 4: 842, 8: 518, }},
33: {
"L": {0: 16568, 1: 4965, 2: 3009, 4: 2068, 8: 1273, },
"M": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002, },
"Q": {0: 9368, 1: 2805, 2: 1700, 4: 1168, 8: 719, },
"H": {0: 7208, 1: 2157, 2: 1307, 4: 898, 8: 553, }},
34: {
"L": {0: 17528, 1: 5253, 2: 3183, 4: 2188, 8: 1347, },
"M": {0: 13800, 1: 4134, 2: 2506, 4: 1722, 8: 1060, },
"Q": {0: 9848, 1: 2949, 2: 1787, 4: 1228, 8: 756, },
"H": {0: 7688, 1: 2301, 2: 1394, 4: 958, 8: 590, }},
35: {
"L": {0: 18448, 1: 5529, 2: 3351, 4: 2303, 8: 1417, },
"M": {0: 14496, 1: 4343, 2: 2632, 4: 1809, 8: 1113, },
"Q": {0: 10288, 1: 3081, 2: 1867, 4: 1283, 8: 790, },
"H": {0: 7888, 1: 2361, 2: 1431, 4: 983, 8: 605, }},
36: {
"L": {0: 19472, 1: 5836, 2: 3537, 4: 2431, 8: 1496, },
"M": {0: 15312, 1: 4588, 2: 2780, 4: 1911, 8: 1176, },
"Q": {0: 10832, 1: 3244, 2: 1966, 4: 1351, 8: 832, },
"H": {0: 8432, 1: 2524, 2: 1530, 4: 1051, 8: 647, }},
37: {
"L": {0: 20528, 1: 6153, 2: 3729, 4: 2563, 8: 1577, },
"M": {0: 15936, 1: 4775, 2: 2894, 4: 1989, 8: 1224, },
"Q": {0: 11408, 1: 3417, 2: 2071, 4: 1423, 8: 876, },
"H": {0: 8768, 1: 2625, 2: 1591, 4: 1093, 8: 673, }},
38: {
"L": {0: 21616, 1: 6479, 2: 3927, 4: 2699, 8: 1661, },
"M": {0: 16816, 1: 5039, 2: 3054, 4: 2099, 8: 1292, },
"Q": {0: 12016, 1: 3599, 2: 2181, 4: 1499, 8: 923, },
"H": {0: 9136, 1: 2735, 2: 1658, 4: 1139, 8: 701, }},
39: {
"L": {0: 22496, 1: 6743, 2: 4087, 4: 2809, 8: 1729, },
"M": {0: 17728, 1: 5313, 2: 3220, 4: 2213, 8: 1362, },
"Q": {0: 12656, 1: 3791, 2: 2298, 4: 1579, 8: 972, },
"H": {0: 9776, 1: 2927, 2: 1774, 4: 1219, 8: 750, }},
40: {
"L": {0: 23648, 1: 7089, 2: 4296, 4: 2953, 8: 1817, },
"M": {0: 18672, 1: 5596, 2: 3391, 4: 2331, 8: 1435, },
"Q": {0: 13328, 1: 3993, 2: 2420, 4: 1663, 8: 1024, },
"H": {0: 10208, 1: 3057, 2: 1852, 4: 1273, 8: 784, }}
}
#: This table defines the "Error Correction Code Words and Block Information."
#: The table lists the number of error correction words that are required
#: to be generated for each version and error correction level. The table
#: is accessed by first using the version number as a key and then the
#: error level. The array values correspond to these columns from the source
#: table:
#:
#: +----------------------------+
#: |0 | EC Code Words Per Block |
#: +----------------------------+
#: |1 | Block 1 Count |
#: +----------------------------+
#: |2 | Block 1 Data Code Words |
#: +----------------------------+
#: |3 | Block 2 Count |
#: +----------------------------+
#: |4 | Block 2 Data Code Words |
#: +----------------------------+
#:
#: This table was taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/error-correction-table/
eccwbi = {
1: {
'L': [7, 1, 19, 0, 0, ],
'M': [10, 1, 16, 0, 0, ],
'Q': [13, 1, 13, 0, 0, ],
'H': [17, 1, 9, 0, 0, ],
},
2: {
'L': [10, 1, 34, 0, 0, ],
'M': [16, 1, 28, 0, 0, ],
'Q': [22, 1, 22, 0, 0, ],
'H': [28, 1, 16, 0, 0, ],
},
3: {
'L': [15, 1, 55, 0, 0, ],
'M': [26, 1, 44, 0, 0, ],
'Q': [18, 2, 17, 0, 0, ],
'H': [22, 2, 13, 0, 0, ],
},
4: {
'L': [20, 1, 80, 0, 0, ],
'M': [18, 2, 32, 0, 0, ],
'Q': [26, 2, 24, 0, 0, ],
'H': [16, 4, 9, 0, 0, ],
},
5: {
'L': [26, 1, 108, 0, 0, ],
'M': [24, 2, 43, 0, 0, ],
'Q': [18, 2, 15, 2, 16, ],
'H': [22, 2, 11, 2, 12, ],
},
6: {
'L': [18, 2, 68, 0, 0, ],
'M': [16, 4, 27, 0, 0, ],
'Q': [24, 4, 19, 0, 0, ],
'H': [28, 4, 15, 0, 0, ],
},
7: {
'L': [20, 2, 78, 0, 0, ],
'M': [18, 4, 31, 0, 0, ],
'Q': [18, 2, 14, 4, 15, ],
'H': [26, 4, 13, 1, 14, ],
},
8: {
'L': [24, 2, 97, 0, 0, ],
'M': [22, 2, 38, 2, 39, ],
'Q': [22, 4, 18, 2, 19, ],
'H': [26, 4, 14, 2, 15, ],
},
9: {
'L': [30, 2, 116, 0, 0, ],
'M': [22, 3, 36, 2, 37, ],
'Q': [20, 4, 16, 4, 17, ],
'H': [24, 4, 12, 4, 13, ],
},
10: {
'L': [18, 2, 68, 2, 69, ],
'M': [26, 4, 43, 1, 44, ],
'Q': [24, 6, 19, 2, 20, ],
'H': [28, 6, 15, 2, 16, ],
},
11: {
'L': [20, 4, 81, 0, 0, ],
'M': [30, 1, 50, 4, 51, ],
'Q': [28, 4, 22, 4, 23, ],
'H': [24, 3, 12, 8, 13, ],
},
12: {
'L': [24, 2, 92, 2, 93, ],
'M': [22, 6, 36, 2, 37, ],
'Q': [26, 4, 20, 6, 21, ],
'H': [28, 7, 14, 4, 15, ],
},
13: {
'L': [26, 4, 107, 0, 0, ],
'M': [22, 8, 37, 1, 38, ],
'Q': [24, 8, 20, 4, 21, ],
'H': [22, 12, 11, 4, 12, ],
},
14: {
'L': [30, 3, 115, 1, 116, ],
'M': [24, 4, 40, 5, 41, ],
'Q': [20, 11, 16, 5, 17, ],
'H': [24, 11, 12, 5, 13, ],
},
15: {
'L': [22, 5, 87, 1, 88, ],
'M': [24, 5, 41, 5, 42, ],
'Q': [30, 5, 24, 7, 25, ],
'H': [24, 11, 12, 7, 13, ],
},
16: {
'L': [24, 5, 98, 1, 99, ],
'M': [28, 7, 45, 3, 46, ],
'Q': [24, 15, 19, 2, 20, ],
'H': [30, 3, 15, 13, 16, ],
},
17: {
'L': [28, 1, 107, 5, 108, ],
'M': [28, 10, 46, 1, 47, ],
'Q': [28, 1, 22, 15, 23, ],
'H': [28, 2, 14, 17, 15, ],
},
18: {
'L': [30, 5, 120, 1, 121, ],
'M': [26, 9, 43, 4, 44, ],
'Q': [28, 17, 22, 1, 23, ],
'H': [28, 2, 14, 19, 15, ],
},
19: {
'L': [28, 3, 113, 4, 114, ],
'M': [26, 3, 44, 11, 45, ],
'Q': [26, 17, 21, 4, 22, ],
'H': [26, 9, 13, 16, 14, ],
},
20: {
'L': [28, 3, 107, 5, 108, ],
'M': [26, 3, 41, 13, 42, ],
'Q': [30, 15, 24, 5, 25, ],
'H': [28, 15, 15, 10, 16, ],
},
21: {
'L': [28, 4, 116, 4, 117, ],
'M': [26, 17, 42, 0, 0, ],
'Q': [28, 17, 22, 6, 23, ],
'H': [30, 19, 16, 6, 17, ],
},
22: {
'L': [28, 2, 111, 7, 112, ],
'M': [28, 17, 46, 0, 0, ],
'Q': [30, 7, 24, 16, 25, ],
'H': [24, 34, 13, 0, 0, ],
},
23: {
'L': [30, 4, 121, 5, 122, ],
'M': [28, 4, 47, 14, 48, ],
'Q': [30, 11, 24, 14, 25, ],
'H': [30, 16, 15, 14, 16, ],
},
24: {
'L': [30, 6, 117, 4, 118, ],
'M': [28, 6, 45, 14, 46, ],
'Q': [30, 11, 24, 16, 25, ],
'H': [30, 30, 16, 2, 17, ],
},
25: {
'L': [26, 8, 106, 4, 107, ],
'M': [28, 8, 47, 13, 48, ],
'Q': [30, 7, 24, 22, 25, ],
'H': [30, 22, 15, 13, 16, ],
},
26: {
'L': [28, 10, 114, 2, 115, ],
'M': [28, 19, 46, 4, 47, ],
'Q': [28, 28, 22, 6, 23, ],
'H': [30, 33, 16, 4, 17, ],
},
27: {
'L': [30, 8, 122, 4, 123, ],
'M': [28, 22, 45, 3, 46, ],
'Q': [30, 8, 23, 26, 24, ],
'H': [30, 12, 15, 28, 16, ],
},
28: {
'L': [30, 3, 117, 10, 118, ],
'M': [28, 3, 45, 23, 46, ],
'Q': [30, 4, 24, 31, 25, ],
'H': [30, 11, 15, 31, 16, ],
},
29: {
'L': [30, 7, 116, 7, 117, ],
'M': [28, 21, 45, 7, 46, ],
'Q': [30, 1, 23, 37, 24, ],
'H': [30, 19, 15, 26, 16, ],
},
30: {
'L': [30, 5, 115, 10, 116, ],
'M': [28, 19, 47, 10, 48, ],
'Q': [30, 15, 24, 25, 25, ],
'H': [30, 23, 15, 25, 16, ],
},
31: {
'L': [30, 13, 115, 3, 116, ],
'M': [28, 2, 46, 29, 47, ],
'Q': [30, 42, 24, 1, 25, ],
'H': [30, 23, 15, 28, 16, ],
},
32: {
'L': [30, 17, 115, 0, 0, ],
'M': [28, 10, 46, 23, 47, ],
'Q': [30, 10, 24, 35, 25, ],
'H': [30, 19, 15, 35, 16, ],
},
33: {
'L': [30, 17, 115, 1, 116, ],
'M': [28, 14, 46, 21, 47, ],
'Q': [30, 29, 24, 19, 25, ],
'H': [30, 11, 15, 46, 16, ],
},
34: {
'L': [30, 13, 115, 6, 116, ],
'M': [28, 14, 46, 23, 47, ],
'Q': [30, 44, 24, 7, 25, ],
'H': [30, 59, 16, 1, 17, ],
},
35: {
'L': [30, 12, 121, 7, 122, ],
'M': [28, 12, 47, 26, 48, ],
'Q': [30, 39, 24, 14, 25, ],
'H': [30, 22, 15, 41, 16, ],
},
36: {
'L': [30, 6, 121, 14, 122, ],
'M': [28, 6, 47, 34, 48, ],
'Q': [30, 46, 24, 10, 25, ],
'H': [30, 2, 15, 64, 16, ],
},
37: {
'L': [30, 17, 122, 4, 123, ],
'M': [28, 29, 46, 14, 47, ],
'Q': [30, 49, 24, 10, 25, ],
'H': [30, 24, 15, 46, 16, ],
},
38: {
'L': [30, 4, 122, 18, 123, ],
'M': [28, 13, 46, 32, 47, ],
'Q': [30, 48, 24, 14, 25, ],
'H': [30, 42, 15, 32, 16, ],
},
39: {
'L': [30, 20, 117, 4, 118, ],
'M': [28, 40, 47, 7, 48, ],
'Q': [30, 43, 24, 22, 25, ],
'H': [30, 10, 15, 67, 16, ],
},
40: {
'L': [30, 19, 118, 6, 119, ],
'M': [28, 18, 47, 31, 48, ],
'Q': [30, 34, 24, 34, 25, ],
'H': [30, 20, 15, 61, 16, ],
},
}
#: This table lists all of the generator polynomials used by QR Codes.
#: They are indexed by the number of "ECC Code Words" (see table above).
#: This table is taken from:
#:
#: http://www.matchadesign.com/blog/qr-code-demystified-part-4/
generator_polynomials = {
7: [87, 229, 146, 149, 238, 102, 21],
10: [251, 67, 46, 61, 118, 70, 64, 94, 32, 45],
13: [74, 152, 176, 100, 86, 100, 106, 104, 130, 218, 206, 140, 78],
15: [8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105],
16: [120, 104, 107, 109, 102, 161, 76, 3, 91, 191, 147, 169, 182, 194,
225, 120],
17: [43, 139, 206, 78, 43, 239, 123, 206, 214, 147, 24, 99, 150, 39,
243, 163, 136],
18: [215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179,
5, 98, 96, 153],
20: [17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156,
164, 212, 212, 188, 190],
22: [210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98,
80, 219, 134, 160, 105, 165, 231],
24: [229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226,
228, 218, 111, 0, 117, 232, 87, 96, 227, 21],
26: [173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161,
21, 245, 142, 13, 102, 48, 227, 153, 145, 218, 70],
28: [168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27,
232, 201, 21, 43, 245, 87, 42, 195, 212, 119, 242, 37, 9, 123],
30: [41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222,
125, 42, 173, 226, 193, 224, 130, 156, 37, 251, 216, 238, 40, 192,
180]
}
#: This table contains the log values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_log = [
1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152,
45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78,
156, 37, 74, 148, 53, 106, 212, 181, 119, 238, 193, 159, 35, 70, 140, 5,
10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, 95, 190, 97,
194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211,
187, 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175,
67, 134, 17, 34, 68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31,
62, 124, 248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133,
23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, 77,
154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209,
191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227,
219, 171, 75, 150, 49, 98, 196, 149, 55, 110, 220, 165, 87, 174, 65, 130,
25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, 83, 166, 81,
162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18,
36, 72, 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44,
88, 176, 125, 250, 233, 207, 131, 27, 54, 108, 216, 173, 71, 142, 1,]
#: This table contains the antilog values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_antilog = [
None, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100,
224, 14, 52, 141, 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138,
101, 47, 225, 36, 15, 33, 53, 147, 142, 218, 240, 18, 130, 69, 29, 181,
194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114, 166, 6, 191,
139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208,
148, 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66,
182, 163, 195, 72, 126, 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94,
155, 159, 10, 21, 121, 43, 78, 212, 229, 172, 115, 243, 167, 87, 7, 112,
192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24, 227, 165,
153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63,
209, 91, 149, 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242,
86, 211, 171, 20, 42, 93, 158, 132, 60, 57, 83, 71, 109, 65, 162, 31, 45,
67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12, 111, 246, 108,
161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203,
89, 95, 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79,
174, 213, 233, 230, 231, 173, 232, 116, 214, 244, 234, 168, 80, 88, 175,]
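#: Note: the two tables above are inverses of one another, i.e.
#: galois_antilog[galois_log[i]] == i for 0 <= i <= 254.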
#: This table contains the coordinates for the position adjustment patterns.
#: The index of the table corresponds to the QR Code's version number.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
position_adjustment = [
None, # There is no version 0
None, # Version 1 does not need adjustment
[6, 18, ],
[6, 22, ],
[6, 26, ],
[6, 30, ],
[6, 34, ],
[6, 22, 38, ],
[6, 24, 42, ],
[6, 26, 46, ],
[6, 28, 50, ],
[6, 30, 54, ],
[6, 32, 58, ],
[6, 34, 62, ],
[6, 26, 46, 66, ],
[6, 26, 48, 70, ],
[6, 26, 50, 74, ],
[6, 30, 54, 78, ],
[6, 30, 56, 82, ],
[6, 30, 58, 86, ],
[6, 34, 62, 90, ],
[6, 28, 50, 72, 94, ],
[6, 26, 50, 74, 98, ],
[6, 30, 54, 78, 102, ],
[6, 28, 54, 80, 106, ],
[6, 32, 58, 84, 110, ],
[6, 30, 58, 86, 114, ],
[6, 34, 62, 90, 118, ],
[6, 26, 50, 74, 98, 122, ],
[6, 30, 54, 78, 102, 126, ],
[6, 26, 52, 78, 104, 130, ],
[6, 30, 56, 82, 108, 134, ],
[6, 34, 60, 86, 112, 138, ],
[6, 30, 58, 86, 114, 142, ],
[6, 34, 62, 90, 118, 146, ],
[6, 30, 54, 78, 102, 126, 150, ],
[6, 24, 50, 76, 102, 128, 154, ],
[6, 28, 54, 80, 106, 132, 158, ],
[6, 32, 58, 84, 110, 136, 162, ],
[6, 26, 54, 82, 110, 138, 166, ],
[6, 30, 58, 86, 114, 142, 170, ],
]
#: This table specifies the bit pattern to be added to a QR Code's
#: image to specify what version the code is. Note, this pattern
#: is not used for versions 1-6. This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
version_pattern = [None, None, None, None, None, None, None, #0-6
'000111110010010100', '001000010110111100', '001001101010011001',
'001010010011010011', '001011101111110110', '001100011101100010',
'001101100001000111', '001110011000001101', '001111100100101000',
'010000101101111000', '010001010001011101', '010010101000010111',
'010011010100110010', '010100100110100110', '010101011010000011',
'010110100011001001', '010111011111101100', '011000111011000100',
'011001000111100001', '011010111110101011', '011011000010001110',
'011100110000011010', '011101001100111111', '011110110101110101',
'011111001001010000', '100000100111010101', '100001011011110000',
'100010100010111010', '100011011110011111', '100100101100001011',
'100101010000101110', '100110101001100100', '100111010101000001',
'101000110001101001'
]
#: This table contains the bit fields needed to specify the error code level and
#: mask pattern used by a QR Code. This table is take from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
type_bits = {
'L': {
0: '111011111000100',
1: '111001011110011',
2: '111110110101010',
3: '111100010011101',
4: '110011000101111',
5: '110001100011000',
6: '110110001000001',
7: '110100101110110',
},
'M': {
0: '101010000010010',
1: '101000100100101',
2: '101111001111100',
3: '101101101001011',
4: '100010111111001',
5: '100000011001110',
6: '100111110010111',
7: '100101010100000',
},
'Q': {
0: '011010101011111',
1: '011000001101000',
2: '011111100110001',
3: '011101000000110',
4: '010010010110100',
5: '010000110000011',
6: '010111011011010',
7: '010101111101101',
},
'H': {
0: '001011010001001',
1: '001001110111110',
2: '001110011100111',
3: '001100111010000',
4: '000011101100010',
5: '000001001010101',
6: '000110100001100',
7: '000100000111011',
},
}
#: This table contains *functions* to compute whether to change current bit when
#: creating the masks. All of the functions in the table return a boolean value.
#: A True result means you should add the bit to the QR Code exactly as is. A
#: False result means you should add the opposite bit. This table was taken
#: from:
#:
#: http://www.thonky.com/qr-code-tutorial/mask-patterns/
mask_patterns = [
lambda row, col: (row + col) % 2 == 0,
lambda row, col: row % 2 == 0,
lambda row, col: col % 3 == 0,
lambda row, col: (row + col) % 3 == 0,
lambda row, col: ((row // 2) + (col // 3)) % 2 == 0,
lambda row, col: ((row * col) % 2) + ((row * col) % 3) == 0,
lambda row, col: (((row * col) % 2) + ((row * col) % 3)) % 2 == 0,
lambda row, col: (((row + col) % 2) + ((row * col) % 3)) % 2 == 0]
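# For example, mask_patterns[0] returns True (keep the bit exactly as is)
# whenever (row + col) is even, and False (use the opposite bit) otherwise.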
#: This is a table of ASCII escape codes for terminal colors. QR codes
#: are drawn using a space with a colored background. Hence, only
#: codes affecting background colors have been added.
#: http://misc.flogisoft.com/bash/tip_colors_and_formatting
term_colors = {
'default': 49,
'background': 49,
'reverse': 7,
'reversed': 7,
'inverse': 7,
'inverted': 7,
'black': 40,
'red': 41,
'green': 42,
'yellow': 43,
'blue': 44,
'magenta': 45,
'cyan': 46,
'light gray': 47,
'light grey': 47,
'dark gray': 100,
'dark grey': 100,
'light red': 101,
'light green': 102,
'light blue': 103,
'light yellow': 104,
'light magenta': 105,
'light cyan': 106,
'white': 107
}
| <filename>QRCode_generator/.env/qrcode/lib/python3.8/site-packages/pyqrcode/tables.py
# -*- coding: utf-8 -*-
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module lists out all of the tables needed to create a QR code.
If you are viewing this in the HTML documentation, I recommend reading the
actual file instead. The formating for the tables is much more readable.
"""
from __future__ import division, unicode_literals
#: This defines the QR Code's 'mode' which sets what
#: type of code it is along with its size.
modes = {
'numeric': 1,
'alphanumeric': 2,
'binary': 4,
'kanji': 8,
}
#: This defines the amount of error correction. The dictionary
#: allows the user to specify this in several ways.
error_level = {'L': 'L', 'l': 'L', '7%': 'L', .7: 'L',
'M': 'M', 'm': 'M', '15%': 'M', .15: 'M',
'Q': 'Q', 'q': 'Q', '25%': 'Q', .25: 'Q',
'H': 'H', 'h': 'H', '30%': 'H', .30: 'H'}
#: This is a dictionary holds how long the "data length" field is for
#: each version and mode of the QR Code.
data_length_field = {9: {1: 10, 2: 9, 4: 8, 8: 8},
26: {1: 12, 2: 11, 4: 16, 8: 10},
40: {1: 14, 2: 13, 4: 16, 8: 12}}
#: QR Codes uses a unique ASCII-like table for the 'alphanumeric' mode.
#: This is a dictionary representing that unique table, where the
#: keys are the possible characters in the data and the values
#: are the character's numeric representation.
ascii_codes = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14,
'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20, 'L': 21,
'M': 22, 'N': 23, 'O': 24, 'P': 25, 'Q': 26, 'R': 27, 'S': 28,
'T': 29, 'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34, 'Z': 35,
' ': 36, '$': 37, '%': 38, '*': 39, '+': 40, '-': 41, '.': 42,
'/': 43, ':': 44}
#: This array specifies the size of a QR Code in pixels. These numbers are
#: defined in the standard. The indexes correspond to the QR Code's
#: version number. This array was taken from:
#:
#: http://www.denso-wave.com/qrcode/vertable1-e.html
version_size = [None, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57,
61, 65, 69, 73, 77, 81, 85, 89, 93, 97,
101, 105, 109, 113, 117, 121, 125, 129, 133, 137,
141, 145, 149, 153, 157, 161, 165, 169, 173, 177]
#: This dictionary lists the data capacity for all possible QR Codes.
#: This dictionary is organized where the first key corresponds to the
#: QR Code version number. The next key corresponds to the error
#: correction level, see error. The final key corresponds to
#: the mode number, see modes. The zero mode number represents the
#: possible "data bits." This table was taken from:
#:
#: http://www.denso-wave.com/qrcode/vertable1-e.html
data_capacity = {
1: {
"L": {0: 152, 1: 41, 2: 25, 4: 17, 8: 10, },
"M": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8, },
"Q": {0: 104, 1: 27, 2: 16, 4: 11, 8: 7, },
"H": {0: 72, 1: 17, 2: 10, 4: 7, 8: 4, }},
2: {
"L": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20, },
"M": {0: 224, 1: 63, 2: 38, 4: 26, 8: 16, },
"Q": {0: 176, 1: 48, 2: 29, 4: 20, 8: 12, },
"H": {0: 128, 1: 34, 2: 20, 4: 14, 8: 8, }},
3: {
"L": {0: 440, 1: 127, 2: 77, 4: 53, 8: 32, },
"M": {0: 352, 1: 101, 2: 61, 4: 42, 8: 26, },
"Q": {0: 272, 1: 77, 2: 47, 4: 32, 8: 20, },
"H": {0: 208, 1: 58, 2: 35, 4: 24, 8: 15, }},
4: {
"L": {0: 640, 1: 187, 2: 114, 4: 78, 8: 48, },
"M": {0: 512, 1: 149, 2: 90, 4: 62, 8: 38, },
"Q": {0: 384, 1: 111, 2: 67, 4: 46, 8: 28, },
"H": {0: 288, 1: 82, 2: 50, 4: 34, 8: 21, }},
5: {
"L": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65, },
"M": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52, },
"Q": {0: 496, 1: 144, 2: 87, 4: 60, 8: 37, },
"H": {0: 368, 1: 106, 2: 64, 4: 44, 8: 27, }},
6: {
"L": {0: 1088, 1: 322, 2: 195, 4: 134, 8: 82, },
"M": {0: 864, 1: 255, 2: 154, 4: 106, 8: 65, },
"Q": {0: 608, 1: 178, 2: 108, 4: 74, 8: 45, },
"H": {0: 480, 1: 139, 2: 84, 4: 58, 8: 36, }},
7: {
"L": {0: 1248, 1: 370, 2: 224, 4: 154, 8: 95, },
"M": {0: 992, 1: 293, 2: 178, 4: 122, 8: 75, },
"Q": {0: 704, 1: 207, 2: 125, 4: 86, 8: 53, },
"H": {0: 528, 1: 154, 2: 93, 4: 64, 8: 39, }},
8: {
"L": {0: 1552, 1: 461, 2: 279, 4: 192, 8: 118, },
"M": {0: 1232, 1: 365, 2: 221, 4: 152, 8: 93, },
"Q": {0: 880, 1: 259, 2: 157, 4: 108, 8: 66, },
"H": {0: 688, 1: 202, 2: 122, 4: 84, 8: 52, }},
9: {
"L": {0: 1856, 1: 552, 2: 335, 4: 230, 8: 141, },
"M": {0: 1456, 1: 432, 2: 262, 4: 180, 8: 111, },
"Q": {0: 1056, 1: 312, 2: 189, 4: 130, 8: 80, },
"H": {0: 800, 1: 235, 2: 143, 4: 98, 8: 60, }},
10: {
"L": {0: 2192, 1: 652, 2: 395, 4: 271, 8: 167, },
"M": {0: 1728, 1: 513, 2: 311, 4: 213, 8: 131, },
"Q": {0: 1232, 1: 364, 2: 221, 4: 151, 8: 93, },
"H": {0: 976, 1: 288, 2: 174, 4: 119, 8: 74, }},
11: {
"L": {0: 2592, 1: 772, 2: 468, 4: 321, 8: 198, },
"M": {0: 2032, 1: 604, 2: 366, 4: 251, 8: 155, },
"Q": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109, },
"H": {0: 1120, 1: 331, 2: 200, 4: 137, 8: 85, }},
12: {
"L": {0: 2960, 1: 883, 2: 535, 4: 367, 8: 226, },
"M": {0: 2320, 1: 691, 2: 419, 4: 287, 8: 177, },
"Q": {0: 1648, 1: 489, 2: 296, 4: 203, 8: 125, },
"H": {0: 1264, 1: 374, 2: 227, 4: 155, 8: 96, }},
13: {
"L": {0: 3424, 1: 1022, 2: 619, 4: 425, 8: 262, },
"M": {0: 2672, 1: 796, 2: 483, 4: 331, 8: 204, },
"Q": {0: 1952, 1: 580, 2: 352, 4: 241, 8: 149, },
"H": {0: 1440, 1: 427, 2: 259, 4: 177, 8: 109, }},
14: {
"L": {0: 3688, 1: 1101, 2: 667, 4: 458, 8: 282, },
"M": {0: 2920, 1: 871, 2: 528, 4: 362, 8: 223, },
"Q": {0: 2088, 1: 621, 2: 376, 4: 258, 8: 159, },
"H": {0: 1576, 1: 468, 2: 283, 4: 194, 8: 120, }},
15: {
"L": {0: 4184, 1: 1250, 2: 758, 4: 520, 8: 320, },
"M": {0: 3320, 1: 991, 2: 600, 4: 412, 8: 254, },
"Q": {0: 2360, 1: 703, 2: 426, 4: 292, 8: 180, },
"H": {0: 1784, 1: 530, 2: 321, 4: 220, 8: 136, }},
16: {
"L": {0: 4712, 1: 1408, 2: 854, 4: 586, 8: 361, },
"M": {0: 3624, 1: 1082, 2: 656, 4: 450, 8: 277, },
"Q": {0: 2600, 1: 775, 2: 470, 4: 322, 8: 198, },
"H": {0: 2024, 1: 602, 2: 365, 4: 250, 8: 154, }},
17: {
"L": {0: 5176, 1: 1548, 2: 938, 4: 644, 8: 397, },
"M": {0: 4056, 1: 1212, 2: 734, 4: 504, 8: 310, },
"Q": {0: 2936, 1: 876, 2: 531, 4: 364, 8: 224, },
"H": {0: 2264, 1: 674, 2: 408, 4: 280, 8: 173, }},
18: {
"L": {0: 5768, 1: 1725, 2: 1046, 4: 718, 8: 442, },
"M": {0: 4504, 1: 1346, 2: 816, 4: 560, 8: 345, },
"Q": {0: 3176, 1: 948, 2: 574, 4: 394, 8: 243, },
"H": {0: 2504, 1: 746, 2: 452, 4: 310, 8: 191, }},
19: {
"L": {0: 6360, 1: 1903, 2: 1153, 4: 792, 8: 488, },
"M": {0: 5016, 1: 1500, 2: 909, 4: 624, 8: 384, },
"Q": {0: 3560, 1: 1063, 2: 644, 4: 442, 8: 272, },
"H": {0: 2728, 1: 813, 2: 493, 4: 338, 8: 208, }},
20: {
"L": {0: 6888, 1: 2061, 2: 1249, 4: 858, 8: 528, },
"M": {0: 5352, 1: 1600, 2: 970, 4: 666, 8: 410, },
"Q": {0: 3880, 1: 1159, 2: 702, 4: 482, 8: 297, },
"H": {0: 3080, 1: 919, 2: 557, 4: 382, 8: 235, }},
21: {
"L": {0: 7456, 1: 2232, 2: 1352, 4: 929, 8: 572, },
"M": {0: 5712, 1: 1708, 2: 1035, 4: 711, 8: 438, },
"Q": {0: 4096, 1: 1224, 2: 742, 4: 509, 8: 314, },
"H": {0: 3248, 1: 969, 2: 587, 4: 403, 8: 248, }},
22: {
"L": {0: 8048, 1: 2409, 2: 1460, 4: 1003, 8: 618, },
"M": {0: 6256, 1: 1872, 2: 1134, 4: 779, 8: 480, },
"Q": {0: 4544, 1: 1358, 2: 823, 4: 565, 8: 348, },
"H": {0: 3536, 1: 1056, 2: 640, 4: 439, 8: 270, }},
23: {
"L": {0: 8752, 1: 2620, 2: 1588, 4: 1091, 8: 672, },
"M": {0: 6880, 1: 2059, 2: 1248, 4: 857, 8: 528, },
"Q": {0: 4912, 1: 1468, 2: 890, 4: 611, 8: 376, },
"H": {0: 3712, 1: 1108, 2: 672, 4: 461, 8: 284, }},
24: {
"L": {0: 9392, 1: 2812, 2: 1704, 4: 1171, 8: 721, },
"M": {0: 7312, 1: 2188, 2: 1326, 4: 911, 8: 561, },
"Q": {0: 5312, 1: 1588, 2: 963, 4: 661, 8: 407, },
"H": {0: 4112, 1: 1228, 2: 744, 4: 511, 8: 315, }},
25: {
"L": {0: 10208, 1: 3057, 2: 1853, 4: 1273, 8: 784, },
"M": {0: 8000, 1: 2395, 2: 1451, 4: 997, 8: 614, },
"Q": {0: 5744, 1: 1718, 2: 1041, 4: 715, 8: 440, },
"H": {0: 4304, 1: 1286, 2: 779, 4: 535, 8: 330, }},
26: {
"L": {0: 10960, 1: 3283, 2: 1990, 4: 1367, 8: 842, },
"M": {0: 8496, 1: 2544, 2: 1542, 4: 1059, 8: 652, },
"Q": {0: 6032, 1: 1804, 2: 1094, 4: 751, 8: 462, },
"H": {0: 4768, 1: 1425, 2: 864, 4: 593, 8: 365, }},
27: {
"L": {0: 11744, 1: 3514, 2: 2132, 4: 1465, 8: 902, },
"M": {0: 9024, 1: 2701, 2: 1637, 4: 1125, 8: 692, },
"Q": {0: 6464, 1: 1933, 2: 1172, 4: 805, 8: 496, },
"H": {0: 5024, 1: 1501, 2: 910, 4: 625, 8: 385, }},
28: {
"L": {0: 12248, 1: 3669, 2: 2223, 4: 1528, 8: 940, },
"M": {0: 9544, 1: 2857, 2: 1732, 4: 1190, 8: 732, },
"Q": {0: 6968, 1: 2085, 2: 1263, 4: 868, 8: 534, },
"H": {0: 5288, 1: 1581, 2: 958, 4: 658, 8: 405, }},
29: {
"L": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002, },
"M": {0: 10136, 1: 3035, 2: 1839, 4: 1264, 8: 778, },
"Q": {0: 7288, 1: 2181, 2: 1322, 4: 908, 8: 559, },
"H": {0: 5608, 1: 1677, 2: 1016, 4: 698, 8: 430, }},
30: {
"L": {0: 13880, 1: 4158, 2: 2520, 4: 1732, 8: 1066, },
"M": {0: 10984, 1: 3289, 2: 1994, 4: 1370, 8: 843, },
"Q": {0: 7880, 1: 2358, 2: 1429, 4: 982, 8: 604, },
"H": {0: 5960, 1: 1782, 2: 1080, 4: 742, 8: 457, }},
31: {
"L": {0: 14744, 1: 4417, 2: 2677, 4: 1840, 8: 1132, },
"M": {0: 11640, 1: 3486, 2: 2113, 4: 1452, 8: 894, },
"Q": {0: 8264, 1: 2473, 2: 1499, 4: 1030, 8: 634, },
"H": {0: 6344, 1: 1897, 2: 1150, 4: 790, 8: 486, }},
32: {
"L": {0: 15640, 1: 4686, 2: 2840, 4: 1952, 8: 1201, },
"M": {0: 12328, 1: 3693, 2: 2238, 4: 1538, 8: 947, },
"Q": {0: 8920, 1: 2670, 2: 1618, 4: 1112, 8: 684, },
"H": {0: 6760, 1: 2022, 2: 1226, 4: 842, 8: 518, }},
33: {
"L": {0: 16568, 1: 4965, 2: 3009, 4: 2068, 8: 1273, },
"M": {0: 13048, 1: 3909, 2: 2369, 4: 1628, 8: 1002, },
"Q": {0: 9368, 1: 2805, 2: 1700, 4: 1168, 8: 719, },
"H": {0: 7208, 1: 2157, 2: 1307, 4: 898, 8: 553, }},
34: {
"L": {0: 17528, 1: 5253, 2: 3183, 4: 2188, 8: 1347, },
"M": {0: 13800, 1: 4134, 2: 2506, 4: 1722, 8: 1060, },
"Q": {0: 9848, 1: 2949, 2: 1787, 4: 1228, 8: 756, },
"H": {0: 7688, 1: 2301, 2: 1394, 4: 958, 8: 590, }},
35: {
"L": {0: 18448, 1: 5529, 2: 3351, 4: 2303, 8: 1417, },
"M": {0: 14496, 1: 4343, 2: 2632, 4: 1809, 8: 1113, },
"Q": {0: 10288, 1: 3081, 2: 1867, 4: 1283, 8: 790, },
"H": {0: 7888, 1: 2361, 2: 1431, 4: 983, 8: 605, }},
36: {
"L": {0: 19472, 1: 5836, 2: 3537, 4: 2431, 8: 1496, },
"M": {0: 15312, 1: 4588, 2: 2780, 4: 1911, 8: 1176, },
"Q": {0: 10832, 1: 3244, 2: 1966, 4: 1351, 8: 832, },
"H": {0: 8432, 1: 2524, 2: 1530, 4: 1051, 8: 647, }},
37: {
"L": {0: 20528, 1: 6153, 2: 3729, 4: 2563, 8: 1577, },
"M": {0: 15936, 1: 4775, 2: 2894, 4: 1989, 8: 1224, },
"Q": {0: 11408, 1: 3417, 2: 2071, 4: 1423, 8: 876, },
"H": {0: 8768, 1: 2625, 2: 1591, 4: 1093, 8: 673, }},
38: {
"L": {0: 21616, 1: 6479, 2: 3927, 4: 2699, 8: 1661, },
"M": {0: 16816, 1: 5039, 2: 3054, 4: 2099, 8: 1292, },
"Q": {0: 12016, 1: 3599, 2: 2181, 4: 1499, 8: 923, },
"H": {0: 9136, 1: 2735, 2: 1658, 4: 1139, 8: 701, }},
39: {
"L": {0: 22496, 1: 6743, 2: 4087, 4: 2809, 8: 1729, },
"M": {0: 17728, 1: 5313, 2: 3220, 4: 2213, 8: 1362, },
"Q": {0: 12656, 1: 3791, 2: 2298, 4: 1579, 8: 972, },
"H": {0: 9776, 1: 2927, 2: 1774, 4: 1219, 8: 750, }},
40: {
"L": {0: 23648, 1: 7089, 2: 4296, 4: 2953, 8: 1817, },
"M": {0: 18672, 1: 5596, 2: 3391, 4: 2331, 8: 1435, },
"Q": {0: 13328, 1: 3993, 2: 2420, 4: 1663, 8: 1024, },
"H": {0: 10208, 1: 3057, 2: 1852, 4: 1273, 8: 784, }}
}
#: This table defines the "Error Correction Code Words and Block Information."
#: The table lists the number of error correction words that are required
#: to be generated for each version and error correction level. The table
#: is accessed by first using the version number as a key and then the
#: error level. The array values correspond to these columns from the source
#: table:
#:
#: +----------------------------+
#: |0 | EC Code Words Per Block |
#: +----------------------------+
#: |1 | Block 1 Count |
#: +----------------------------+
#: |2 | Block 1 Data Code Words |
#: +----------------------------+
#: |3 | Block 2 Count |
#: +----------------------------+
#: |4 | Block 2 Data Code Words |
#: +----------------------------+
#:
#: This table was taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/error-correction-table/
eccwbi = {
1: {
'L': [7, 1, 19, 0, 0, ],
'M': [10, 1, 16, 0, 0, ],
'Q': [13, 1, 13, 0, 0, ],
'H': [17, 1, 9, 0, 0, ],
},
2: {
'L': [10, 1, 34, 0, 0, ],
'M': [16, 1, 28, 0, 0, ],
'Q': [22, 1, 22, 0, 0, ],
'H': [28, 1, 16, 0, 0, ],
},
3: {
'L': [15, 1, 55, 0, 0, ],
'M': [26, 1, 44, 0, 0, ],
'Q': [18, 2, 17, 0, 0, ],
'H': [22, 2, 13, 0, 0, ],
},
4: {
'L': [20, 1, 80, 0, 0, ],
'M': [18, 2, 32, 0, 0, ],
'Q': [26, 2, 24, 0, 0, ],
'H': [16, 4, 9, 0, 0, ],
},
5: {
'L': [26, 1, 108, 0, 0, ],
'M': [24, 2, 43, 0, 0, ],
'Q': [18, 2, 15, 2, 16, ],
'H': [22, 2, 11, 2, 12, ],
},
6: {
'L': [18, 2, 68, 0, 0, ],
'M': [16, 4, 27, 0, 0, ],
'Q': [24, 4, 19, 0, 0, ],
'H': [28, 4, 15, 0, 0, ],
},
7: {
'L': [20, 2, 78, 0, 0, ],
'M': [18, 4, 31, 0, 0, ],
'Q': [18, 2, 14, 4, 15, ],
'H': [26, 4, 13, 1, 14, ],
},
8: {
'L': [24, 2, 97, 0, 0, ],
'M': [22, 2, 38, 2, 39, ],
'Q': [22, 4, 18, 2, 19, ],
'H': [26, 4, 14, 2, 15, ],
},
9: {
'L': [30, 2, 116, 0, 0, ],
'M': [22, 3, 36, 2, 37, ],
'Q': [20, 4, 16, 4, 17, ],
'H': [24, 4, 12, 4, 13, ],
},
10: {
'L': [18, 2, 68, 2, 69, ],
'M': [26, 4, 43, 1, 44, ],
'Q': [24, 6, 19, 2, 20, ],
'H': [28, 6, 15, 2, 16, ],
},
11: {
'L': [20, 4, 81, 0, 0, ],
'M': [30, 1, 50, 4, 51, ],
'Q': [28, 4, 22, 4, 23, ],
'H': [24, 3, 12, 8, 13, ],
},
12: {
'L': [24, 2, 92, 2, 93, ],
'M': [22, 6, 36, 2, 37, ],
'Q': [26, 4, 20, 6, 21, ],
'H': [28, 7, 14, 4, 15, ],
},
13: {
'L': [26, 4, 107, 0, 0, ],
'M': [22, 8, 37, 1, 38, ],
'Q': [24, 8, 20, 4, 21, ],
'H': [22, 12, 11, 4, 12, ],
},
14: {
'L': [30, 3, 115, 1, 116, ],
'M': [24, 4, 40, 5, 41, ],
'Q': [20, 11, 16, 5, 17, ],
'H': [24, 11, 12, 5, 13, ],
},
15: {
'L': [22, 5, 87, 1, 88, ],
'M': [24, 5, 41, 5, 42, ],
'Q': [30, 5, 24, 7, 25, ],
'H': [24, 11, 12, 7, 13, ],
},
16: {
'L': [24, 5, 98, 1, 99, ],
'M': [28, 7, 45, 3, 46, ],
'Q': [24, 15, 19, 2, 20, ],
'H': [30, 3, 15, 13, 16, ],
},
17: {
'L': [28, 1, 107, 5, 108, ],
'M': [28, 10, 46, 1, 47, ],
'Q': [28, 1, 22, 15, 23, ],
'H': [28, 2, 14, 17, 15, ],
},
18: {
'L': [30, 5, 120, 1, 121, ],
'M': [26, 9, 43, 4, 44, ],
'Q': [28, 17, 22, 1, 23, ],
'H': [28, 2, 14, 19, 15, ],
},
19: {
'L': [28, 3, 113, 4, 114, ],
'M': [26, 3, 44, 11, 45, ],
'Q': [26, 17, 21, 4, 22, ],
'H': [26, 9, 13, 16, 14, ],
},
20: {
'L': [28, 3, 107, 5, 108, ],
'M': [26, 3, 41, 13, 42, ],
'Q': [30, 15, 24, 5, 25, ],
'H': [28, 15, 15, 10, 16, ],
},
21: {
'L': [28, 4, 116, 4, 117, ],
'M': [26, 17, 42, 0, 0, ],
'Q': [28, 17, 22, 6, 23, ],
'H': [30, 19, 16, 6, 17, ],
},
22: {
'L': [28, 2, 111, 7, 112, ],
'M': [28, 17, 46, 0, 0, ],
'Q': [30, 7, 24, 16, 25, ],
'H': [24, 34, 13, 0, 0, ],
},
23: {
'L': [30, 4, 121, 5, 122, ],
'M': [28, 4, 47, 14, 48, ],
'Q': [30, 11, 24, 14, 25, ],
'H': [30, 16, 15, 14, 16, ],
},
24: {
'L': [30, 6, 117, 4, 118, ],
'M': [28, 6, 45, 14, 46, ],
'Q': [30, 11, 24, 16, 25, ],
'H': [30, 30, 16, 2, 17, ],
},
25: {
'L': [26, 8, 106, 4, 107, ],
'M': [28, 8, 47, 13, 48, ],
'Q': [30, 7, 24, 22, 25, ],
'H': [30, 22, 15, 13, 16, ],
},
26: {
'L': [28, 10, 114, 2, 115, ],
'M': [28, 19, 46, 4, 47, ],
'Q': [28, 28, 22, 6, 23, ],
'H': [30, 33, 16, 4, 17, ],
},
27: {
'L': [30, 8, 122, 4, 123, ],
'M': [28, 22, 45, 3, 46, ],
'Q': [30, 8, 23, 26, 24, ],
'H': [30, 12, 15, 28, 16, ],
},
28: {
'L': [30, 3, 117, 10, 118, ],
'M': [28, 3, 45, 23, 46, ],
'Q': [30, 4, 24, 31, 25, ],
'H': [30, 11, 15, 31, 16, ],
},
29: {
'L': [30, 7, 116, 7, 117, ],
'M': [28, 21, 45, 7, 46, ],
'Q': [30, 1, 23, 37, 24, ],
'H': [30, 19, 15, 26, 16, ],
},
30: {
'L': [30, 5, 115, 10, 116, ],
'M': [28, 19, 47, 10, 48, ],
'Q': [30, 15, 24, 25, 25, ],
'H': [30, 23, 15, 25, 16, ],
},
31: {
'L': [30, 13, 115, 3, 116, ],
'M': [28, 2, 46, 29, 47, ],
'Q': [30, 42, 24, 1, 25, ],
'H': [30, 23, 15, 28, 16, ],
},
32: {
'L': [30, 17, 115, 0, 0, ],
'M': [28, 10, 46, 23, 47, ],
'Q': [30, 10, 24, 35, 25, ],
'H': [30, 19, 15, 35, 16, ],
},
33: {
'L': [30, 17, 115, 1, 116, ],
'M': [28, 14, 46, 21, 47, ],
'Q': [30, 29, 24, 19, 25, ],
'H': [30, 11, 15, 46, 16, ],
},
34: {
'L': [30, 13, 115, 6, 116, ],
'M': [28, 14, 46, 23, 47, ],
'Q': [30, 44, 24, 7, 25, ],
'H': [30, 59, 16, 1, 17, ],
},
35: {
'L': [30, 12, 121, 7, 122, ],
'M': [28, 12, 47, 26, 48, ],
'Q': [30, 39, 24, 14, 25, ],
'H': [30, 22, 15, 41, 16, ],
},
36: {
'L': [30, 6, 121, 14, 122, ],
'M': [28, 6, 47, 34, 48, ],
'Q': [30, 46, 24, 10, 25, ],
'H': [30, 2, 15, 64, 16, ],
},
37: {
'L': [30, 17, 122, 4, 123, ],
'M': [28, 29, 46, 14, 47, ],
'Q': [30, 49, 24, 10, 25, ],
'H': [30, 24, 15, 46, 16, ],
},
38: {
'L': [30, 4, 122, 18, 123, ],
'M': [28, 13, 46, 32, 47, ],
'Q': [30, 48, 24, 14, 25, ],
'H': [30, 42, 15, 32, 16, ],
},
39: {
'L': [30, 20, 117, 4, 118, ],
'M': [28, 40, 47, 7, 48, ],
'Q': [30, 43, 24, 22, 25, ],
'H': [30, 10, 15, 67, 16, ],
},
40: {
'L': [30, 19, 118, 6, 119, ],
'M': [28, 18, 47, 31, 48, ],
'Q': [30, 34, 24, 34, 25, ],
'H': [30, 20, 15, 61, 16, ],
},
}
#: This table lists all of the generator polynomials used by QR Codes.
#: They are indexed by the number of "ECC Code Words" (see table above).
#: This table is taken from:
#:
#: http://www.matchadesign.com/blog/qr-code-demystified-part-4/
generator_polynomials = {
7: [87, 229, 146, 149, 238, 102, 21],
10: [251, 67, 46, 61, 118, 70, 64, 94, 32, 45],
13: [74, 152, 176, 100, 86, 100, 106, 104, 130, 218, 206, 140, 78],
15: [8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105],
16: [120, 104, 107, 109, 102, 161, 76, 3, 91, 191, 147, 169, 182, 194,
225, 120],
17: [43, 139, 206, 78, 43, 239, 123, 206, 214, 147, 24, 99, 150, 39,
243, 163, 136],
18: [215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179,
5, 98, 96, 153],
20: [17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156,
164, 212, 212, 188, 190],
22: [210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98,
80, 219, 134, 160, 105, 165, 231],
24: [229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226,
228, 218, 111, 0, 117, 232, 87, 96, 227, 21],
26: [173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161,
21, 245, 142, 13, 102, 48, 227, 153, 145, 218, 70],
28: [168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27,
232, 201, 21, 43, 245, 87, 42, 195, 212, 119, 242, 37, 9, 123],
30: [41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222,
125, 42, 173, 226, 193, 224, 130, 156, 37, 251, 216, 238, 40, 192,
180]
}
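# For example, a version 15-L code uses 22 error correction code words per
# block (see the error correction table above), so its error correction
# words are computed with generator_polynomials[22].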
#: This table contains the log values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_log = [
1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152,
45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78,
156, 37, 74, 148, 53, 106, 212, 181, 119, 238, 193, 159, 35, 70, 140, 5,
10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, 95, 190, 97,
194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211,
187, 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175,
67, 134, 17, 34, 68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31,
62, 124, 248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133,
23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, 77,
154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209,
191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227,
219, 171, 75, 150, 49, 98, 196, 149, 55, 110, 220, 165, 87, 174, 65, 130,
25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, 83, 166, 81,
162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18,
36, 72, 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44,
88, 176, 125, 250, 233, 207, 131, 27, 54, 108, 216, 173, 71, 142, 1,]
#: This table contains the antilog values used in GF(256) arithmetic.
#: They are used to generate error correction codes for QR Codes.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/log-antilog-table/
galois_antilog = [
None, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100,
224, 14, 52, 141, 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138,
101, 47, 225, 36, 15, 33, 53, 147, 142, 218, 240, 18, 130, 69, 29, 181,
194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114, 166, 6, 191,
139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208,
148, 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66,
182, 163, 195, 72, 126, 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94,
155, 159, 10, 21, 121, 43, 78, 212, 229, 172, 115, 243, 167, 87, 7, 112,
192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24, 227, 165,
153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63,
209, 91, 149, 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242,
86, 211, 171, 20, 42, 93, 158, 132, 60, 57, 83, 71, 109, 65, 162, 31, 45,
67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12, 111, 246, 108,
161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203,
89, 95, 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79,
174, 213, 233, 230, 231, 173, 232, 116, 214, 244, 234, 168, 80, 88, 175,]
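# A minimal sketch (hypothetical helper, not part of the original module)
# showing how the two tables above are typically combined to multiply two
# bytes in GF(256): add their discrete logarithms modulo 255 and look the
# sum up in the exponent table.
def _gf256_multiply_example(a, b):
    # Zero has no discrete logarithm, so handle it separately.
    if a == 0 or b == 0:
        return 0
    return galois_log[(galois_antilog[a] + galois_antilog[b]) % 255]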
#: This table contains the coordinates for the position adjustment patterns.
#: The index of the table corresponds to the QR Code's version number.
#: This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
position_adjustment = [
    None, #There is no version 0
None, #Version 1 does not need adjustment
[6, 18, ],
[6, 22, ],
[6, 26, ],
[6, 30, ],
[6, 34, ],
[6, 22, 38, ],
[6, 24, 42, ],
[6, 26, 46, ],
[6, 28, 50, ],
[6, 30, 54, ],
[6, 32, 58, ],
[6, 34, 62, ],
[6, 26, 46, 66, ],
[6, 26, 48, 70, ],
[6, 26, 50, 74, ],
[6, 30, 54, 78, ],
[6, 30, 56, 82, ],
[6, 30, 58, 86, ],
[6, 34, 62, 90, ],
[6, 28, 50, 72, 94, ],
[6, 26, 50, 74, 98, ],
[6, 30, 54, 78, 102, ],
[6, 28, 54, 80, 106, ],
[6, 32, 58, 84, 110, ],
[6, 30, 58, 86, 114, ],
[6, 34, 62, 90, 118, ],
[6, 26, 50, 74, 98, 122, ],
[6, 30, 54, 78, 102, 126, ],
[6, 26, 52, 78, 104, 130, ],
[6, 30, 56, 82, 108, 134, ],
[6, 34, 60, 86, 112, 138, ],
[6, 30, 58, 86, 114, 142, ],
[6, 34, 62, 90, 118, 146, ],
[6, 30, 54, 78, 102, 126, 150, ],
[6, 24, 50, 76, 102, 128, 154, ],
[6, 28, 54, 80, 106, 132, 158, ],
[6, 32, 58, 84, 110, 136, 162, ],
[6, 26, 54, 82, 110, 138, 166, ],
[6, 30, 58, 86, 114, 142, 170, ],
]
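# A minimal sketch (hypothetical helper, not part of the original table)
# showing how these coordinates are typically expanded into alignment
# pattern centers: take every (row, col) pair and drop the three pairs
# that would overlap the finder patterns.
def _alignment_centers_example(version):
    coords = position_adjustment[version]
    if not coords:
        # Versions 0 and 1 have no alignment patterns.
        return []
    first, last = coords[0], coords[-1]
    overlaps_finder = set([(first, first), (first, last), (last, first)])
    return [(r, c) for r in coords for c in coords
            if (r, c) not in overlaps_finder]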
#: This table specifies the bit pattern to be added to a QR Code's
#: image to specify what version the code is. Note, this pattern
#: is not used for versions 1-6. This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
version_pattern = [None, None, None, None, None, None, None, #0-6
'000111110010010100', '001000010110111100', '001001101010011001',
'001010010011010011', '001011101111110110', '001100011101100010',
'001101100001000111', '001110011000001101', '001111100100101000',
'010000101101111000', '010001010001011101', '010010101000010111',
'010011010100110010', '010100100110100110', '010101011010000011',
'010110100011001001', '010111011111101100', '011000111011000100',
'011001000111100001', '011010111110101011', '011011000010001110',
'011100110000011010', '011101001100111111', '011110110101110101',
'011111001001010000', '100000100111010101', '100001011011110000',
'100010100010111010', '100011011110011111', '100100101100001011',
'100101010000101110', '100110101001100100', '100111010101000001',
'101000110001101001'
]
#: This table contains the bit fields needed to specify the error correction
#: level and mask pattern used by a QR Code. This table is taken from:
#:
#: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/
type_bits = {
'L': {
0: '111011111000100',
1: '111001011110011',
2: '111110110101010',
3: '111100010011101',
4: '110011000101111',
5: '110001100011000',
6: '110110001000001',
7: '110100101110110',
},
'M': {
0: '101010000010010',
1: '101000100100101',
2: '101111001111100',
3: '101101101001011',
4: '100010111111001',
5: '100000011001110',
6: '100111110010111',
7: '100101010100000',
},
'Q': {
0: '011010101011111',
1: '011000001101000',
2: '011111100110001',
3: '011101000000110',
4: '010010010110100',
5: '010000110000011',
6: '010111011011010',
7: '010101111101101',
},
'H': {
0: '001011010001001',
1: '001001110111110',
2: '001110011100111',
3: '001100111010000',
4: '000011101100010',
5: '000001001010101',
6: '000110100001100',
7: '000100000111011',
},
}
#: This table contains *functions* to compute whether to change the current bit when
#: creating the masks. All of the functions in the table return a boolean value.
#: A True result means you should add the bit to the QR Code exactly as is. A
#: False result means you should add the opposite bit. This table was taken
#: from:
#:
#: http://www.thonky.com/qr-code-tutorial/mask-patterns/
mask_patterns = [
lambda row, col: (row + col) % 2 == 0,
lambda row, col: row % 2 == 0,
lambda row, col: col % 3 == 0,
lambda row, col: (row + col) % 3 == 0,
lambda row, col: ((row // 2) + (col // 3)) % 2 == 0,
lambda row, col: ((row * col) % 2) + ((row * col) % 3) == 0,
lambda row, col: (((row * col) % 2) + ((row * col) % 3)) % 2 == 0,
lambda row, col: (((row + col) % 2) + ((row * col) % 3)) % 2 == 0]
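# A minimal sketch (hypothetical helper, not part of the original module)
# showing how one of these functions is applied to a single module, using
# the convention described above: keep the bit when the function returns
# True, otherwise emit the opposite bit.
def _apply_mask_example(bit, row, col, pattern=0):
    return bit if mask_patterns[pattern](row, col) else bit ^ 1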
#: This is a table of ANSI escape codes for terminal colors. QR codes
#: are drawn using a space with a colored background. Hence, only
#: codes affecting background colors have been added.
#: http://misc.flogisoft.com/bash/tip_colors_and_formatting
term_colors = {
'default': 49,
'background': 49,
'reverse': 7,
'reversed': 7,
'inverse': 7,
'inverted': 7,
'black': 40,
'red': 41,
'green': 42,
'yellow': 43,
'blue': 44,
'magenta': 45,
'cyan': 46,
'light gray': 47,
'light grey': 47,
'dark gray': 100,
'dark grey': 100,
'light red': 101,
'light green': 102,
'light blue': 103,
'light yellow': 104,
'light magenta': 105,
'light cyan': 106,
'white': 107
}
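# A minimal sketch (hypothetical helper, not part of the original module)
# showing how one of these codes becomes a terminal "pixel": a space is
# printed on the chosen background color, followed by a reset sequence.
def _term_pixel_example(color='black'):
    return '\x1b[{0}m \x1b[0m'.format(term_colors[color])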
| en | 0.746835 | # -*- coding: utf-8 -*- # Copyright (c) 2013, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This module lists out all of the tables needed to create a QR code. If you are viewing this in the HTML documentation, I recommend reading the actual file instead. The formating for the tables is much more readable. #: This defines the QR Code's 'mode' which sets what #: type of code it is along with its size. #: This defines the amount of error correction. The dictionary #: allows the user to specify this in several ways. #: This is a dictionary holds how long the "data length" field is for #: each version and mode of the QR Code. #: QR Codes uses a unique ASCII-like table for the 'alphanumeric' mode. #: This is a dictionary representing that unique table, where the #: keys are the possible characters in the data and the values #: are the character's numeric representation. #: This array specifies the size of a QR Code in pixels. These numbers are #: defined in the standard. The indexes correspond to the QR Code's #: version number. This array was taken from: #: #: http://www.denso-wave.com/qrcode/vertable1-e.html #: This dictionary lists the data capacity for all possible QR Codes. #: This dictionary is organized where the first key corresponds to the #: QR Code version number. The next key corresponds to the error #: correction level, see error. The final key corresponds to #: the mode number, see modes. The zero mode number represents the #: possible "data bits." This table was taken from: #: #: http://www.denso-wave.com/qrcode/vertable1-e.html #: This table defines the "Error Correction Code Words and Block Information." #: The table lists the number of error correction words that are required #: to be generated for each version and error correction level. The table #: is accessed by first using the version number as a key and then the #: error level. 
The array values correspond to these columns from the source #: table: #: #: +----------------------------+ #: |0 | EC Code Words Per Block | #: +----------------------------+ #: |1 | Block 1 Count | #: +----------------------------+ #: |2 | Block 1 Data Code Words | #: +----------------------------+ #: |3 | Block 2 Count | #: +----------------------------+ #: |4 | Block 2 Data Code Words | #: +----------------------------+ #: #: This table was taken from: #: #: http://www.thonky.com/qr-code-tutorial/error-correction-table/ #: This table lists all of the generator polynomials used by QR Codes. #: They are indexed by the number of "ECC Code Words" (see table above). #: This table is taken from: #: #: http://www.matchadesign.com/blog/qr-code-demystified-part-4/ #: This table contains the log and values used in GF(256) arithmetic. #: They are used to generate error correction codes for QR Codes. #: This table is taken from: #: #: vhttp://www.thonky.com/qr-code-tutorial/log-antilog-table/ #: This table contains the antilog and values used in GF(256) arithmetic. #: They are used to generate error correction codes for QR Codes. #: This table is taken from: #: #: http://www.thonky.com/qr-code-tutorial/log-antilog-table/ #: This table contains the coordinates for the position adjustment patterns. #: The index of the table corresponds to the QR Code's version number. #: This table is taken from: #: #: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/ #There is not version 0 #Version 1 does not need adjustment #: This table specifies the bit pattern to be added to a QR Code's #: image to specify what version the code is. Note, this pattern #: is not used for versions 1-6. This table is taken from: #: #: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/ #0-6 #: This table contains the bit fields needed to specify the error code level and #: mask pattern used by a QR Code. This table is take from: #: #: http://www.thonky.com/qr-code-tutorial/part-3-mask-pattern/ #: This table contains *functions* to compute whether to change current bit when #: creating the masks. All of the functions in the table return a boolean value. #: A True result means you should add the bit to the QR Code exactly as is. A #: False result means you should add the opposite bit. This table was taken #: from: #: #: http://www.thonky.com/qr-code-tutorial/mask-patterns/ #: This is a table of ASCII escape code for terminal colors. QR codes #: are drawn using a space with a colored background. Hence, only #: codes affecting background colors have been added. #: http://misc.flogisoft.com/bash/tip_colors_and_formatting | 1.031432 | 1 |
fluent_contents/plugins/oembeditem/content_plugins.py | jayvdb/django-fluent-contents | 0 | 6631298 | """
Definition of the plugin.
"""
import re
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.oembeditem.models import OEmbedItem
re_safe = re.compile(r"[^\w_-]")
@plugin_pool.register
class OEmbedPlugin(ContentPlugin):
model = OEmbedItem
category = ContentPlugin.MEDIA
admin_form_template = "admin/fluent_contents/plugins/oembeditem/admin_form.html"
render_template = "fluent_contents/plugins/oembed/default.html"
#: Custom render template
render_template_base = "fluent_contents/plugins/oembed/{type}.html"
fieldsets = (
(None, {"fields": ("embed_url", ("embed_max_width", "embed_max_height"))}),
)
class Media:
css = {"screen": ("fluent_contents/plugins/oembed/oembed_admin.css",)}
def get_render_template(self, request, instance, **kwargs):
"""
        Allow styling the item based on its type.
"""
safe_filename = re_safe.sub("", instance.type or "default")
return [
self.render_template_base.format(type=safe_filename),
self.render_template,
]
| """
Definition of the plugin.
"""
import re
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.oembeditem.models import OEmbedItem
re_safe = re.compile(r"[^\w_-]")
@plugin_pool.register
class OEmbedPlugin(ContentPlugin):
model = OEmbedItem
category = ContentPlugin.MEDIA
admin_form_template = "admin/fluent_contents/plugins/oembeditem/admin_form.html"
render_template = "fluent_contents/plugins/oembed/default.html"
#: Custom render template
render_template_base = "fluent_contents/plugins/oembed/{type}.html"
fieldsets = (
(None, {"fields": ("embed_url", ("embed_max_width", "embed_max_height"))}),
)
class Media:
css = {"screen": ("fluent_contents/plugins/oembed/oembed_admin.css",)}
def get_render_template(self, request, instance, **kwargs):
"""
        Allow styling the item based on its type.
"""
safe_filename = re_safe.sub("", instance.type or "default")
return [
self.render_template_base.format(type=safe_filename),
self.render_template,
]
| en | 0.719456 | Definition of the plugin. #: Custom render template Allow to style the item based on the type. | 2.377124 | 2 |
kale/queue_selector.py | ORIGINALLIFE/ndkale | 210 | 6631299 | """Module containing queue selection algorithms.
How to implement your own queue selection algorithm?
class MyQueueSelector(SelectQueueBase):
def get_queue(self):
# Get a list of all queues defined in the YAML file that is
# specified at QUEUE_CONFIG in settings file.
#
# You may use these two properties of a queue object to select
# a queue:
#
# - name: string of queue name
# - priority: integer of queue priority; larger value,
# higher priority
queues = self.queue_info.get_queues()
# Implement your algorithm here
# ...
# Eventually, return one of queue object from queues
return queue
"""
from __future__ import absolute_import
import random
from six.moves import range
class SelectQueueBase(object):
"""Base class for selecting a queue.
The only method that needs to be implemented:
get_queue: it's called for each task processing cycle on task worker.
"""
def __init__(self, queue_info):
self.queue_info = queue_info
def get_queue(self, *args, **kwargs):
"""Returns a TaskQueue object."""
raise NotImplementedError('Base class cannot be used directly.')
class Random(SelectQueueBase):
"""Randomly selects a queue without considering priority."""
def get_queue(self):
queues = self.queue_info.get_queues()
return random.choice(queues)
class Lottery(SelectQueueBase):
"""Use lottery scheduling algorithm to select a queue based on priority."""
@staticmethod
def _run_lottery(queues):
"""Draw lottery from a list of candidate queues.
:param list[TaskQueue] queues: a list of candidate queues.
:return: A TaskQueue object that wins lottery. If it fails (e.g.,
invalid priority of queues), it returns None.
:rtype: TaskQueue
"""
tickets = {}
total_tickets = 0
for queue in queues:
# Queue priority should be within 1 to 100.
if queue.priority < 1 or queue.priority > 100:
continue
priority = queue.priority
low = total_tickets
total_tickets += priority
high = total_tickets
tickets[queue.name] = (low, high)
# [0, total_tickets)
try:
number = random.randrange(0, total_tickets)
for queue in queues:
if number >= tickets[
queue.name][0] and number < tickets[queue.name][1]:
return queue
except ValueError:
return None
# Something wrong happens
return None
def get_queue(self, *args, **kwargs):
return self._run_lottery(self.queue_info.get_queues())
class HighestPriorityFirst(SelectQueueBase):
"""Highest priority first.
Get the highest priority non-empty queue first.
If all queues are empty, get the highest priority empty queue.
"""
def get_queue(self, *args, **kwargs):
queue = self.queue_info.get_highest_priority_queue_that_needs_work()
if queue:
return queue
queues = self.queue_info.get_queues()
queues.sort(key=lambda x: x.priority, reverse=True)
return queues[0]
class HighestPriorityLottery(Lottery):
"""Highest priority first + lottery.
Get highest priority non-empty queue first.
If all queues are empty, run lottery on empty queues.
"""
def get_queue(self, *args, **kwargs):
queue = self.queue_info.get_highest_priority_queue_that_needs_work()
if queue:
return queue
return self._run_lottery(self.queue_info.get_queues())
class LotteryLottery(Lottery):
"""Run lottery on both non-empty and empty queues.
    Run lottery on all queues. When we get a non-empty queue, return
immediately. If we get 10 empty queues in a row, run lottery again,
and long poll on whatever queue we get.
"""
def get_queue(self, *args, **kwargs):
retry_empty_queue_count = 10
for i in range(retry_empty_queue_count):
queue = self._run_lottery(self.queue_info.get_queues())
if self.queue_info.does_queue_need_work(queue):
return queue
return self._run_lottery(self.queue_info.get_queues())
class ReducedLottery(Lottery):
"""Improved lottery scheduling.
    Limiting the lottery pool by removing known empty queues. When we get a
    non-empty queue, return immediately. If we get an empty queue, we remove
    it from the lottery pool and rerun the lottery. If all
queues are empty, run lottery on all queues, and long poll on whatever
queue we get.
"""
def get_queue(self, *args, **kwargs):
# Make a new copy of list, so no side effect on queue_info.queues
candidate_queues = self.queue_info.get_queues()[:]
while len(candidate_queues) > 0:
queue = self._run_lottery(candidate_queues)
if self.queue_info.does_queue_need_work(queue):
return queue
else:
candidate_queues.remove(queue)
return self._run_lottery(self.queue_info.get_queues())
| """Module containing queue selection algorithms.
How to implement your own queue selection algorithm?
class MyQueueSelector(SelectQueueBase):
def get_queue(self):
# Get a list of all queues defined in the YAML file that is
# specified at QUEUE_CONFIG in settings file.
#
# You may use these two properties of a queue object to select
# a queue:
#
# - name: string of queue name
# - priority: integer of queue priority; larger value,
# higher priority
queues = self.queue_info.get_queues()
# Implement your algorithm here
# ...
# Eventually, return one of queue object from queues
return queue
"""
from __future__ import absolute_import
import random
from six.moves import range
class SelectQueueBase(object):
"""Base class for selecting a queue.
The only method that needs to be implemented:
get_queue: it's called for each task processing cycle on task worker.
"""
def __init__(self, queue_info):
self.queue_info = queue_info
def get_queue(self, *args, **kwargs):
"""Returns a TaskQueue object."""
raise NotImplementedError('Base class cannot be used directly.')
class Random(SelectQueueBase):
"""Randomly selects a queue without considering priority."""
def get_queue(self):
queues = self.queue_info.get_queues()
return random.choice(queues)
class Lottery(SelectQueueBase):
"""Use lottery scheduling algorithm to select a queue based on priority."""
@staticmethod
def _run_lottery(queues):
"""Draw lottery from a list of candidate queues.
:param list[TaskQueue] queues: a list of candidate queues.
:return: A TaskQueue object that wins lottery. If it fails (e.g.,
invalid priority of queues), it returns None.
:rtype: TaskQueue
"""
tickets = {}
total_tickets = 0
for queue in queues:
# Queue priority should be within 1 to 100.
if queue.priority < 1 or queue.priority > 100:
continue
priority = queue.priority
low = total_tickets
total_tickets += priority
high = total_tickets
tickets[queue.name] = (low, high)
# [0, total_tickets)
try:
number = random.randrange(0, total_tickets)
for queue in queues:
if number >= tickets[
queue.name][0] and number < tickets[queue.name][1]:
return queue
except ValueError:
return None
# Something wrong happens
return None
def get_queue(self, *args, **kwargs):
return self._run_lottery(self.queue_info.get_queues())
class HighestPriorityFirst(SelectQueueBase):
"""Highest priority first.
Get the highest priority non-empty queue first.
If all queues are empty, get the highest priority empty queue.
"""
def get_queue(self, *args, **kwargs):
queue = self.queue_info.get_highest_priority_queue_that_needs_work()
if queue:
return queue
queues = self.queue_info.get_queues()
queues.sort(key=lambda x: x.priority, reverse=True)
return queues[0]
class HighestPriorityLottery(Lottery):
"""Highest priority first + lottery.
Get highest priority non-empty queue first.
If all queues are empty, run lottery on empty queues.
"""
def get_queue(self, *args, **kwargs):
queue = self.queue_info.get_highest_priority_queue_that_needs_work()
if queue:
return queue
return self._run_lottery(self.queue_info.get_queues())
class LotteryLottery(Lottery):
"""Run lottery on both non-empty and empty queues.
    Run lottery on all queues. When we get a non-empty queue, return
immediately. If we get 10 empty queues in a row, run lottery again,
and long poll on whatever queue we get.
"""
def get_queue(self, *args, **kwargs):
retry_empty_queue_count = 10
for i in range(retry_empty_queue_count):
queue = self._run_lottery(self.queue_info.get_queues())
if self.queue_info.does_queue_need_work(queue):
return queue
return self._run_lottery(self.queue_info.get_queues())
class ReducedLottery(Lottery):
"""Improved lottery scheduling.
    Limiting the lottery pool by removing known empty queues. When we get a
    non-empty queue, return immediately. If we get an empty queue, we remove
    it from the lottery pool and rerun the lottery. If all
queues are empty, run lottery on all queues, and long poll on whatever
queue we get.
"""
def get_queue(self, *args, **kwargs):
# Make a new copy of list, so no side effect on queue_info.queues
candidate_queues = self.queue_info.get_queues()[:]
while len(candidate_queues) > 0:
queue = self._run_lottery(candidate_queues)
if self.queue_info.does_queue_need_work(queue):
return queue
else:
candidate_queues.remove(queue)
return self._run_lottery(self.queue_info.get_queues())
| en | 0.795965 | Module containing queue selection algorithms. How to implement your own queue selection algorithm? class MyQueueSelector(SelectQueueBase): def get_queue(self): # Get a list of all queues defined in the YAML file that is # specified at QUEUE_CONFIG in settings file. # # You may use these two properties of a queue object to select # a queue: # # - name: string of queue name # - priority: integer of queue priority; larger value, # higher priority queues = self.queue_info.get_queues() # Implement your algorithm here # ... # Eventually, return one of queue object from queues return queue Base class for selecting a queue. The only method that needs to be implemented: get_queue: it's called for each task processing cycle on task worker. Returns a TaskQueue object. Randomly selects a queue without considering priority. Use lottery scheduling algorithm to select a queue based on priority. Draw lottery from a list of candidate queues. :param list[TaskQueue] queues: a list of candidate queues. :return: A TaskQueue object that wins lottery. If it fails (e.g., invalid priority of queues), it returns None. :rtype: TaskQueue # Queue priority should be within 1 to 100. # [0, total_tickets) # Something wrong happens Highest priority first. Get the highest priority non-empty queue first. If all queues are empty, get the highest priority empty queue. Highest priority first + lottery. Get highest priority non-empty queue first. If all queues are empty, run lottery on empty queues. Run lottery on both non-empty and empty queues. Run lottery on all queues. When we get an non-empty queue, return immediately. If we get 10 empty queues in a row, run lottery again, and long poll on whatever queue we get. Improved lottery scheduling. Limiting the lottery pool by removing known empty queues. When we get an non-empty queue, return immediately. If we get an empty queue, we'll remove this empty queue out of the lottery pool and rerun lottery again. If all queues are empty, run lottery on all queues, and long poll on whatever queue we get. # Make a new copy of list, so no side effect on queue_info.queues | 3.724429 | 4 |
python/helpers/epydoc/gui.py | truthiswill/intellij-community | 339 | 6631300 | <filename>python/helpers/epydoc/gui.py
#!/usr/bin/env python
#
# objdoc: epydoc command-line interface
# <NAME>
#
# Created [03/15/02 10:31 PM]
# $Id: gui.py 646 2004-03-19 19:01:37Z edloper $
#
"""
Graphical interface to epydoc. This interface might be useful for
systems where it's inconvenient to use the command-line interface
(such as Windows). It supports many (but not all) of the features
that are supported by the command-line interface. It also supports
loading and saving of X{project files}, which store a set of related
modules, and the options that should be used to generate the
documentation for those modules.
Usage::
epydocgui [OPTIONS] [FILE.prj | MODULES...]
FILE.prj An epydoc GUI project file.
MODULES... A list of Python modules to document.
-V, --version Print the version of epydoc.
-h, -?, --help, --usage Display this usage message
--debug Do not suppress error messages
@todo: Use ini-style project files, rather than pickles (using the
same format as the CLI).
"""
__docformat__ = 'epytext en'
import sys, os.path, re, glob
from Tkinter import *
from tkFileDialog import askopenfilename, asksaveasfilename
from thread import start_new_thread, exit_thread
from pickle import dump, load
# askdirectory is only defined in python 2.2+; fall back on
# asksaveasfilename if it's not available.
try: from tkFileDialog import askdirectory
except: askdirectory = None
# Include support for Zope, if it's available.
try: import ZODB
except: pass
##/////////////////////////////////////////////////////////////////////////
## CONSTANTS
##/////////////////////////////////////////////////////////////////////////
DEBUG = 0
# Colors for tkinter display
BG_COLOR='#e0e0e0'
ACTIVEBG_COLOR='#e0e0e0'
TEXT_COLOR='black'
ENTRYSELECT_COLOR = ACTIVEBG_COLOR
SELECT_COLOR = '#208070'
MESSAGE_COLOR = '#000060'
ERROR_COLOR = '#600000'
GUIERROR_COLOR = '#600000'
WARNING_COLOR = '#604000'
HEADER_COLOR = '#000000'
# Convenience dictionaries for specifying widget colors
COLOR_CONFIG = {'background':BG_COLOR, 'highlightcolor': BG_COLOR,
'foreground':TEXT_COLOR, 'highlightbackground': BG_COLOR}
ENTRY_CONFIG = {'background':BG_COLOR, 'highlightcolor': BG_COLOR,
'foreground':TEXT_COLOR, 'highlightbackground': BG_COLOR,
'selectbackground': ENTRYSELECT_COLOR,
'selectforeground': TEXT_COLOR}
SB_CONFIG = {'troughcolor':BG_COLOR, 'activebackground':BG_COLOR,
'background':BG_COLOR, 'highlightbackground':BG_COLOR}
LISTBOX_CONFIG = {'highlightcolor': BG_COLOR, 'highlightbackground': BG_COLOR,
'foreground':TEXT_COLOR, 'selectforeground': TEXT_COLOR,
'selectbackground': ACTIVEBG_COLOR, 'background':BG_COLOR}
BUTTON_CONFIG = {'background':BG_COLOR, 'highlightthickness':0, 'padx':4,
'highlightbackground': BG_COLOR, 'foreground':TEXT_COLOR,
'highlightcolor': BG_COLOR, 'activeforeground': TEXT_COLOR,
'activebackground': ACTIVEBG_COLOR, 'pady':0}
CBUTTON_CONFIG = {'background':BG_COLOR, 'highlightthickness':0, 'padx':4,
'highlightbackground': BG_COLOR, 'foreground':TEXT_COLOR,
'highlightcolor': BG_COLOR, 'activeforeground': TEXT_COLOR,
'activebackground': ACTIVEBG_COLOR, 'pady':0,
'selectcolor': SELECT_COLOR}
SHOWMSG_CONFIG = CBUTTON_CONFIG.copy()
SHOWMSG_CONFIG['foreground'] = MESSAGE_COLOR
SHOWWRN_CONFIG = CBUTTON_CONFIG.copy()
SHOWWRN_CONFIG['foreground'] = WARNING_COLOR
SHOWERR_CONFIG = CBUTTON_CONFIG.copy()
SHOWERR_CONFIG['foreground'] = ERROR_COLOR
# Colors for the progress bar
PROGRESS_HEIGHT = 16
PROGRESS_WIDTH = 200
PROGRESS_BG='#305060'
PROGRESS_COLOR1 = '#30c070'
PROGRESS_COLOR2 = '#60ffa0'
PROGRESS_COLOR3 = '#106030'
# On tkinter canvases, where's the zero coordinate?
if sys.platform.lower().startswith('win'):
DX = 3; DY = 3
DH = 0; DW = 7
else:
DX = 1; DY = 1
DH = 1; DW = 3
# How much of the progress is in each subtask?
IMPORT_PROGRESS = 0.1
BUILD_PROGRESS = 0.2
WRITE_PROGRESS = 1.0 - BUILD_PROGRESS - IMPORT_PROGRESS
##/////////////////////////////////////////////////////////////////////////
## IMAGE CONSTANTS
##/////////////////////////////////////////////////////////////////////////
UP_GIF = '''\
R0lGODlhCwAMALMAANnZ2QDMmQCZZgBmZgAAAAAzM////////wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAACH5BAEAAAAALAAAAAALAAwAAAQjEMhJKxCW4gzCIJxXZIEwFGDlDadqsii1sq1U0nA64+ON
5xEAOw==
'''
DOWN_GIF = '''\
R0lGODlhCwAMALMAANnZ2QDMmQCZZgBmZgAAAAAzM////////wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAACH5BAEAAAAALAAAAAALAAwAAAQmEIQxgLVUCsppsVPngVtXEFfIfWk5nBe4xuSL0tKLy/cu
7JffJQIAOw==
'''
LEFT_GIF='''\
R0lGODlhDAALAKIAANnZ2QDMmQCZZgBmZgAAAAAzM////////yH5BAEAAAAALAAAAAAMAAsAAAM4
CLocgaCrESiDoBshOAoAgBEyMzgAEIGCowsiOLoLgEBVOLoIqlSFo4OgC1RYM4Ogq1RYg6DLVJgA
Ow==
'''
RIGHT_GIF='''\
R0lGODlhDAALAKIAANnZ2QDMmQBmZgCZZgAzMwAAAP///////yH5BAEAAAAALAAAAAAMAAsAAAM5
GIGgyzIYgaCrIigTgaALIigyEQiqKLoTgaAoujuDgKJLVAgqIoJEBQAIIkKEhaArRFgIukqFoMsJ
ADs=
'''
##/////////////////////////////////////////////////////////////////////////
## MessageIO
##/////////////////////////////////////////////////////////////////////////
from epydoc import log
from epydoc.util import wordwrap
class GUILogger(log.Logger):
_STAGES = [40, 7, 1, 3, 1, 30, 1, 2, 100]
def __init__(self, progress, cancel):
self._progress = progress
self._cancel = cancel
self.clear()
def clear(self):
self._messages = []
self._n = 0
self._stage = 0
self._message_blocks = []
def log(self, level, message):
message = wordwrap(str(message)).rstrip() + '\n'
if self._message_blocks:
self._message_blocks[-1][-1].append( (level, message) )
else:
self._messages.append( (level, message) )
def start_block(self, header):
self._message_blocks.append( (header, []) )
def end_block(self):
header, messages = self._message_blocks.pop()
if messages:
self._messages.append( ('uline', ' '*75+'\n') )
self.log('header', header)
self._messages += messages
self._messages.append( ('uline', ' '*75+'\n') )
def start_progress(self, header=None):
self.log(log.INFO, header)
self._stage += 1
def end_progress(self):
pass
def progress(self, percent, message=''):
if self._cancel[0]: exit_thread()
i = self._stage - 1
p = ((sum(self._STAGES[:i]) + percent*self._STAGES[i]) /
float(sum(self._STAGES)))
self._progress[0] = p
def read(self):
if self._n >= len(self._messages):
return None, None
else:
self._n += 1
return self._messages[self._n-1]
##/////////////////////////////////////////////////////////////////////////
## THREADED DOCUMENTER
##/////////////////////////////////////////////////////////////////////////
def document(options, cancel, done):
"""
Create the documentation for C{modules}, using the options
specified by C{options}. C{document} is designed to be started in
its own thread by L{EpydocGUI._go}.
@param options: The options to use for generating documentation.
This includes keyword options that can be given to
L{docwriter.html.HTMLWriter}, as well as the option C{target}, which
controls where the output is written to.
    @type options: C{dictionary}
    @param cancel: A one-element list used as a shared cancellation flag;
        the GUI sets C{cancel[0]} to a true value to abort the run.
    @param done: A one-element list that receives C{'done'} or C{'cancel'}
        when documentation generation finishes.
    """
from epydoc.docwriter.html import HTMLWriter
from epydoc.docbuilder import build_doc_index
import epydoc.docstringparser
# Set the default docformat.
docformat = options.get('docformat', 'epytext')
epydoc.docstringparser.DEFAULT_DOCFORMAT = docformat
try:
parse = options['introspect_or_parse'] in ('parse', 'both')
introspect = options['introspect_or_parse'] in ('introspect', 'both')
docindex = build_doc_index(options['modules'], parse, introspect)
html_writer = HTMLWriter(docindex, **options)
log.start_progress('Writing HTML docs to %r' % options['target'])
html_writer.write(options['target'])
log.end_progress()
# We're done.
log.warning('Finished!')
done[0] = 'done'
except SystemExit:
# Cancel.
log.error('Cancelled!')
done[0] ='cancel'
raise
except Exception, e:
# We failed.
log.error('Internal error: %s' % e)
done[0] ='cancel'
raise
except:
# We failed.
log.error('Internal error!')
done[0] ='cancel'
raise
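# A minimal sketch (with hypothetical values) of the options dictionary that
# EpydocGUI._getopts() assembles and document() consumes in its worker thread;
# 'modules', 'target', 'docformat' and 'introspect_or_parse' are read directly
# above, and the remaining keys are passed through to the HTML writer:
#
#     options = {'modules': ['mypackage'], 'target': 'html',
#                'docformat': 'epytext', 'introspect_or_parse': 'both',
#                'prj_name': 'My Project', 'prj_url': None}
#     cancel, done = [0], [None]
#     start_new_thread(document, (options, cancel, done))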
##/////////////////////////////////////////////////////////////////////////
## GUI
##/////////////////////////////////////////////////////////////////////////
class EpydocGUI:
"""
    A graphical user interface to epydoc.
"""
def __init__(self):
self._afterid = 0
self._progress = [None]
self._cancel = [0]
self._filename = None
self._init_dir = None
# Store a copy of sys.modules, so that we can restore it
# later. This is useful for making sure that we reload
# everything when we re-build its documentation. This will
# *not* reload the modules that are present when the EpydocGUI
# is created, but that should only contain some builtins, some
# epydoc modules, Tkinter, pickle, and thread..
self._old_modules = sys.modules.keys()
# Create the main window.
self._root = Tk()
self._root['background']=BG_COLOR
self._root.bind('<Control-q>', self.destroy)
self._root.bind('<Alt-q>', self.destroy)
self._root.bind('<Alt-x>', self.destroy)
self._root.bind('<Control-x>', self.destroy)
#self._root.bind('<Control-d>', self.destroy)
self._root.title('Epydoc')
self._rootframe = Frame(self._root, background=BG_COLOR,
border=2, relief='raised')
self._rootframe.pack(expand=1, fill='both', padx=2, pady=2)
# Set up the basic frames. Do not pack the options frame or
# the messages frame; the GUI has buttons to expand them.
leftframe = Frame(self._rootframe, background=BG_COLOR)
leftframe.pack(expand=1, fill='both', side='left')
optsframe = Frame(self._rootframe, background=BG_COLOR)
mainframe = Frame(leftframe, background=BG_COLOR)
mainframe.pack(expand=1, fill='both', side='top')
ctrlframe = Frame(mainframe, background=BG_COLOR)
ctrlframe.pack(side="bottom", fill='x', expand=0)
msgsframe = Frame(leftframe, background=BG_COLOR)
self._optsframe = optsframe
self._msgsframe = msgsframe
# Initialize all the frames, etc.
self._init_menubar()
self._init_progress_bar(mainframe)
self._init_module_list(mainframe)
self._init_options(optsframe, ctrlframe)
self._init_messages(msgsframe, ctrlframe)
self._init_bindings()
# Set up logging
self._logger = GUILogger(self._progress, self._cancel)
log.register_logger(self._logger)
# Open the messages pane by default.
self._messages_toggle()
## For testing options:
#self._options_toggle()
def _init_menubar(self):
menubar = Menu(self._root, borderwidth=2,
background=BG_COLOR,
activebackground=BG_COLOR)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='New Project', underline=0,
command=self._new,
accelerator='Ctrl-n')
filemenu.add_command(label='Open Project', underline=0,
command=self._open,
accelerator='Ctrl-o')
filemenu.add_command(label='Save Project', underline=0,
command=self._save,
accelerator='Ctrl-s')
filemenu.add_command(label='Save As..', underline=5,
command=self._saveas,
accelerator='Ctrl-a')
filemenu.add_separator()
filemenu.add_command(label='Exit', underline=1,
command=self.destroy,
accelerator='Ctrl-x')
menubar.add_cascade(label='File', underline=0, menu=filemenu)
gomenu = Menu(menubar, tearoff=0)
        # 'Run Epydoc' should start documentation generation rather than the
        # Open Project dialog, so bind it to self._go.
        gomenu.add_command(label='Run Epydoc', command=self._go,
                           underline=0, accelerator='Alt-g')
menubar.add_cascade(label='Run', menu=gomenu, underline=0)
self._root.config(menu=menubar)
def _init_module_list(self, mainframe):
mframe1 = Frame(mainframe, relief='groove', border=2,
background=BG_COLOR)
mframe1.pack(side="top", fill='both', expand=1, padx=4, pady=3)
l = Label(mframe1, text="Modules to document:",
justify='left', **COLOR_CONFIG)
l.pack(side='top', fill='none', anchor='nw', expand=0)
mframe2 = Frame(mframe1, background=BG_COLOR)
mframe2.pack(side="top", fill='both', expand=1)
mframe3 = Frame(mframe1, background=BG_COLOR)
mframe3.pack(side="bottom", fill='x', expand=0)
self._module_list = Listbox(mframe2, width=80, height=10,
selectmode='multiple',
**LISTBOX_CONFIG)
self._module_list.pack(side="left", fill='both', expand=1)
sb = Scrollbar(mframe2, orient='vertical',**SB_CONFIG)
sb['command']=self._module_list.yview
sb.pack(side='right', fill='y')
self._module_list.config(yscrollcommand=sb.set)
Label(mframe3, text="Add:", **COLOR_CONFIG).pack(side='left')
self._module_entry = Entry(mframe3, **ENTRY_CONFIG)
self._module_entry.pack(side='left', fill='x', expand=1)
self._module_entry.bind('<Return>', self._entry_module)
self._module_delete = Button(mframe3, text="Remove",
command=self._delete_module,
**BUTTON_CONFIG)
self._module_delete.pack(side='right', expand=0, padx=2)
self._module_browse = Button(mframe3, text="Browse",
command=self._browse_module,
**BUTTON_CONFIG)
self._module_browse.pack(side='right', expand=0, padx=2)
def _init_progress_bar(self, mainframe):
pframe1 = Frame(mainframe, background=BG_COLOR)
pframe1.pack(side="bottom", fill='x', expand=0)
self._go_button = Button(pframe1, width=4, text='Start',
underline=0, command=self._go,
**BUTTON_CONFIG)
self._go_button.pack(side='left', padx=4)
pframe2 = Frame(pframe1, relief='groove', border=2,
background=BG_COLOR)
pframe2.pack(side="top", fill='x', expand=1, padx=4, pady=3)
Label(pframe2, text='Progress:', **COLOR_CONFIG).pack(side='left')
H = self._H = PROGRESS_HEIGHT
W = self._W = PROGRESS_WIDTH
c = self._canvas = Canvas(pframe2, height=H+DH, width=W+DW,
background=PROGRESS_BG, border=0,
selectborderwidth=0, relief='sunken',
insertwidth=0, insertborderwidth=0,
highlightbackground=BG_COLOR)
self._canvas.pack(side='left', fill='x', expand=1, padx=4)
self._r2 = c.create_rectangle(0,0,0,0, outline=PROGRESS_COLOR2)
self._r3 = c.create_rectangle(0,0,0,0, outline=PROGRESS_COLOR3)
self._r1 = c.create_rectangle(0,0,0,0, fill=PROGRESS_COLOR1,
outline='')
self._canvas.bind('<Configure>', self._configure)
def _init_messages(self, msgsframe, ctrlframe):
self._downImage = PhotoImage(master=self._root, data=DOWN_GIF)
self._upImage = PhotoImage(master=self._root, data=UP_GIF)
# Set up the messages control frame
b1 = Button(ctrlframe, text="Messages", justify='center',
command=self._messages_toggle, underline=0,
highlightthickness=0, activebackground=BG_COLOR,
border=0, relief='flat', padx=2, pady=0, **COLOR_CONFIG)
b2 = Button(ctrlframe, image=self._downImage, relief='flat',
border=0, command=self._messages_toggle,
activebackground=BG_COLOR, **COLOR_CONFIG)
self._message_button = b2
self._messages_visible = 0
b2.pack(side="left")
b1.pack(side="left")
f = Frame(msgsframe, background=BG_COLOR)
f.pack(side='top', expand=1, fill='both')
messages = Text(f, width=80, height=10, **ENTRY_CONFIG)
messages['state'] = 'disabled'
messages.pack(fill='both', expand=1, side='left')
self._messages = messages
# Add a scrollbar
sb = Scrollbar(f, orient='vertical', **SB_CONFIG)
sb.pack(fill='y', side='right')
sb['command'] = messages.yview
messages['yscrollcommand'] = sb.set
# Set up some colorization tags
messages.tag_config('error', foreground=ERROR_COLOR)
messages.tag_config('warning', foreground=WARNING_COLOR)
messages.tag_config('guierror', foreground=GUIERROR_COLOR)
messages.tag_config('message', foreground=MESSAGE_COLOR)
messages.tag_config('header', foreground=HEADER_COLOR)
messages.tag_config('uline', underline=1)
# Keep track of tag state..
self._in_header = 0
self._last_tag = 'error'
# Add some buttons
buttons = Frame(msgsframe, background=BG_COLOR)
buttons.pack(side='bottom', fill='x')
self._show_errors = IntVar(self._root)
self._show_errors.set(1)
self._show_warnings = IntVar(self._root)
self._show_warnings.set(1)
self._show_messages = IntVar(self._root)
self._show_messages.set(0)
Checkbutton(buttons, text='Show Messages', var=self._show_messages,
command=self._update_msg_tags,
**SHOWMSG_CONFIG).pack(side='left')
Checkbutton(buttons, text='Show Warnings', var=self._show_warnings,
command=self._update_msg_tags,
**SHOWWRN_CONFIG).pack(side='left')
Checkbutton(buttons, text='Show Errors', var=self._show_errors,
command=self._update_msg_tags,
**SHOWERR_CONFIG).pack(side='left')
self._update_msg_tags()
def _update_msg_tags(self, *e):
elide_errors = not self._show_errors.get()
elide_warnings = not self._show_warnings.get()
elide_messages = not self._show_messages.get()
elide_headers = elide_errors and elide_warnings
self._messages.tag_config('error', elide=elide_errors)
self._messages.tag_config('guierror', elide=elide_errors)
self._messages.tag_config('warning', elide=elide_warnings)
self._messages.tag_config('message', elide=elide_messages)
self._messages.tag_config('header', elide=elide_headers)
def _init_options(self, optsframe, ctrlframe):
self._leftImage=PhotoImage(master=self._root, data=LEFT_GIF)
self._rightImage=PhotoImage(master=self._root, data=RIGHT_GIF)
# Set up the options control frame
b1 = Button(ctrlframe, text="Options", justify='center',
border=0, relief='flat',
command=self._options_toggle, padx=2,
underline=0, pady=0, highlightthickness=0,
activebackground=BG_COLOR, **COLOR_CONFIG)
b2 = Button(ctrlframe, image=self._rightImage, relief='flat',
border=0, command=self._options_toggle,
activebackground=BG_COLOR, **COLOR_CONFIG)
self._option_button = b2
self._options_visible = 0
b2.pack(side="right")
b1.pack(side="right")
oframe2 = Frame(optsframe, relief='groove', border=2,
background=BG_COLOR)
oframe2.pack(side="right", fill='both',
expand=0, padx=4, pady=3, ipadx=4)
Label(oframe2, text="Project Options", font='helvetica -16',
**COLOR_CONFIG).pack(anchor='w')
oframe3 = Frame(oframe2, background=BG_COLOR)
oframe3.pack(fill='x')
oframe4 = Frame(oframe2, background=BG_COLOR)
oframe4.pack(fill='x')
oframe7 = Frame(oframe2, background=BG_COLOR)
oframe7.pack(fill='x')
div = Frame(oframe2, background=BG_COLOR, border=1, relief='sunk')
div.pack(ipady=1, fill='x', padx=4, pady=2)
Label(oframe2, text="Help File", font='helvetica -16',
**COLOR_CONFIG).pack(anchor='w')
oframe5 = Frame(oframe2, background=BG_COLOR)
oframe5.pack(fill='x')
div = Frame(oframe2, background=BG_COLOR, border=1, relief='sunk')
div.pack(ipady=1, fill='x', padx=4, pady=2)
Label(oframe2, text="CSS Stylesheet", font='helvetica -16',
**COLOR_CONFIG).pack(anchor='w')
oframe6 = Frame(oframe2, background=BG_COLOR)
oframe6.pack(fill='x')
#==================== oframe3 ====================
# -n NAME, --name NAME
row = 0
l = Label(oframe3, text="Project Name:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
self._name_entry = Entry(oframe3, **ENTRY_CONFIG)
self._name_entry.grid(row=row, column=1, sticky='ew', columnspan=3)
# -u URL, --url URL
row += 1
l = Label(oframe3, text="Project URL:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
self._url_entry = Entry(oframe3, **ENTRY_CONFIG)
self._url_entry.grid(row=row, column=1, sticky='ew', columnspan=3)
# -o DIR, --output DIR
row += 1
l = Label(oframe3, text="Output Directory:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
self._out_entry = Entry(oframe3, **ENTRY_CONFIG)
self._out_entry.grid(row=row, column=1, sticky='ew', columnspan=2)
self._out_browse = Button(oframe3, text="Browse",
command=self._browse_out,
**BUTTON_CONFIG)
self._out_browse.grid(row=row, column=3, sticky='ew', padx=2)
#==================== oframe4 ====================
# --no-frames
row = 0
self._frames_var = IntVar(self._root)
self._frames_var.set(1)
l = Label(oframe4, text="Generate a frame-based table of contents",
**COLOR_CONFIG)
l.grid(row=row, column=1, sticky='w')
cb = Checkbutton(oframe4, var=self._frames_var, **CBUTTON_CONFIG)
cb.grid(row=row, column=0, sticky='e')
# --no-private
row += 1
self._private_var = IntVar(self._root)
self._private_var.set(1)
l = Label(oframe4, text="Generate documentation for private objects",
**COLOR_CONFIG)
l.grid(row=row, column=1, sticky='w')
cb = Checkbutton(oframe4, var=self._private_var, **CBUTTON_CONFIG)
cb.grid(row=row, column=0, sticky='e')
# --show-imports
row += 1
self._imports_var = IntVar(self._root)
self._imports_var.set(0)
l = Label(oframe4, text="List imported classes and functions",
**COLOR_CONFIG)
l.grid(row=row, column=1, sticky='w')
cb = Checkbutton(oframe4, var=self._imports_var, **CBUTTON_CONFIG)
cb.grid(row=row, column=0, sticky='e')
#==================== oframe7 ====================
# --docformat
row += 1
l = Label(oframe7, text="Default Docformat:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
df_var = self._docformat_var = StringVar(self._root)
self._docformat_var.set('epytext')
b = Radiobutton(oframe7, var=df_var, text='Epytext',
value='epytext', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
b = Radiobutton(oframe7, var=df_var, text='ReStructuredText',
value='restructuredtext', **CBUTTON_CONFIG)
b.grid(row=row, column=2, columnspan=2, sticky='w')
row += 1
b = Radiobutton(oframe7, var=df_var, text='Plaintext',
value='plaintext', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
b = Radiobutton(oframe7, var=df_var, text='Javadoc',
value='javadoc', **CBUTTON_CONFIG)
b.grid(row=row, column=2, columnspan=2, sticky='w')
row += 1
        # Separator
Frame(oframe7, background=BG_COLOR).grid(row=row, column=1, pady=3)
row += 1
# --inheritance
l = Label(oframe7, text="Inheritance Style:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
inh_var = self._inheritance_var = StringVar(self._root)
self._inheritance_var.set('grouped')
b = Radiobutton(oframe7, var=inh_var, text='Grouped',
value='grouped', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
b = Radiobutton(oframe7, var=inh_var, text='Listed',
value='listed', **CBUTTON_CONFIG)
b.grid(row=row, column=2, sticky='w')
b = Radiobutton(oframe7, var=inh_var, text='Included',
value='included', **CBUTTON_CONFIG)
b.grid(row=row, column=3, sticky='w')
row += 1
        # Separator
Frame(oframe7, background=BG_COLOR).grid(row=row, column=1, pady=3)
row += 1
# --parse-only, --introspect-only
l = Label(oframe7, text="Get docs from:", **COLOR_CONFIG)
l.grid(row=row, column=0, sticky='e')
iop_var = self._introspect_or_parse_var = StringVar(self._root)
self._introspect_or_parse_var.set('both')
b = Radiobutton(oframe7, var=iop_var, text='Parsing',
value='parse', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
b = Radiobutton(oframe7, var=iop_var, text='Introspecting',
value='introspect', **CBUTTON_CONFIG)
b.grid(row=row, column=2, sticky='w')
b = Radiobutton(oframe7, var=iop_var, text='Both',
value='both', **CBUTTON_CONFIG)
b.grid(row=row, column=3, sticky='w')
row += 1
#==================== oframe5 ====================
# --help-file FILE
row = 0
self._help_var = StringVar(self._root)
self._help_var.set('default')
b = Radiobutton(oframe5, var=self._help_var,
text='Default',
value='default', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
row += 1
b = Radiobutton(oframe5, var=self._help_var,
text='Select File',
value='-other-', **CBUTTON_CONFIG)
b.grid(row=row, column=1, sticky='w')
self._help_entry = Entry(oframe5, **ENTRY_CONFIG)
self._help_entry.grid(row=row, column=2, sticky='ew')
self._help_browse = Button(oframe5, text='Browse',
command=self._browse_help,
**BUTTON_CONFIG)
self._help_browse.grid(row=row, column=3, sticky='ew', padx=2)
from epydoc.docwriter.html_css import STYLESHEETS
items = STYLESHEETS.items()
def _css_sort(css1, css2):
if css1[0] == 'default': return -1
elif css2[0] == 'default': return 1
else: return cmp(css1[0], css2[0])
items.sort(_css_sort)
#==================== oframe6 ====================
# -c CSS, --css CSS
# --private-css CSS
row = 0
#l = Label(oframe6, text="Public", **COLOR_CONFIG)
#l.grid(row=row, column=0, sticky='e')
#l = Label(oframe6, text="Private", **COLOR_CONFIG)
#l.grid(row=row, column=1, sticky='w')
row += 1
css_var = self._css_var = StringVar(self._root)
css_var.set('default')
#private_css_var = self._private_css_var = StringVar(self._root)
#private_css_var.set('default')
for (name, (sheet, descr)) in items:
b = Radiobutton(oframe6, var=css_var, value=name, **CBUTTON_CONFIG)
b.grid(row=row, column=0, sticky='e')
#b = Radiobutton(oframe6, var=private_css_var, value=name,
# text=name, **CBUTTON_CONFIG)
#b.grid(row=row, column=1, sticky='w')
l = Label(oframe6, text=descr, **COLOR_CONFIG)
l.grid(row=row, column=1, sticky='w')
row += 1
b = Radiobutton(oframe6, var=css_var, value='-other-',
**CBUTTON_CONFIG)
b.grid(row=row, column=0, sticky='e')
#b = Radiobutton(oframe6, text='Select File', var=private_css_var,
# value='-other-', **CBUTTON_CONFIG)
#b.grid(row=row, column=1, sticky='w')
#l = Label(oframe6, text='Select File', **COLOR_CONFIG)
#l.grid(row=row, column=1, sticky='w')
self._css_entry = Entry(oframe6, **ENTRY_CONFIG)
self._css_entry.grid(row=row, column=1, sticky='ew')
self._css_browse = Button(oframe6, text="Browse",
command=self._browse_css,
**BUTTON_CONFIG)
self._css_browse.grid(row=row, column=2, sticky='ew', padx=2)
def _init_bindings(self):
self._root.bind('<Delete>', self._delete_module)
self._root.bind('<Alt-o>', self._options_toggle)
self._root.bind('<Alt-m>', self._messages_toggle)
self._root.bind('<F5>', self._go)
self._root.bind('<Alt-s>', self._go)
self._root.bind('<Control-n>', self._new)
self._root.bind('<Control-o>', self._open)
self._root.bind('<Control-s>', self._save)
self._root.bind('<Control-a>', self._saveas)
def _options_toggle(self, *e):
if self._options_visible:
self._optsframe.forget()
self._option_button['image'] = self._rightImage
self._options_visible = 0
else:
self._optsframe.pack(fill='both', side='right')
self._option_button['image'] = self._leftImage
self._options_visible = 1
def _messages_toggle(self, *e):
if self._messages_visible:
self._msgsframe.forget()
self._message_button['image'] = self._rightImage
self._messages_visible = 0
else:
self._msgsframe.pack(fill='both', side='bottom', expand=1)
self._message_button['image'] = self._leftImage
self._messages_visible = 1
def _configure(self, event):
self._W = event.width-DW
def _delete_module(self, *e):
selection = self._module_list.curselection()
if len(selection) != 1: return
self._module_list.delete(selection[0])
def _entry_module(self, *e):
modules = [self._module_entry.get()]
if glob.has_magic(modules[0]):
modules = glob.glob(modules[0])
for name in modules:
self.add_module(name, check=1)
self._module_entry.delete(0, 'end')
def _browse_module(self, *e):
title = 'Select a module for documentation'
ftypes = [('Python module', '.py'),
('Python extension', '.so'),
('All files', '*')]
filename = askopenfilename(filetypes=ftypes, title=title,
defaultextension='.py',
initialdir=self._init_dir)
if not filename: return
self._init_dir = os.path.dirname(filename)
self.add_module(filename, check=1)
def _browse_css(self, *e):
title = 'Select a CSS stylesheet'
ftypes = [('CSS Stylesheet', '.css'), ('All files', '*')]
filename = askopenfilename(filetypes=ftypes, title=title,
defaultextension='.css')
if not filename: return
self._css_entry.delete(0, 'end')
self._css_entry.insert(0, filename)
def _browse_help(self, *e):
title = 'Select a help file'
self._help_var.set('-other-')
ftypes = [('HTML file', '.html'), ('All files', '*')]
filename = askopenfilename(filetypes=ftypes, title=title,
defaultextension='.html')
if not filename: return
self._help_entry.delete(0, 'end')
self._help_entry.insert(0, filename)
def _browse_out(self, *e):
ftypes = [('All files', '*')]
title = 'Choose the output directory'
if askdirectory is not None:
filename = askdirectory(mustexist=0, title=title)
if not filename: return
else:
# Hack for Python 2.1 or earlier:
filename = asksaveasfilename(filetypes=ftypes, title=title,
initialfile='--this directory--')
if not filename: return
(f1, f2) = os.path.split(filename)
if f2 == '--this directory--': filename = f1
self._out_entry.delete(0, 'end')
self._out_entry.insert(0, filename)
def destroy(self, *e):
if self._root is None: return
# Unload any modules that we've imported
for m in sys.modules.keys():
if m not in self._old_modules: del sys.modules[m]
self._root.destroy()
self._root = None
def add_module(self, name, check=0):
from epydoc.util import is_package_dir, is_pyname, is_module_file
from epydoc.docintrospecter import get_value_from_name
        from epydoc.docintrospecter import get_value_from_filename
        # get_value_from_scriptname is needed by the plain-script branch below.
        from epydoc.docintrospecter import get_value_from_scriptname
if (os.path.isfile(name) or is_package_dir(name) or is_pyname(name)):
# Check that it's a good module, if requested.
if check:
try:
if is_module_file(name) or is_package_dir(name):
get_value_from_filename(name)
elif os.path.isfile(name):
get_value_from_scriptname(name)
else:
get_value_from_name(name)
except ImportError, e:
log.error(e)
self._update_messages()
self._root.bell()
return
# Add the module to the list of modules.
self._module_list.insert('end', name)
self._module_list.yview('end')
else:
log.error("Couldn't find %r" % name)
self._update_messages()
self._root.bell()
def mainloop(self, *args, **kwargs):
self._root.mainloop(*args, **kwargs)
def _getopts(self):
options = {}
options['modules'] = self._module_list.get(0, 'end')
options['prj_name'] = self._name_entry.get() or ''
options['prj_url'] = self._url_entry.get() or None
options['docformat'] = self._docformat_var.get()
options['inheritance'] = self._inheritance_var.get()
options['introspect_or_parse'] = self._introspect_or_parse_var.get()
options['target'] = self._out_entry.get() or 'html'
options['frames'] = self._frames_var.get()
options['private'] = self._private_var.get()
options['show_imports'] = self._imports_var.get()
if self._help_var.get() == '-other-':
options['help'] = self._help_entry.get() or None
else:
options['help'] = None
if self._css_var.get() == '-other-':
options['css'] = self._css_entry.get() or 'default'
else:
options['css'] = self._css_var.get() or 'default'
#if self._private_css_var.get() == '-other-':
# options['private_css'] = self._css_entry.get() or 'default'
#else:
# options['private_css'] = self._private_css_var.get() or 'default'
return options
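    # Example of the dictionary assembled above (illustrative values only):
    #   {'modules': ('mypkg',), 'prj_name': 'MyProject', 'prj_url': None,
    #    'docformat': 'epytext', 'inheritance': 'grouped',
    #    'introspect_or_parse': 'both', 'target': 'html', 'frames': 1,
    #    'private': 1, 'show_imports': 0, 'help': None, 'css': 'default'}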
def _go(self, *e):
if len(self._module_list.get(0,'end')) == 0:
self._root.bell()
return
        if self._progress[0] is not None:
self._cancel[0] = 1
return
# Construct the argument list for document().
opts = self._getopts()
self._progress[0] = 0.0
self._cancel[0] = 0
args = (opts, self._cancel, self._progress)
# Clear the messages window.
self._messages['state'] = 'normal'
self._messages.delete('0.0', 'end')
self._messages['state'] = 'disabled'
self._logger.clear()
# Restore the module list. This will force re-loading of
# anything that we're documenting.
for m in sys.modules.keys():
if m not in self._old_modules:
del sys.modules[m]
# [xx] Reset caches??
# Start documenting
start_new_thread(document, args)
# Start the progress bar.
self._go_button['text'] = 'Stop'
self._afterid += 1
dt = 300 # How often to update, in milliseconds
self._update(dt, self._afterid)
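        # Note: document() runs in a worker thread (start_new_thread above);
        # _update() polls self._progress and the GUI logger roughly every
        # `dt` milliseconds until the run completes or is cancelled.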
def _update_messages(self):
while 1:
level, data = self._logger.read()
if data is None: break
self._messages['state'] = 'normal'
if level == 'header':
self._messages.insert('end', data, 'header')
elif level == 'uline':
self._messages.insert('end', data, 'uline header')
            elif level >= log.ERROR:
                data = data.rstrip()+'\n\n'
                self._messages.insert('end', data, 'guierror')
            elif level >= log.DOCSTRING_WARNING:
                data = data.rstrip()+'\n\n'
                self._messages.insert('end', data, 'warning')
            elif level >= log.INFO:
                data = data.rstrip()+'\n\n'
                self._messages.insert('end', data, 'message')
# if data == '\n':
# if self._last_tag != 'header2':
# self._messages.insert('end', '\n', self._last_tag)
# elif data == '='*75:
# if self._messages.get('end-3c', 'end') == '\n\n\n':
# self._messages.delete('end-1c')
# self._in_header = 1
# self._messages.insert('end', ' '*75, 'uline header')
# self._last_tag = 'header'
# elif data == '-'*75:
# self._in_header = 0
# self._last_tag = 'header2'
# elif self._in_header:
# self._messages.insert('end', data, 'header')
# self._last_tag = 'header'
# elif re.match(r'\s*(L\d+:|-)?\s*Warning: ', data):
# self._messages.insert('end', data, 'warning')
# self._last_tag = 'warning'
# else:
# self._messages.insert('end', data, 'error')
# self._last_tag = 'error'
self._messages['state'] = 'disabled'
self._messages.yview('end')
def _update(self, dt, id):
if self._root is None: return
if self._progress[0] is None: return
if id != self._afterid: return
# Update the messages box
self._update_messages()
# Update the progress bar.
if self._progress[0] == 'done': p = self._W + DX
elif self._progress[0] == 'cancel': p = -5
else: p = DX + self._W * self._progress[0]
self._canvas.coords(self._r1, DX+1, DY+1, p, self._H+1)
self._canvas.coords(self._r2, DX, DY, p-1, self._H)
self._canvas.coords(self._r3, DX+1, DY+1, p, self._H+1)
# Are we done?
if self._progress[0] in ('done', 'cancel'):
if self._progress[0] == 'cancel': self._root.bell()
self._go_button['text'] = 'Start'
self._progress[0] = None
return
self._root.after(dt, self._update, dt, id)
def _new(self, *e):
self._module_list.delete(0, 'end')
self._name_entry.delete(0, 'end')
self._url_entry.delete(0, 'end')
self._docformat_var.set('epytext')
self._inheritance_var.set('grouped')
self._introspect_or_parse_var.set('both')
self._out_entry.delete(0, 'end')
self._module_entry.delete(0, 'end')
self._css_entry.delete(0, 'end')
self._help_entry.delete(0, 'end')
self._frames_var.set(1)
self._private_var.set(1)
self._imports_var.set(0)
self._css_var.set('default')
#self._private_css_var.set('default')
self._help_var.set('default')
self._filename = None
self._init_dir = None
def _open(self, *e):
title = 'Open project'
ftypes = [('Project file', '.prj'),
('All files', '*')]
        filename = askopenfilename(filetypes=ftypes, title=title,
                                   defaultextension='.prj')
if not filename: return
self.open(filename)
def open(self, prjfile):
from epydoc.docwriter.html_css import STYLESHEETS
self._filename = prjfile
try:
opts = load(open(prjfile, 'r'))
modnames = list(opts.get('modules', []))
modnames.sort()
self._module_list.delete(0, 'end')
for name in modnames:
self.add_module(name)
self._module_entry.delete(0, 'end')
self._name_entry.delete(0, 'end')
if opts.get('prj_name'):
self._name_entry.insert(0, opts['prj_name'])
self._url_entry.delete(0, 'end')
if opts.get('prj_url'):
self._url_entry.insert(0, opts['prj_url'])
self._docformat_var.set(opts.get('docformat', 'epytext'))
self._inheritance_var.set(opts.get('inheritance', 'grouped'))
self._introspect_or_parse_var.set(
opts.get('introspect_or_parse', 'both'))
self._help_entry.delete(0, 'end')
if opts.get('help') is None:
self._help_var.set('default')
else:
self._help_var.set('-other-')
self._help_entry.insert(0, opts.get('help'))
self._out_entry.delete(0, 'end')
self._out_entry.insert(0, opts.get('target', 'html'))
self._frames_var.set(opts.get('frames', 1))
self._private_var.set(opts.get('private', 1))
self._imports_var.set(opts.get('show_imports', 0))
self._css_entry.delete(0, 'end')
if opts.get('css', 'default') in STYLESHEETS.keys():
self._css_var.set(opts.get('css', 'default'))
else:
self._css_var.set('-other-')
self._css_entry.insert(0, opts.get('css', 'default'))
#if opts.get('private_css', 'default') in STYLESHEETS.keys():
# self._private_css_var.set(opts.get('private_css', 'default'))
#else:
# self._private_css_var.set('-other-')
# self._css_entry.insert(0, opts.get('private_css', 'default'))
except Exception, e:
log.error('Error opening %s: %s' % (prjfile, e))
self._root.bell()
def _save(self, *e):
if self._filename is None: return self._saveas()
try:
opts = self._getopts()
dump(opts, open(self._filename, 'w'))
except Exception, e:
if self._filename is None:
log.error('Error saving: %s' % e)
else:
log.error('Error saving %s: %s' % (self._filename, e))
self._root.bell()
def _saveas(self, *e):
title = 'Save project as'
ftypes = [('Project file', '.prj'), ('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes, title=title,
defaultextension='.prj')
if not filename: return
self._filename = filename
self._save()
def _version():
"""
Display the version information, and exit.
@rtype: C{None}
"""
import epydoc
print "Epydoc version %s" % epydoc.__version__
sys.exit(0)
# At some point I could add:
# --show-messages, --hide-messages
# --show-options, --hide-options
def _usage():
print
print 'Usage: epydocgui [OPTIONS] [FILE.prj | MODULES...]'
print
print ' FILE.prj An epydoc GUI project file.'
print ' MODULES... A list of Python modules to document.'
print ' -V, --version Print the version of epydoc.'
print ' -h, -?, --help, --usage Display this usage message'
print ' --debug Do not suppress error messages'
print
sys.exit(0)
def _error(s):
s = '%s; run "%s -h" for usage' % (s, os.path.basename(sys.argv[0]))
if len(s) > 80:
i = s.rfind(' ', 0, 80)
if i>0: s = s[:i]+'\n'+s[i+1:]
print >>sys.stderr, s
sys.exit(1)
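# Illustrative command lines handled by gui() below (hypothetical names):
#   epydocgui                         -> start with an empty project
#   epydocgui docs.prj                -> load a saved project file
#   epydocgui mypkg scripts/build.py  -> pre-populate the module list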
def gui():
global DEBUG
sys.stderr = sys.__stderr__
projects = []
modules = []
for arg in sys.argv[1:]:
if arg[0] == '-':
if arg != '-V': arg = arg.lower()
if arg in ('-h', '--help', '-?', '--usage'): _usage()
elif arg in ('-V', '--version'): _version()
elif arg in ('--debug',): DEBUG = 1
else:
_error('Unknown parameter %r' % arg)
elif arg[-4:] == '.prj': projects.append(arg)
else: modules.append(arg)
if len(projects) > 1:
_error('Too many projects')
if len(projects) == 1:
if len(modules) > 0:
_error('You must specify either a project or a list of modules')
if not os.path.exists(projects[0]):
_error('Cannot open project file %s' % projects[0])
gui = EpydocGUI()
gui.open(projects[0])
gui.mainloop()
else:
gui = EpydocGUI()
for module in modules: gui.add_module(module, check=1)
gui.mainloop()
if __name__ == '__main__': gui()
| <filename>python/helpers/epydoc/gui.py
 | en | 0.343344 | 2.403937 | 2 |
python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py | TochkaAI/Paddle | 3 | 6631301 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import six
import numpy as np
import unittest
import paddle
import paddle.fluid as fluid
from paddle.jit import to_static
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
PLACE = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(
)
class SubNetWithDict(fluid.dygraph.Layer):
def __init__(self, hidden_size=16, output_size=16):
super(SubNetWithDict, self).__init__()
init_weight = lambda x: fluid.ParamAttr(initializer=fluid.initializer.Constant(x))
self.q_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.6))
self.k_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.5))
self.v_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.2))
def forward(self, input, cache=None):
input = fluid.dygraph.to_variable(input)
q = self.q_fc(input)
k = self.k_fc(input)
v = self.v_fc(input)
if cache is not None:
cache_k, cache_v = cache["k"], cache["v"]
k = 0.1 * cache_k + k
v = 0.2 * cache_v + v
cache["k"], cache["v"] = k, v
weight = fluid.layers.matmul(x=q, y=k, transpose_y=True)
weight = fluid.layers.softmax(weight)
out = fluid.layers.matmul(weight, v)
return out
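        # Illustrative shapes (using the [10, 16] input from TestNetWithDict
        # below, with hidden_size == output_size == 16):
        #   q, k, v:                    [10, 16]
        #   weight = softmax(q @ k^T):  [10, 10]
        #   out    = weight @ v:        [10, 16]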
class MainNetWithDict(fluid.dygraph.Layer):
def __init__(self, batch_size=64, hidden_size=16, output_size=16):
super(MainNetWithDict, self).__init__()
self.batch_size = batch_size
self.hidden_size = hidden_size
self.output_size = output_size
self.sub_net = SubNetWithDict(hidden_size, output_size)
@to_static
def forward(self, input, max_len=4):
input = fluid.dygraph.to_variable(input)
cache = {
"k": fluid.layers.fill_constant(
shape=[self.batch_size, self.output_size],
dtype='float32',
value=0),
"v": fluid.layers.fill_constant(
shape=[self.batch_size, self.output_size],
dtype='float32',
value=0),
}
# TODO(Aurelius84): The following code will be converted into:
# max_len = layers.cond(layers.shape(input)[0] != max_len,
# lambda: layers.shape(input)[0], lambda: max_len)
# But max_len should be wrapped into tensor, which is not supported.
# Comment out this line of code for now.
# max_len = input.shape[0] if input.shape[0] != max_len else max_len
out = input
for i in range(max_len):
out = self.sub_net(out, cache)
cache = update_cache(cache)
return out
# Test to call function defined outside of class.
def update_cache(cache):
for k, val in six.iteritems(cache):
cache[k] = fluid.layers.softmax(val)
return cache
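# Illustrative effect of update_cache: {'k': k, 'v': v} becomes
# {'k': softmax(k), 'v': softmax(v)}; MainNetWithDict.forward calls it once
# per loop iteration above.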
class TestNetWithDict(unittest.TestCase):
"""
TestCase for the transformation from control flow `if/else`
dependent on tensor in Dygraph into Static `fluid.layers.cond`.
"""
def setUp(self):
self.x = np.random.random([10, 16]).astype('float32')
self.batch_size = self.x.shape[0]
def _run_static(self):
return self.train(to_static=True)
def _run_dygraph(self):
return self.train(to_static=False)
def train(self, to_static=False):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
with fluid.dygraph.guard(PLACE):
net = MainNetWithDict(batch_size=self.batch_size)
ret = net(self.x)
return ret.numpy()
def test_ast_to_func(self):
self.assertTrue((self._run_dygraph() == self._run_static()).all())
# Tests for dict pop
@paddle.jit.to_static
def test_dic_pop(x):
x = paddle.to_tensor(x)
dict_a = {"red": 0, "green": 1, "blue": 2}
m = dict_a.pop("red")
n = dict_a.pop("black", 3)
out = x + m + n
return out
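# Worked example (illustrative): m == 0 (the value popped for "red") and
# n == 3 (the default, since "black" is missing), so the result is x + 0 + 3.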
@paddle.jit.to_static
def test_dic_pop_2(x):
x = paddle.to_tensor(x)
dict_a = {"red": x, "green": x + 1, "blue": x + 3}
m = dict_a.pop("red")
n = dict_a.pop("black", 3)
out = x + m + n
return out
class TestDictPop(unittest.TestCase):
def setUp(self):
self.input = np.random.random((3)).astype('int32')
self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
self._set_test_func()
def _set_test_func(self):
self.dygraph_func = test_dic_pop
def _run_static(self):
return self._run(to_static=True)
def _run_dygraph(self):
return self._run(to_static=False)
def _run(self, to_static):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
result = self.dygraph_func(self.input)
return result.numpy()
def test_transformed_result(self):
dygraph_res = self._run_dygraph()
static_res = self._run_static()
self.assertTrue(
np.allclose(dygraph_res, static_res),
msg='dygraph result is {}\nstatic result is {}'.format(dygraph_res,
static_res))
class TestDictPop2(TestDictPop):
def _set_test_func(self):
self.dygraph_func = test_dic_pop_2
class NetWithDictPop(paddle.nn.Layer):
def __init__(self):
super(NetWithDictPop, self).__init__()
@to_static
def forward(self, x, **kwargs):
x = paddle.to_tensor(x)
y = kwargs.pop('y', None)
if y:
y = paddle.to_tensor(x)
x += y
x.mean()
return x
class TestDictPop(TestNetWithDict):
def setUp(self):
self.x = np.array([2, 2]).astype('float32')
def train(self, to_static=False):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
with fluid.dygraph.guard(PLACE):
net = NetWithDictPop()
ret = net(z=0, x=self.x, y=True)
return ret.numpy()
def test_ast_to_func(self):
dygraph_result = self._run_dygraph()
static_result = self._run_static()
self.assertTrue(
(dygraph_result == static_result).all(),
msg="dygraph result: {}\nstatic result: {}".format(dygraph_result,
static_result))
class TestDictCmpInFor(unittest.TestCase):
def test_with_for(self):
def func():
pos = [1, 3]
neg = [-1, -3]
dict_val = {'minus': 0}
# test `zip` with `for`
for (x, y) in zip(pos, neg):
val = x - y
dict_val.update(
{k: val + dict_val[k]
for k, v in dict_val.items()})
return dict_val
self.assertEqual(paddle.jit.to_static(func)()['minus'], 8)
def test_with_for_enumerate(self):
def func():
pos = [1, 3]
neg = [-1, -3]
dict_val = {'minus': 0}
# test `zip` with `for`
for i, (x, y) in enumerate(zip(pos, neg)):
val = x - y
dict_val.update(
{k: val + dict_val[k]
for k, v in dict_val.items()})
return dict_val
self.assertEqual(paddle.jit.to_static(func)()['minus'], 8)
if __name__ == '__main__':
unittest.main()
| # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import six
import numpy as np
import unittest
import paddle
import paddle.fluid as fluid
from paddle.jit import to_static
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
PLACE = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(
)
class SubNetWithDict(fluid.dygraph.Layer):
def __init__(self, hidden_size=16, output_size=16):
super(SubNetWithDict, self).__init__()
init_weight = lambda x: fluid.ParamAttr(initializer=fluid.initializer.Constant(x))
self.q_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.6))
self.k_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.5))
self.v_fc = fluid.dygraph.Linear(
input_dim=hidden_size,
output_dim=output_size,
bias_attr=False,
param_attr=init_weight(0.2))
def forward(self, input, cache=None):
input = fluid.dygraph.to_variable(input)
q = self.q_fc(input)
k = self.k_fc(input)
v = self.v_fc(input)
if cache is not None:
cache_k, cache_v = cache["k"], cache["v"]
k = 0.1 * cache_k + k
v = 0.2 * cache_v + v
cache["k"], cache["v"] = k, v
weight = fluid.layers.matmul(x=q, y=k, transpose_y=True)
weight = fluid.layers.softmax(weight)
out = fluid.layers.matmul(weight, v)
return out
class MainNetWithDict(fluid.dygraph.Layer):
def __init__(self, batch_size=64, hidden_size=16, output_size=16):
super(MainNetWithDict, self).__init__()
self.batch_size = batch_size
self.hidden_size = hidden_size
self.output_size = output_size
self.sub_net = SubNetWithDict(hidden_size, output_size)
@to_static
def forward(self, input, max_len=4):
input = fluid.dygraph.to_variable(input)
cache = {
"k": fluid.layers.fill_constant(
shape=[self.batch_size, self.output_size],
dtype='float32',
value=0),
"v": fluid.layers.fill_constant(
shape=[self.batch_size, self.output_size],
dtype='float32',
value=0),
}
# TODO(Aurelius84): The following code will be converted into:
# max_len = layers.cond(layers.shape(input)[0] != max_len,
# lambda: layers.shape(input)[0], lambda: max_len)
# But max_len should be wrapped into tensor, which is not supported.
# Comment out this line of code for now.
# max_len = input.shape[0] if input.shape[0] != max_len else max_len
out = input
for i in range(max_len):
out = self.sub_net(out, cache)
cache = update_cache(cache)
return out
# Test to call function defined outside of class.
def update_cache(cache):
for k, val in six.iteritems(cache):
cache[k] = fluid.layers.softmax(val)
return cache
class TestNetWithDict(unittest.TestCase):
"""
TestCase for the transformation from control flow `if/else`
dependent on tensor in Dygraph into Static `fluid.layers.cond`.
"""
def setUp(self):
self.x = np.random.random([10, 16]).astype('float32')
self.batch_size = self.x.shape[0]
def _run_static(self):
return self.train(to_static=True)
def _run_dygraph(self):
return self.train(to_static=False)
def train(self, to_static=False):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
with fluid.dygraph.guard(PLACE):
net = MainNetWithDict(batch_size=self.batch_size)
ret = net(self.x)
return ret.numpy()
def test_ast_to_func(self):
self.assertTrue((self._run_dygraph() == self._run_static()).all())
# Tests for dict pop
@paddle.jit.to_static
def test_dic_pop(x):
x = paddle.to_tensor(x)
dict_a = {"red": 0, "green": 1, "blue": 2}
m = dict_a.pop("red")
n = dict_a.pop("black", 3)
out = x + m + n
return out
@paddle.jit.to_static
def test_dic_pop_2(x):
x = paddle.to_tensor(x)
dict_a = {"red": x, "green": x + 1, "blue": x + 3}
m = dict_a.pop("red")
n = dict_a.pop("black", 3)
out = x + m + n
return out
class TestDictPop(unittest.TestCase):
def setUp(self):
self.input = np.random.random((3)).astype('int32')
self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
self._set_test_func()
def _set_test_func(self):
self.dygraph_func = test_dic_pop
def _run_static(self):
return self._run(to_static=True)
def _run_dygraph(self):
return self._run(to_static=False)
def _run(self, to_static):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
result = self.dygraph_func(self.input)
return result.numpy()
def test_transformed_result(self):
dygraph_res = self._run_dygraph()
static_res = self._run_static()
self.assertTrue(
np.allclose(dygraph_res, static_res),
msg='dygraph result is {}\nstatic result is {}'.format(dygraph_res,
static_res))
class TestDictPop2(TestDictPop):
def _set_test_func(self):
self.dygraph_func = test_dic_pop_2
class NetWithDictPop(paddle.nn.Layer):
def __init__(self):
super(NetWithDictPop, self).__init__()
@to_static
def forward(self, x, **kwargs):
x = paddle.to_tensor(x)
y = kwargs.pop('y', None)
if y:
y = paddle.to_tensor(x)
x += y
x.mean()
return x
class TestDictPop(TestNetWithDict):
def setUp(self):
self.x = np.array([2, 2]).astype('float32')
def train(self, to_static=False):
prog_trans = ProgramTranslator()
prog_trans.enable(to_static)
with fluid.dygraph.guard(PLACE):
net = NetWithDictPop()
ret = net(z=0, x=self.x, y=True)
return ret.numpy()
def test_ast_to_func(self):
dygraph_result = self._run_dygraph()
static_result = self._run_static()
self.assertTrue(
(dygraph_result == static_result).all(),
msg="dygraph result: {}\nstatic result: {}".format(dygraph_result,
static_result))
class TestDictCmpInFor(unittest.TestCase):
def test_with_for(self):
def func():
pos = [1, 3]
neg = [-1, -3]
dict_val = {'minus': 0}
# test `zip` with `for`
for (x, y) in zip(pos, neg):
val = x - y
dict_val.update(
{k: val + dict_val[k]
for k, v in dict_val.items()})
return dict_val
self.assertEqual(paddle.jit.to_static(func)()['minus'], 8)
def test_with_for_enumerate(self):
def func():
pos = [1, 3]
neg = [-1, -3]
dict_val = {'minus': 0}
# test `zip` with `for`
for i, (x, y) in enumerate(zip(pos, neg)):
val = x - y
dict_val.update(
{k: val + dict_val[k]
for k, v in dict_val.items()})
return dict_val
self.assertEqual(paddle.jit.to_static(func)()['minus'], 8)
if __name__ == '__main__':
unittest.main()
| en | 0.789072 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(Aurelius84): The following code will be converted into: # max_len = layers.cond(layers.shape(input)[0] != max_len, # lambda: layers.shape(input)[0], lambda: max_len) # But max_len should be wrapped into tensor, which is not supported. # Comment out this line of code for now. # max_len = input.shape[0] if input.shape[0] != max_len else max_len # Test to call function defined outside of class. TestCase for the transformation from control flow `if/else` dependent on tensor in Dygraph into Static `fluid.layers.cond`. # Tests for dict pop # test `zip` with `for` # test `zip` with `for` | 2.001274 | 2 |
example/bulk.py | loevlie/ce_expansion | 1 | 6631302 | <filename>example/bulk.py<gh_stars>1-10
import os
import numpy as np
from ce_expansion.atomgraph import atomgraph
from ce_expansion.npdb import db_inter
# GLOBAL fontsize of axis labels and text
FS = 40
shape = ['icosahedron', 'cuboctahedron',
'elongated-pentagonal-bipyramid', 'fcc-cube'][0]
metals = 'aucu'
minn = 1 if shape.startswith('cub') else 2
# number of shells on inside and outside to ignore in calculation
buffer = 3
for s in range(2 * buffer + 1, 11):
if shape.startswith('cub'):
s -= 1
# get number of atoms
n = db_inter.get_shell2num(shape, s)
# get half such that metal1 has the extra atom (if natoms is odd)
n = (n + n % 2) // 2
res = db_inter.get_bimet_result(metals, shape=shape, num_shells=s,
n_metal1=n)
# get ordering array
ordering = np.array([int(i) for i in res.ordering])
# load bonds list
bonds = res.nanoparticle.load_bonds_list()
# build atomgraph object
ag = atomgraph.AtomGraph(bonds, 'Au', 'Cu')
# get atom indices for each shell
shells = db_inter.build_atoms_in_shell_dict(shape, s)
# create a 'Test Atom' to ensure shells are being correctly counted
test_atom = res.build_atoms_obj()
# remove shells from dict not in study
maxshell = max(shells.keys())
dropcount = 0
for drop in range(buffer):
# pop inner and outer <buffer> layers
for d in shells.pop(drop) + shells.pop(maxshell - drop):
# set symbol of dropped atoms to Br
test_atom[d].symbol = 'Br'
# track number of atoms dropped
dropcount += 1
# track counts
# [Au-Au, Au-Cu, Cu-Cu]
counts = np.zeros((len(test_atom) - dropcount, 3))
# number of shells in study
nshellstudy = len(shells)
atomi = 0
for s in sorted(shells):
for i in shells[s]:
# base atom type (0: Au, 1: Cu)
a1 = ordering[i]
matches = np.unique(bonds[np.where(bonds == i)[0]])
i2s = np.array([j for j in matches if j != atomi])
for i2 in i2s:
a2 = ordering[i2]
counts[atomi, a1 + a2] += 1
atomi += 1
# get each count type
au_counts = counts[np.where(counts[:, 2] == 0)[0]][:, :2]
cu_counts = np.flip(counts[np.where(counts[:, 0] == 0)[0]][:, 1:], 0)
# ensure that all atoms have been correctly accounted for
assert len(au_counts) + len(cu_counts) == len(test_atom) - dropcount
assert len(au_counts) == (test_atom.symbols == 'Au').sum()
assert len(cu_counts) == (test_atom.symbols == 'Cu').sum()
# calc count fractions
au_fracs = au_counts.mean(0) / 12
cu_fracs = cu_counts.mean(0) / 12
# TEMP FIX!!!
# only look at CN 12 atoms
tokeepcn = np.where(ag.cns == 12)[0]
todropcn = np.where(ag.cns != 12)[0]
# save half of test_atom
if nshellstudy > 0:
test_atom.positions -= test_atom.positions.mean(0)
test_ato2 = test_atom[np.where(
abs(test_atom.positions[:, 0]) < 1)[0]]
test_ato2.write(os.path.expanduser('~') +
'\\desktop\\SAMPLES\\slice-%ishells_%s.xyz'
% (nshellstudy, shape[:3]))
del test_atom[test_atom.symbols == 'Br']
test_atom.write(os.path.expanduser('~') +
'\\desktop\\SAMPLES\\%ishells_%s.xyz'
% (nshellstudy, shape[:3]))
print(''.center(20, '-'))
print(shape)
print('%i total atoms' % res.num_atoms)
print('%i atoms ignored' % dropcount)
print('%i atoms studied' % len(counts))
print('%i shells studied' % nshellstudy)
print('Au: -Au (%.2f), -Cu (%.2f)' % (au_fracs[0], au_fracs[1]))
print('Cu: -Cu (%.2f), -Au (%.2f)' % (cu_fracs[0], cu_fracs[1]))
print(''.center(20, '-'))
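# Editor's note (not part of the original script): the rounding used above,
# n = (n + n % 2) // 2, gives metal1 the extra atom when the shell total is odd, e.g.
#   13 atoms -> 7 for metal1 and 6 for metal2
#   12 atoms -> 6 and 6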
| <filename>example/bulk.py<gh_stars>1-10
import os
import numpy as np
from ce_expansion.atomgraph import atomgraph
from ce_expansion.npdb import db_inter
# GLOBAL fontsize of axis labels and text
FS = 40
shape = ['icosahedron', 'cuboctahedron',
'elongated-pentagonal-bipyramid', 'fcc-cube'][0]
metals = 'aucu'
minn = 1 if shape.startswith('cub') else 2
# number of shells on inside and outside to ignore in calculation
buffer = 3
for s in range(2 * buffer + 1, 11):
if shape.startswith('cub'):
s -= 1
# get number of atoms
n = db_inter.get_shell2num(shape, s)
# get half such that metal1 has the extra atom (if natoms is odd)
n = (n + n % 2) // 2
res = db_inter.get_bimet_result(metals, shape=shape, num_shells=s,
n_metal1=n)
# get ordering array
ordering = np.array([int(i) for i in res.ordering])
# load bonds list
bonds = res.nanoparticle.load_bonds_list()
# build atomgraph object
ag = atomgraph.AtomGraph(bonds, 'Au', 'Cu')
# get atom indices for each shell
shells = db_inter.build_atoms_in_shell_dict(shape, s)
# create a 'Test Atom' to ensure shells are being correctly counted
test_atom = res.build_atoms_obj()
# remove shells from dict not in study
maxshell = max(shells.keys())
dropcount = 0
for drop in range(buffer):
# pop inner and outer <buffer> layers
for d in shells.pop(drop) + shells.pop(maxshell - drop):
# set symbol of dropped atoms to Br
test_atom[d].symbol = 'Br'
# track number of atoms dropped
dropcount += 1
# track counts
# [Au-Au, Au-Cu, Cu-Cu]
counts = np.zeros((len(test_atom) - dropcount, 3))
# number of shells in study
nshellstudy = len(shells)
atomi = 0
for s in sorted(shells):
for i in shells[s]:
# base atom type (0: Au, 1: Cu)
a1 = ordering[i]
matches = np.unique(bonds[np.where(bonds == i)[0]])
i2s = np.array([j for j in matches if j != atomi])
for i2 in i2s:
a2 = ordering[i2]
counts[atomi, a1 + a2] += 1
atomi += 1
# get each count type
au_counts = counts[np.where(counts[:, 2] == 0)[0]][:, :2]
cu_counts = np.flip(counts[np.where(counts[:, 0] == 0)[0]][:, 1:], 0)
# ensure that all atoms have been correctly accounted for
assert len(au_counts) + len(cu_counts) == len(test_atom) - dropcount
assert len(au_counts) == (test_atom.symbols == 'Au').sum()
assert len(cu_counts) == (test_atom.symbols == 'Cu').sum()
# calc count fractions
au_fracs = au_counts.mean(0) / 12
cu_fracs = cu_counts.mean(0) / 12
# TEMP FIX!!!
# only look at CN 12 atoms
tokeepcn = np.where(ag.cns == 12)[0]
todropcn = np.where(ag.cns != 12)[0]
# save half of test_atom
if nshellstudy > 0:
test_atom.positions -= test_atom.positions.mean(0)
test_ato2 = test_atom[np.where(
abs(test_atom.positions[:, 0]) < 1)[0]]
test_ato2.write(os.path.expanduser('~') +
'\\desktop\\SAMPLES\\slice-%ishells_%s.xyz'
% (nshellstudy, shape[:3]))
del test_atom[test_atom.symbols == 'Br']
test_atom.write(os.path.expanduser('~') +
'\\desktop\\SAMPLES\\%ishells_%s.xyz'
% (nshellstudy, shape[:3]))
print(''.center(20, '-'))
print(shape)
print('%i total atoms' % res.num_atoms)
print('%i atoms ignored' % dropcount)
print('%i atoms studied' % len(counts))
print('%i shells studied' % nshellstudy)
print('Au: -Au (%.2f), -Cu (%.2f)' % (au_fracs[0], au_fracs[1]))
print('Cu: -Cu (%.2f), -Au (%.2f)' % (cu_fracs[0], cu_fracs[1]))
print(''.center(20, '-'))
| en | 0.798014 | # GLOBAL fontsize of axis labels and text # number of shells on inside and outside to ignore in calculation # get number of atoms # get half such that metal1 has the extra atom (if natoms is odd) # get ordering array # load bonds list # build atomgraph object # get atom indices for each shell # create a 'Test Atom' to ensure shells are being correctly counted # remove shells from dict not in study # pop inner and outer <buffer> layers # set symbol of dropped atoms to Br # track number of atoms dropped # track counts # [Au-Au, Au-Cu, Cu-Cu] # number of shells in study # base atom type (0: Au, 1: Cu) # get each count type # ensure that all atoms have been correctly accounted for # calc count fractions # TEMP FIX!!! # only look at CN 12 atoms # save half of test_atom | 2.331858 | 2 |
Documentation/GuidesFromPlosCompBioPaper/ExampleCaseC/AdditionalInputFiles/PRSCondition/LAD2coronaryRdController.py | carthurs/CRIMSONGUI | 10 | 6631303 | version https://git-lfs.github.com/spec/v1
oid sha256:61bea305f96063c5fb171c13e8b1ca15cae0d5682a9a7b4ecc29798ff2741fbd
size 11919
| version https://git-lfs.github.com/spec/v1
oid sha256:61bea305f96063c5fb171c13e8b1ca15cae0d5682a9a7b4ecc29798ff2741fbd
size 11919
| none | 1 | 0.765739 | 1 |
|
proj/pred/app/archive_logfile.py | kant/predictprotein-webserver-proq3 | 0 | 6631304 | #!/usr/bin/python
# Filename: archive_logfile.py
# Description: archive logfile using gnu gzip
import os
import sys
import re
import gzip
progname = os.path.basename(sys.argv[0])
wspace = ''.join([" "]*len(progname))
usage_short="""
Usage: %s FILE [FILE ...] [-maxsize STR]
"""%(progname)
usage_ext="""
Description:
Archive (gzip) the logfile if its size is over maxsize
OPTIONS:
-l LISTFILE List of log files
-maxsize STR Set the threshold of the filesize, the logfile will be gzipped
if its file size is >= maxsize, (default: 20M)
e.g. 500k, 20M, 500000b, 5000, 1G
-h, --help Print this help message and exit
Created 2014-05-22, updated 2014-05-22, <NAME>
"""
usage_exp="""
Examples:
%s /var/log/program.output.log
"""%(progname)
def PrintHelp(fpout=sys.stdout):#{{{
print >> fpout, usage_short
print >> fpout, usage_ext
print >> fpout, usage_exp#}}}
def my_getopt_str(argv, i):#{{{
"""
Get a string from the argument list, return the string and the updated
index to the argument list
"""
try:
opt = argv[i+1]
if opt[0] == "-":
msg = "Error! option '%s' must be followed by a string"\
", not an option arg."
print >> sys.stderr, msg%(argv[i])
sys.exit(1)
return (opt, i+2)
except IndexError:
msg = "Error! option '%s' must be followed by a string"
print >> sys.stderr, msg%(argv[i])
raise
#}}}
def Size_human2byte(s):#{{{
if s.isdigit():
return int(s)
else:
s = s.upper()
match = re.match(r"([0-9]+)([A-Z]+)", s , re.I)
if match:
items = match.groups()
size = int(items[0])
if items[1] in ["B"]:
return size
elif items[1] in ["K", "KB"]:
return size*1024
elif items[1] in ["M", "MB"]:
return size*1024*1024
elif items[1] in ["G", "GB"]:
return size*1024*1024*1024
else:
print >> sys.stderr, "Bad maxsize argument:",s
return -1
else:
print >> sys.stderr, "Bad maxsize argument:",s
return -1
#}}}
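# Editor's note (not part of the original script): example values returned by
# Size_human2byte for the formats listed in the usage text:
#   Size_human2byte("500k") -> 512000 (500 * 1024)
#   Size_human2byte("20M")  -> 20971520 (20 * 1024 * 1024)
#   Size_human2byte("5000") -> 5000 (plain byte count)
#   Size_human2byte("1G")   -> 1073741824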
def ArchiveFile(filename, maxsize):#{{{
"""
Archive the logfile if its size exceeds the limit
"""
if not os.path.exists(filename):
print >> sys.stderr, filename, "does not exist. ignore."
return 1
else:
filesize = os.path.getsize(filename)
if filesize > maxsize:
cnt = 0
zipfile = ""
while 1:
cnt += 1
zipfile = "%s.%d.gz"%(filename, cnt)
if not os.path.exists(zipfile):
break
# write zip file
try:
f_in = open(filename, 'rb')
except IOError:
print >> sys.stderr, "Failed to read %s"%(filename)
return 1
try:
f_out = gzip.open(zipfile, 'wb')
except IOError:
print >> sys.stderr, "Failed to write to %s"%(zipfile)
return 1
f_out.writelines(f_in)
f_out.close()
f_in.close()
print "%s is archived to %s"%(filename, zipfile)
os.remove(filename)
return 0
#}}}
def main(g_params):#{{{
argv = sys.argv
numArgv = len(argv)
if numArgv < 2:
PrintHelp()
return 1
fileList = []
fileListFile = ""
maxsize_str = ""
i = 1
isNonOptionArg=False
while i < numArgv:
if isNonOptionArg == True:
fileList.append(argv[i])
isNonOptionArg = False
i += 1
elif argv[i] == "--":
isNonOptionArg = True
i += 1
elif argv[i][0] == "-":
if argv[i] in ["-h", "--help"]:
PrintHelp()
return 1
elif argv[i] in ["-maxsize", "--maxsize"]:
(maxsize_str, i) = my_getopt_str(argv, i)
elif argv[i] in ["-l", "--l"] :
(fileListFile, i) = my_getopt_str(argv, i)
elif argv[i] in ["-q", "--q"]:
g_params['isQuiet'] = True
i += 1
else:
print >> sys.stderr, "Error! Wrong argument:", argv[i]
return 1
else:
fileList.append(argv[i])
i += 1
if maxsize_str != "":
maxsize = Size_human2byte(maxsize_str)
if maxsize > 0:
g_params['maxsize'] = maxsize
else:
return 1
# print "maxsize=", g_params['maxsize']
if fileListFile != "":
tmplist = open(fileListFile, "r").read().split('\n')
tmplist = [x.strip() for x in tmplist]
fileList += tmplist
if len(fileList) < 1:
print >> sys.stderr, "No input file is set. exit."
for i in xrange(len(fileList)):
# print "%d --> %s" %(i, fileList[i])
ArchiveFile(fileList[i], g_params['maxsize'])
#}}}
def InitGlobalParameter():#{{{
g_params = {}
g_params['isQuiet'] = True
g_params['maxsize'] = 20*1024*1024
return g_params
#}}}
if __name__ == '__main__' :
g_params = InitGlobalParameter()
sys.exit(main(g_params))
| #!/usr/bin/python
# Filename: archive_logfile.py
# Description: archive logfile using gnu gzip
import os
import sys
import re
import gzip
progname = os.path.basename(sys.argv[0])
wspace = ''.join([" "]*len(progname))
usage_short="""
Usage: %s FILE [FILE ...] [-maxsize STR]
"""%(progname)
usage_ext="""
Description:
Archive (gzip) the logfile if its size is over maxsize
OPTIONS:
-l LISTFILE List of log files
-maxsize STR Set the threshold of the filesize, the logfile will be gzipped
if its file size is >= maxsize, (default: 20M)
e.g. 500k, 20M, 500000b, 5000, 1G
-h, --help Print this help message and exit
Created 2014-05-22, updated 2014-05-22, <NAME>
"""
usage_exp="""
Examples:
%s /var/log/program.output.log
"""%(progname)
def PrintHelp(fpout=sys.stdout):#{{{
print >> fpout, usage_short
print >> fpout, usage_ext
print >> fpout, usage_exp#}}}
def my_getopt_str(argv, i):#{{{
"""
Get a string from the argument list, return the string and the updated
index to the argument list
"""
try:
opt = argv[i+1]
if opt[0] == "-":
msg = "Error! option '%s' must be followed by a string"\
", not an option arg."
print >> sys.stderr, msg%(argv[i])
sys.exit(1)
return (opt, i+2)
except IndexError:
msg = "Error! option '%s' must be followed by a string"
print >> sys.stderr, msg%(argv[i])
raise
#}}}
def Size_human2byte(s):#{{{
if s.isdigit():
return int(s)
else:
s = s.upper()
match = re.match(r"([0-9]+)([A-Z]+)", s , re.I)
if match:
items = match.groups()
size = int(items[0])
if items[1] in ["B"]:
return size
elif items[1] in ["K", "KB"]:
return size*1024
elif items[1] in ["M", "MB"]:
return size*1024*1024
elif items[1] in ["G", "GB"]:
return size*1024*1024*1024
else:
print >> sys.stderr, "Bad maxsize argument:",s
return -1
else:
print >> sys.stderr, "Bad maxsize argument:",s
return -1
#}}}
def ArchiveFile(filename, maxsize):#{{{
"""
Archive the logfile if its size exceeds the limit
"""
if not os.path.exists(filename):
print >> sys.stderr, filename, "does not exist. ignore."
return 1
else:
filesize = os.path.getsize(filename)
if filesize > maxsize:
cnt = 0
zipfile = ""
while 1:
cnt += 1
zipfile = "%s.%d.gz"%(filename, cnt)
if not os.path.exists(zipfile):
break
# write zip file
try:
f_in = open(filename, 'rb')
except IOError:
print >> sys.stderr, "Failed to read %s"%(filename)
return 1
try:
f_out = gzip.open(zipfile, 'wb')
except IOError:
print >> sys.stderr, "Failed to write to %s"%(zipfile)
return 1
f_out.writelines(f_in)
f_out.close()
f_in.close()
print "%s is archived to %s"%(filename, zipfile)
os.remove(filename)
return 0
#}}}
def main(g_params):#{{{
argv = sys.argv
numArgv = len(argv)
if numArgv < 2:
PrintHelp()
return 1
fileList = []
fileListFile = ""
maxsize_str = ""
i = 1
isNonOptionArg=False
while i < numArgv:
if isNonOptionArg == True:
fileList.append(argv[i])
isNonOptionArg = False
i += 1
elif argv[i] == "--":
isNonOptionArg = True
i += 1
elif argv[i][0] == "-":
if argv[i] in ["-h", "--help"]:
PrintHelp()
return 1
elif argv[i] in ["-maxsize", "--maxsize"]:
(maxsize_str, i) = my_getopt_str(argv, i)
elif argv[i] in ["-l", "--l"] :
(fileListFile, i) = my_getopt_str(argv, i)
elif argv[i] in ["-q", "--q"]:
g_params['isQuiet'] = True
i += 1
else:
print >> sys.stderr, "Error! Wrong argument:", argv[i]
return 1
else:
fileList.append(argv[i])
i += 1
if maxsize_str != "":
maxsize = Size_human2byte(maxsize_str)
if maxsize > 0:
g_params['maxsize'] = maxsize
else:
return 1
# print "maxsize=", g_params['maxsize']
if fileListFile != "":
tmplist = open(fileListFile, "r").read().split('\n')
tmplist = [x.strip() for x in tmplist]
fileList += tmplist
if len(fileList) < 1:
print >> sys.stderr, "No input file is set. exit."
for i in xrange(len(fileList)):
# print "%d --> %s" %(i, fileList[i])
ArchiveFile(fileList[i], g_params['maxsize'])
#}}}
def InitGlobalParameter():#{{{
g_params = {}
g_params['isQuiet'] = True
g_params['maxsize'] = 20*1024*1024
return g_params
#}}}
if __name__ == '__main__' :
g_params = InitGlobalParameter()
sys.exit(main(g_params))
| en | 0.44535 | #!/usr/bin/python # Filename: archive_logfile.py # Description: archive logfile using gnu gzip Usage: %s FILE [FILE ...] [-maxsize STR] Description: Archive (gzip) the logfile if its size is over maxsize OPTIONS: -l LISTFILE List of log files -maxsize STR Set the threshold of the filesize, the logfile will be gzipped if its file size is >= maxsize, (default: 20M) e.g. 500k, 20M, 500000b, 5000, 1G -h, --help Print this help message and exit Created 2014-05-22, updated 2014-05-22, <NAME> Examples: %s /var/log/program.output.log #{{{ #}}} #{{{ Get a string from the argument list, return the string and the updated index to the argument list #}}} #{{{ #}}} #{{{ Archive the logfile if its size exceeds the limit # write zip file #}}} #{{{ # print "maxsize=", g_params['maxsize'] # print "%d --> %s" %(i, fileList[i]) #}}} #{{{ #}}} | 3.069352 | 3 |
python/client/azure/mgmt/redhatopenshift/v2020_10_31_preview/_azure_red_hat_open_shift_client.py | dkorzuno/ARO-RP | 0 | 6631305 | <filename>python/client/azure/mgmt/redhatopenshift/v2020_10_31_preview/_azure_red_hat_open_shift_client.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import AzureRedHatOpenShiftClientConfiguration
from .operations import Operations
from .operations import OpenShiftClustersOperations
from . import models
class AzureRedHatOpenShiftClient(SDKClient):
"""Rest API for Azure Red Hat OpenShift 4
:ivar config: Configuration for client.
:vartype config: AzureRedHatOpenShiftClientConfiguration
:ivar operations: Operations operations
:vartype operations: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.Operations
:ivar open_shift_clusters: OpenShiftClusters operations
:vartype open_shift_clusters: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.OpenShiftClustersOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = AzureRedHatOpenShiftClientConfiguration(credentials, subscription_id, base_url)
super(AzureRedHatOpenShiftClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2020-10-31-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
self.open_shift_clusters = OpenShiftClustersOperations(
self._client, self.config, self._serialize, self._deserialize)
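# Editor's usage sketch (not part of the generated code). The credential class and the
# operation names on `open_shift_clusters` are assumptions; check the generated
# `operations` module and your SDK version before relying on them:
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   credentials = ServicePrincipalCredentials(client_id="...", secret="...", tenant="...")
#   client = AzureRedHatOpenShiftClient(credentials, subscription_id="<subscription-id>")
#   for cluster in client.open_shift_clusters.list():
#       print(cluster.name)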
| <filename>python/client/azure/mgmt/redhatopenshift/v2020_10_31_preview/_azure_red_hat_open_shift_client.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import AzureRedHatOpenShiftClientConfiguration
from .operations import Operations
from .operations import OpenShiftClustersOperations
from . import models
class AzureRedHatOpenShiftClient(SDKClient):
"""Rest API for Azure Red Hat OpenShift 4
:ivar config: Configuration for client.
:vartype config: AzureRedHatOpenShiftClientConfiguration
:ivar operations: Operations operations
:vartype operations: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.Operations
:ivar open_shift_clusters: OpenShiftClusters operations
:vartype open_shift_clusters: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.OpenShiftClustersOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = AzureRedHatOpenShiftClientConfiguration(credentials, subscription_id, base_url)
super(AzureRedHatOpenShiftClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2020-10-31-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
self.open_shift_clusters = OpenShiftClustersOperations(
self._client, self.config, self._serialize, self._deserialize)
| en | 0.626722 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Rest API for Azure Red Hat OpenShift 4 :ivar config: Configuration for client. :vartype config: AzureRedHatOpenShiftClientConfiguration :ivar operations: Operations operations :vartype operations: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.Operations :ivar open_shift_clusters: OpenShiftClusters operations :vartype open_shift_clusters: azure.mgmt.redhatopenshift.v2020_10_31_preview.operations.OpenShiftClustersOperations :param credentials: Credentials needed for the client to connect to Azure. :type credentials: :mod:`A msrestazure Credentials object<msrestazure.azure_active_directory>` :param subscription_id: The ID of the target subscription. :type subscription_id: str :param str base_url: Service URL | 1.409257 | 1 |
docs/notebooks/active_learning.pct.py | ChrisMorter/trieste | 0 | 6631306 | <reponame>ChrisMorter/trieste
# %% [markdown]
# # Active Learning
# %% [markdown]
# Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.
# %%
# %matplotlib inline
import numpy as np
import tensorflow as tf
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this example, we will perform active learning for the scaled Branin function.
# %%
from trieste.objectives import scaled_branin
from util.plotting_plotly import plot_function_plotly
from trieste.space import Box
search_space = Box([0, 0], [1, 1])
fig = plot_function_plotly(scaled_branin, search_space.lower, search_space.upper, grid_density=20)
fig.update_layout(height=400, width=400)
fig.show()
# %% [markdown]
# We begin our Bayesian active learning from a four-point initial design built from a space-filling Halton sequence.
# %%
import trieste
observer = trieste.objectives.utils.mk_observer(scaled_branin)
num_initial_points = 4
initial_query_points = search_space.sample_halton(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Surrogate model
#
# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model and pass it to the `GaussianProcessRegression` wrapper.
# %%
import gpflow
from trieste.models.gpflow.models import GaussianProcessRegression
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2])
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr)
model = build_model(initial_data)
# %% [markdown]
# ## Active learning using predictive variance
#
# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance`, which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has a large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs to have a `predict_joint` method to be able to return the full covariance, which is likely to be expensive to compute.
#
# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps.
#
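# (Editor's illustrative sketch, not part of the original notebook.) The determinant-based
# score described above can be probed by hand through the model's `predict_joint` method.
# The indexing below assumes the covariance comes back with shape [1, B, B], which holds for
# the single-output `GPR` model built earlier.
# %%
def naive_predictive_variance_score(query_batch):
    """Log-determinant of the joint posterior covariance over a candidate batch."""
    _, cov = model.predict_joint(query_batch)  # assumed shape: [1, B, B]
    return tf.linalg.logdet(cov[0])
print(naive_predictive_variance_score(search_space.sample(3)))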
# %%
from trieste.acquisition.function import PredictiveVariance
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import EfficientGlobalOptimization
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
builder=acq, optimizer=generate_continuous_optimizer()
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
# %% [markdown]
# To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`; this will make Trieste record our model at each iteration.
# %%
bo_iter = 5
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# Then we can retrieve our final dataset from the active learning steps.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain.
# %%
from util.plotting import plot_bo_points, plot_function_2d
def plot_active_learning_query(result, bo_iter, num_initial_points, query_points, num_query=1):
for i in range(bo_iter):
def pred_var(x):
_, var = result.history[i].models["OBJECTIVE"].model.predict_f(x)
return var
_, ax = plot_function_2d(
pred_var,
search_space.lower - 0.01,
search_space.upper + 0.01,
grid_density=100,
contour=True,
colorbar=True,
figsize=(10, 6),
title=["Variance contour with queried points at iter:" + str(i + 1)],
xlabel="$X_1$",
ylabel="$X_2$",
)
plot_bo_points(
query_points[: num_initial_points + (i * num_query)], ax[0, 0], num_initial_points
)
plot_active_learning_query(result, bo_iter, num_initial_points, query_points)
# %% [markdown]
# ## Batch active learning using predictive variance
#
# In some cases, it can be convenient to query several points at a time by doing batch active learning. For this case, we must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of batch predictive variance is that it tends to query high-variance areas less accurately than sequentially drawing one point at a time.
# %%
bo_iter = 5
num_query = 3
model = build_model(initial_data)
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
num_query_points=num_query, builder=acq, optimizer=generate_continuous_optimizer()
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# After that, we can retrieve our final dataset.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Now we can visualize the batch predictive variance using our plotting function.
# %%
from util.plotting import plot_bo_points, plot_function_2d
plot_active_learning_query(result, bo_iter, num_initial_points, query_points, num_query)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| # %% [markdown]
# # Active Learning
# %% [markdown]
# Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.
# %%
# %matplotlib inline
import numpy as np
import tensorflow as tf
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this example, we will perform active learning for the scaled Branin function.
# %%
from trieste.objectives import scaled_branin
from util.plotting_plotly import plot_function_plotly
from trieste.space import Box
search_space = Box([0, 0], [1, 1])
fig = plot_function_plotly(scaled_branin, search_space.lower, search_space.upper, grid_density=20)
fig.update_layout(height=400, width=400)
fig.show()
# %% [markdown]
# We begin our Bayesian active learning from a four-point initial design built from a space-filling Halton sequence.
# %%
import trieste
observer = trieste.objectives.utils.mk_observer(scaled_branin)
num_initial_points = 4
initial_query_points = search_space.sample_halton(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Surrogate model
#
# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model and pass it to the `GaussianProcessRegression` wrapper.
# %%
import gpflow
from trieste.models.gpflow.models import GaussianProcessRegression
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2])
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr)
model = build_model(initial_data)
# %% [markdown]
# ## Active learning using predictive variance
#
# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance`, which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has a large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs to have a `predict_joint` method to be able to return the full covariance, which is likely to be expensive to compute.
#
# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps.
#
# %%
from trieste.acquisition.function import PredictiveVariance
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import EfficientGlobalOptimization
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
builder=acq, optimizer=generate_continuous_optimizer()
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
# %% [markdown]
# To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`; this will make Trieste record our model at each iteration.
# %%
bo_iter = 5
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# Then we can retrieve our final dataset from the active learning steps.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain.
# %%
from util.plotting import plot_bo_points, plot_function_2d
def plot_active_learning_query(result, bo_iter, num_initial_points, query_points, num_query=1):
for i in range(bo_iter):
def pred_var(x):
_, var = result.history[i].models["OBJECTIVE"].model.predict_f(x)
return var
_, ax = plot_function_2d(
pred_var,
search_space.lower - 0.01,
search_space.upper + 0.01,
grid_density=100,
contour=True,
colorbar=True,
figsize=(10, 6),
title=["Variance contour with queried points at iter:" + str(i + 1)],
xlabel="$X_1$",
ylabel="$X_2$",
)
plot_bo_points(
query_points[: num_initial_points + (i * num_query)], ax[0, 0], num_initial_points
)
plot_active_learning_query(result, bo_iter, num_initial_points, query_points)
# %% [markdown]
# ## Batch active learning using predictive variance
#
# In some cases, it can be convenient to query several points at a time by doing batch active learning. For this case, we must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of batch predictive variance is that it tends to query high-variance areas less accurately than sequentially drawing one point at a time.
# %%
bo_iter = 5
num_query = 3
model = build_model(initial_data)
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
num_query_points=num_query, builder=acq, optimizer=generate_continuous_optimizer()
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# After that, we can retrieve our final dataset.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Now we can visualize the batch predictive variance using our plotting function.
# %%
from util.plotting import plot_bo_points, plot_function_2d
plot_active_learning_query(result, bo_iter, num_initial_points, query_points, num_query)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE) | en | 0.861726 | # %% [markdown] # # Active Learning # %% [markdown] # Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste. # %% # %matplotlib inline # %% [markdown] # ## Describe the problem # # In this example, we will perform active learning for the scaled Branin function. # %% # %% [markdown] # We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence. # %% # %% [markdown] # ## Surrogate model # # Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. # %% # %% [markdown] # ## Active learning using predictive variance # # For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance` which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs to have `predict_joint` method to be able to return the full covariance, and it's likely to be expensive to compute. # # We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps. # # %% # %% [markdown] # To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`, this will make Trieste record our model at each iteration. # %% # %% [markdown] # Then we can retrieve our final dataset from the active learning steps. # %% # %% [markdown] # Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain. # %% # %% [markdown] # ## Batch active learning using predictive variance # # For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time. # %% # %% [markdown] # After that, we can retrieve our final dataset. # %% # %% [markdown] # Now we can visualize the batch predictive variance using our plotting function. # %% # %% [markdown] # ## LICENSE # # [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE) | 3.188941 | 3 |
euler5.py | G00364756/Programming_Exercises | 0 | 6631307 | <reponame>G00364756/Programming_Exercises
# <NAME> - G00364756 - 22/02/2018
# Exercise 4, Topic 4: euler5.py
# Project Euler_Problem 5:
# 2520 is the smallest number that can be divided by
# each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly
# divisible by all of the numbers from 1 to 20?
# This code is cumbersome and inefficient but gets the job done.
# I imagine the aim of the exercise is to become more capable with
# loops, ranges and lists but I wanted to see if I could
# intuitively create code to solve this problem rather than googling
# how to do it.
# I originally set n to 1 and incremented n by 1 but this took
# over 30 seconds to compute. Setting n to 20 as the starting point,
# because the range of numbers that we have to divide by ends at 20,
# and incrementing by 20 speeds the code up drastically without
# compromising what the code is intended to do.
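# Editor's illustrative alternative (not part of the original exercise): the same divisibility
# test can be written with a generator expression instead of the long hand-written chain of
# "and" conditions used in divisable() below.
def divisible_by_all_sketch(limit=20, step=20):
    """Return the smallest multiple of `step` divisible by every number from 1 to `limit`."""
    n = step
    while not all(n % d == 0 for d in range(1, limit + 1)):
        n += step
    return n
# print(divisible_by_all_sketch())  # 232792560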
def divisable(x):
"""Function created to find the number which is divisable by all the numbers from 1 to 20 without leaving a remainder"""
# Set n greater than 0 as zero modulus anything gives a remainder of 0.
n = 20
    # This is where the code could encounter problems: if the
    # end result is larger than the number we set the loop limit to,
    # the code will never reach the end result. Need to put in a very large number
# to account for this, or in this case set the loop to something it will
# never reach ( i.e. while n is not equal to zero "n != 0")
# The improvements mean the code spits out the final value in 5 seconds.
while n != 0:
        # I wasn't sure how to do the modulus operator with arrays so I had
        # to list the criteria individually and link them with an "and"
        # operator.
if (n % 1 == 0) and (n % 2 == 0) and (n % 3 == 0) and (n % 4 == 0) and (n % 5 == 0) and (n % 6 == 0) and (n % 7 == 0) and (n % 8 == 0) and (n % 9 == 0) and (n % 10 == 0) and (n % 11 == 0) and (n % 12 == 0) and (n % 13 == 0) and (n % 14 == 0) and (n % 15 == 0) and (n % 16 == 0) and (n % 17 == 0) and (n % 18 == 0) and (n % 19 == 0) and (n % 20 == 0):
return(n)
break
# It is important to use break otherwise the code will continue
# to spit out the final value, an endless loop.
else:
n = n + 20
print(divisable(1)) | # <NAME> - G00364756 - 22/02/2018
# Exercise 4, Topic 4: euler5.py
# Project Euler_Problem 5:
# 2520 is the smallest number that can be divided by
# each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly
# divisible by all of the numbers from 1 to 20?
# This code is cumbersome and inefficient but gets the job done.
# I imagine the aim of the exercise is to become more capable with
# loops, ranges and lists but I wanted to see if I could
# intuitively create code to solve this problem rather than googling
# how to do it.
# I originally set n to 1 and incremented n by 1 but this took
# over 30 seconds to compute. Setting n to 20 as the starting point,
# because the range of numbers that we have to divide by ends at 20,
# and incrementing by 20 speeds the code up drastically without
# compromising what the code is intended to do.
def divisable(x):
"""Function created to find the number which is divisable by all the numbers from 1 to 20 without leaving a remainder"""
# Set n greater than 0 as zero modulus anything gives a remainder of 0.
n = 20
    # This is where the code could encounter problems: if the
    # end result is larger than the number we set the loop limit to,
    # the code will never reach the end result. Need to put in a very large number
# to account for this, or in this case set the loop to something it will
# never reach ( i.e. while n is not equal to zero "n != 0")
# The improvements mean the code spits out the final value in 5 seconds.
while n != 0:
        # I wasn't sure how to do the modulus operator with arrays so I had
        # to list the criteria individually and link them with an "and"
        # operator.
if (n % 1 == 0) and (n % 2 == 0) and (n % 3 == 0) and (n % 4 == 0) and (n % 5 == 0) and (n % 6 == 0) and (n % 7 == 0) and (n % 8 == 0) and (n % 9 == 0) and (n % 10 == 0) and (n % 11 == 0) and (n % 12 == 0) and (n % 13 == 0) and (n % 14 == 0) and (n % 15 == 0) and (n % 16 == 0) and (n % 17 == 0) and (n % 18 == 0) and (n % 19 == 0) and (n % 20 == 0):
return(n)
break
# It is important to use break otherwise the code will continue
# to spit out the final value, an endless loop.
else:
n = n + 20
print(divisable(1)) | en | 0.948493 | # <NAME> - G00364756 - 22/02/2018 # Exercise 4, Topic 4: euler5.py # Project Euler_Problem 5: # 2520 is the smallest number that can be divided by # each of the numbers from 1 to 10 without any remainder. # What is the smallest positive number that is evenly # divisible by all of the numbers from 1 to 20? # This code is cumbersome and inefficient but gets the job done. # I imagine the aim of the exercise is to become more capable with # loops, ranges and lists but I wanted to see if I could # intuitively create code to solve this problem rather than googling # how to do it. # I originally set n to 1 and incremented n by 1 but this took # over 30 seconds to compute. Setting n to 20 as the starting point, # because the range of numbers that we have to divide by ends at 20, # and incrimenting by 20 speeds the code up drastically without # comprimising what the code is intended to do. Function created to find the number which is divisable by all the numbers from 1 to 20 without leaving a remainder # Set n greater than 0 as zero modulus anything gives a remainder of 0. # This is where the code could encounter problems, if the # end result is a larger than the number we set the loop limit to # the code will never reach the end result. Need to put in a very large number # to account for this, or in this case set the loop to something it will # never reach ( i.e. while n is not equal to zero "n != 0") # The improvements mean the code spits out the final value in 5 seconds. # I wasn't sure how to do the modulus operand with arrays so I had # to list the criteria individually and link them with and "and" # operand. # It is important to use break otherwise the code will continue # to spit out the final value, an endless loop. | 3.712294 | 4 |
python/764.largest-plus-sign.py | stavanmehta/leetcode | 0 | 6631308 | <reponame>stavanmehta/leetcode
class Solution:
def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int:
| class Solution:
def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int: | none | 1 | 2.079668 | 2 |
|
mywork/adult_income.py | qiudebo/13learn | 1 | 6631309 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("python Spark SQL basic adult")\
.config("spark.some.config.option", "some-value")\
.getOrCreate()
df = spark.read.text("/Users/qiudebo/PycharmProjects/stanford_cs231/spark_example/data/adult/adult.data")
print(type(df))
print(df.columns)
df.take(1)
spark.stop()
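# Editor's note (not part of the original script): adult.data is comma-separated, so
# spark.read.text yields a single 'value' column; a sketch that would give one column per field:
#   df = spark.read.csv("/path/to/adult.data", inferSchema=True)
#   df.printSchema()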
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("python Spark SQL basic adult")\
.config("spark.some.config.option", "some-value")\
.getOrCreate()
df = spark.read.text("/Users/qiudebo/PycharmProjects/stanford_cs231/spark_example/data/adult/adult.data")
print(type(df))
print(df.columns)
df.take(1)
spark.stop()
| en | 0.44423 | #!/usr/bin/python # -*- coding: utf-8 -*- | 3.181182 | 3 |
tests/test.py | Confy255/group3predict | 1 | 6631310 | from . import ourmodule | from . import ourmodule | none | 1 | 1.198964 | 1 |
|
blog/views.py | jeanlucancey/pronunciamento | 0 | 6631311 | from django.shortcuts import (
get_object_or_404, redirect, render)
from django.views.decorators.http import \
require_http_methods
from django.views.generic import View
from .forms import PostForm
from .models import Post
class PostCreate(View):
form_class = PostForm
template_name = 'blog/post_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()})
def post(self, request):
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
return render(
request,
self.template_name,
{'form': bound_form})
class PostDelete(View):
def get(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
return render(
request,
'blog/post_confirm_delete.html',
{'post': post})
def post(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
post.delete()
return redirect('blog_post_list')
@require_http_methods(['HEAD', 'GET'])
def post_detail(request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug=slug)
return render(
request,
'blog/post_detail.html',
{'post': post})
class PostList(View):
def get(self, request):
return render(
request,
'blog/post_list.html',
{'post_list': Post.objects.all()})
class PostUpdate(View):
form_class = PostForm
model = Post
template_name = 'blog/post_form_update.html'
def get_object(self, year, month, slug):
return get_object_or_404(
self.model,
pub_date__year=year,
pub_date__month=month,
slug=slug)
def get(self, request, year, month, slug):
post = self.get_object(year, month, slug)
context = {
'form': self.form_class(
instance=post),
'post': post,
}
return render(
request, self.template_name, context)
def post(self, request, year, month, slug):
post = self.get_object(year, month, slug)
bound_form = self.form_class(
request.POST, instance=post)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
context = {
'form': bound_form,
'post': post,
}
return render(
request,
self.template_name,
context)
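# --- Illustrative wiring (added as a sketch; not part of the original file) ---
# PostCreate/PostUpdate redirect to the saved Post, so the Post model is assumed
# to define get_absolute_url(); PostDelete redirects to the URL named
# 'blog_post_list'. One possible urls.py, where every route except that name is
# an assumption:
#
#   from django.urls import path, re_path
#   from .views import PostCreate, PostDelete, PostList, PostUpdate, post_detail
#
#   urlpatterns = [
#       path('', PostList.as_view(), name='blog_post_list'),
#       path('create/', PostCreate.as_view(), name='blog_post_create'),
#       re_path(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<slug>[\w-]+)/$',
#               post_detail, name='blog_post_detail'),
#   ]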
| from django.shortcuts import (
get_object_or_404, redirect, render)
from django.views.decorators.http import \
require_http_methods
from django.views.generic import View
from .forms import PostForm
from .models import Post
class PostCreate(View):
form_class = PostForm
template_name = 'blog/post_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()})
def post(self, request):
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
return render(
request,
self.template_name,
{'form': bound_form})
class PostDelete(View):
def get(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
return render(
request,
'blog/post_confirm_delete.html',
{'post': post})
def post(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
post.delete()
return redirect('blog_post_list')
@require_http_methods(['HEAD', 'GET'])
def post_detail(request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug=slug)
return render(
request,
'blog/post_detail.html',
{'post': post})
class PostList(View):
def get(self, request):
return render(
request,
'blog/post_list.html',
{'post_list': Post.objects.all()})
class PostUpdate(View):
form_class = PostForm
model = Post
template_name = 'blog/post_form_update.html'
def get_object(self, year, month, slug):
return get_object_or_404(
self.model,
pub_date__year=year,
pub_date__month=month,
slug=slug)
def get(self, request, year, month, slug):
post = self.get_object(year, month, slug)
context = {
'form': self.form_class(
instance=post),
'post': post,
}
return render(
request, self.template_name, context)
def post(self, request, year, month, slug):
post = self.get_object(year, month, slug)
bound_form = self.form_class(
request.POST, instance=post)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
context = {
'form': bound_form,
'post': post,
}
return render(
request,
self.template_name,
context)
| none | 1 | 2.122144 | 2 |
|
ticker.py | reaganking/Ticker | 0 | 6631312 | <filename>ticker.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''show scores of today's NHL games'''
import datetime
import json
import os
import platform
import sys
import time
import requests
from colorama import init, Fore, Style
from pytz import reference
# API purportedly updates every 60 seconds
REFRESH_TIME = 30
API_URL = 'http://live.nhle.com/GameData/RegularSeasonScoreboardv3.jsonp'
TEST = False
def main():
'''generates a scoreboard of today's NHL games'''
games_today = False
playoffs = False
# Today's date
t_object = datetime.datetime.now()
today_date = "" + t_object.strftime("%A") + " " + "%s/%s" % (t_object.month, t_object.day)
# Yesterday's date
y_object = t_object - datetime.timedelta(days=1)
yesterday_date = "" + y_object.strftime("%A") + " " + "%s/%s" % (y_object.month, y_object.day)
while True:
scraped_page = requests.get(API_URL)
# Convert the scraped page to text and trim
scraped_page = scraped_page.text.replace('loadScoreboard(', '')
scraped_page = scraped_page[:-1]
# Create JSON object
data = json.loads(scraped_page)
clear_screen()
for key in data:
if key == 'games':
for game_info in data[key]:
# extract useful info from JSON
game_id = str(game_info['id'])
game_clock = game_info['ts']
game_stage = game_info['tsc']
status = game_info['bs']
away_locale = fix_locale(game_info['atn'])
away_name = fix_name(game_info['atv']).title()
away_score = game_info['ats']
away_result = game_info['atc']
home_locale = fix_locale(game_info['htn'])
home_name = fix_name(game_info['htv']).title()
home_score = game_info['hts']
home_result = game_info['htc']
if game_id[4:6] == '03':
playoffs = True
series_game_number = game_id[-1:]
# Show today's games
if today_date in game_clock.title() \
or 'TODAY' in game_clock \
or 'LIVE' in status:
games_today = True
header_text = away_locale + ' ' + away_name + \
' @ ' + home_locale + ' ' + home_name
# Show the game number of current 7-game series,
# if it's playoff time
if playoffs:
header_text += ' -- Game ' + series_game_number
# Different displays for different states of game:
# Game from yesterday, ex: YESTERDAY (FINAL 2nd OT)
# Game from today finished, ex: TODAY (FINAL 2nd OT)
if 'FINAL' in status:
if yesterday_date in game_clock.title():
header_text += '\nYESTERDAY '
elif today_date in game_clock.title() or 'TODAY' in game_clock:
header_text += '\nTODAY '
else:
header_text += game_clock.title()
header_text += '(' + status + ')'
# Upcoming game, ex: TUESDAY 4/21, 7:00 PM MDT)
elif 'DAY' in game_clock and 'FINAL' not in status:
timezone = local_time()
header_text += Fore.YELLOW + \
'\n(' + game_clock + ', ' + status + \
' ' + timezone + ')' + Fore.RESET
# Last 5 minutes of game and all of overtime,
# eg. (1:59 3rd PERIOD) in *red* font
elif 'LIVE' in status and 'critical' in game_stage:
header_text += Fore.RED + \
'\n(' + game_clock + ' PERIOD)' + Fore.RESET
# Any other time in game
# eg. (10:34 1st PERIOD)
else:
header_text += Fore.YELLOW + \
'\n(' + game_clock + Style.RESET_ALL
if 'PRE GAME' not in game_clock:
header_text += Fore.YELLOW + ' PERIOD'
header_text += Fore.YELLOW + ')' + Style.RESET_ALL
print(header_text)
# Highlight the winner of finished games in blue, games underway in green:
if away_result == 'winner': # Away team wins
print(Style.BRIGHT + Fore.BLUE + away_name + ' ' + away_score
+ Style.RESET_ALL + ' - ' + home_score + ' ' + home_name)
elif home_result == 'winner': # Home team wins
print(away_name + ' ' + away_score + ' - ' + Style.BRIGHT
+ Fore.BLUE + home_score + ' ' + home_name + Style.RESET_ALL)
elif 'progress' in game_stage or 'critical' in game_stage: # Game underway
print(Fore.GREEN + away_name + ' ' + away_score + ' - '
+ home_score + ' ' + home_name + Fore.RESET)
print('')
if not games_today:
print('\nThere are no NHL games scheduled for today.\n')
# Perform the sleep only if we're not currently testing
if TEST is True:
sys.exit(0)
else:
time.sleep(REFRESH_TIME)
print('\n')
def clear_screen():
'''os-adaptive screen wipe'''
if platform.system() == 'Windows':
os.system('cls')
else:
os.system('clear')
def fix_locale(team_locale):
'''modify place names from the values in JSON'''
if 'NY ' in team_locale:
return 'New York'
elif 'Montr' in team_locale:
return u'Montréal'
return team_locale
def fix_name(team_name):
'''modify team names from the values in JSON'''
if 'wings' in team_name:
return 'Red Wings'
elif 'jackets' in team_name:
return 'Blue Jackets'
elif 'leafs' in team_name:
return 'Maple Leafs'
elif 'knights' in team_name:
return 'Golden Knights'
return team_name
def local_time():
'''get local timezone'''
today = datetime.datetime.now()
localtime = reference.LocalTimezone()
return localtime.tzname(today)
def parse_arguments(arguments):
'''process the arguments provided at runtime'''
for index in range(1, len(arguments)):
argument = arguments[index]
if argument == '--test' or argument == '-t':
print('Running in TEST mode.\n')
global TEST
TEST = True
if __name__ == '__main__':
init() # colorama
parse_arguments(sys.argv)
main()
# Originally forked from <NAME>'s NHL-Scores - https://github.com/jtf323/NHL-Scores
| <filename>ticker.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''show scores of today's NHL games'''
import datetime
import json
import os
import platform
import sys
import time
import requests
from colorama import init, Fore, Style
from pytz import reference
# API purportedly updates every 60 seconds
REFRESH_TIME = 30
API_URL = 'http://live.nhle.com/GameData/RegularSeasonScoreboardv3.jsonp'
TEST = False
def main():
'''generates a scoreboard of today's NHL games'''
games_today = False
playoffs = False
# Today's date
t_object = datetime.datetime.now()
today_date = "" + t_object.strftime("%A") + " " + "%s/%s" % (t_object.month, t_object.day)
# Yesterday's date
y_object = t_object - datetime.timedelta(days=1)
yesterday_date = "" + y_object.strftime("%A") + " " + "%s/%s" % (y_object.month, y_object.day)
while True:
scraped_page = requests.get(API_URL)
# Convert the scraped page to text and trim
scraped_page = scraped_page.text.replace('loadScoreboard(', '')
scraped_page = scraped_page[:-1]
# Create JSON object
data = json.loads(scraped_page)
clear_screen()
for key in data:
if key == 'games':
for game_info in data[key]:
# extract useful info from JSON
game_id = str(game_info['id'])
game_clock = game_info['ts']
game_stage = game_info['tsc']
status = game_info['bs']
away_locale = fix_locale(game_info['atn'])
away_name = fix_name(game_info['atv']).title()
away_score = game_info['ats']
away_result = game_info['atc']
home_locale = fix_locale(game_info['htn'])
home_name = fix_name(game_info['htv']).title()
home_score = game_info['hts']
home_result = game_info['htc']
if game_id[4:6] == '03':
playoffs = True
series_game_number = game_id[-1:]
# Show today's games
if today_date in game_clock.title() \
or 'TODAY' in game_clock \
or 'LIVE' in status:
games_today = True
header_text = away_locale + ' ' + away_name + \
' @ ' + home_locale + ' ' + home_name
# Show the game number of current 7-game series,
# if it's playoff time
if playoffs:
header_text += ' -- Game ' + series_game_number
# Different displays for different states of game:
# Game from yesterday, ex: YESTERDAY (FINAL 2nd OT)
# Game from today finished, ex: TODAY (FINAL 2nd OT)
if 'FINAL' in status:
if yesterday_date in game_clock.title():
header_text += '\nYESTERDAY '
elif today_date in game_clock.title() or 'TODAY' in game_clock:
header_text += '\nTODAY '
else:
header_text += game_clock.title()
header_text += '(' + status + ')'
# Upcoming game, ex: TUESDAY 4/21, 7:00 PM MDT)
elif 'DAY' in game_clock and 'FINAL' not in status:
timezone = local_time()
header_text += Fore.YELLOW + \
'\n(' + game_clock + ', ' + status + \
' ' + timezone + ')' + Fore.RESET
# Last 5 minutes of game and all of overtime,
# eg. (1:59 3rd PERIOD) in *red* font
elif 'LIVE' in status and 'critical' in game_stage:
header_text += Fore.RED + \
'\n(' + game_clock + ' PERIOD)' + Fore.RESET
# Any other time in game
# eg. (10:34 1st PERIOD)
else:
header_text += Fore.YELLOW + \
'\n(' + game_clock + Style.RESET_ALL
if 'PRE GAME' not in game_clock:
header_text += Fore.YELLOW + ' PERIOD'
header_text += Fore.YELLOW + ')' + Style.RESET_ALL
print(header_text)
# Highlight the winner of finished games in blue, games underway in green:
if away_result == 'winner': # Away team wins
print(Style.BRIGHT + Fore.BLUE + away_name + ' ' + away_score
+ Style.RESET_ALL + ' - ' + home_score + ' ' + home_name)
elif home_result == 'winner': # Home team wins
print(away_name + ' ' + away_score + ' - ' + Style.BRIGHT
+ Fore.BLUE + home_score + ' ' + home_name + Style.RESET_ALL)
elif 'progress' in game_stage or 'critical' in game_stage: # Game underway
print(Fore.GREEN + away_name + ' ' + away_score + ' - '
+ home_score + ' ' + home_name + Fore.RESET)
print('')
if not games_today:
print('\nThere are no NHL games scheduled for today.\n')
# Perform the sleep only if we're not currently testing
if TEST is True:
sys.exit(0)
else:
time.sleep(REFRESH_TIME)
print('\n')
def clear_screen():
'''os-adaptive screen wipe'''
if platform.system() == 'Windows':
os.system('cls')
else:
os.system('clear')
def fix_locale(team_locale):
'''modify place names from the values in JSON'''
if 'NY ' in team_locale:
return 'New York'
elif 'Montr' in team_locale:
return u'Montréal'
return team_locale
def fix_name(team_name):
'''modify team names from the values in JSON'''
if 'wings' in team_name:
return 'Red Wings'
elif 'jackets' in team_name:
return 'Blue Jackets'
elif 'leafs' in team_name:
return 'Maple Leafs'
elif 'knights' in team_name:
return 'Golden Knights'
return team_name
def local_time():
'''get local timezone'''
today = datetime.datetime.now()
localtime = reference.LocalTimezone()
return localtime.tzname(today)
def parse_arguments(arguments):
'''process the arguments provided at runtime'''
for index in range(1, len(arguments)):
argument = arguments[index]
if argument == '--test' or argument == '-t':
print('Running in TEST mode.\n')
global TEST
TEST = True
if __name__ == '__main__':
init() # colorama
parse_arguments(sys.argv)
main()
# Originally forked from <NAME>'s NHL-Scores - https://github.com/jtf323/NHL-Scores
| en | 0.805939 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- show scores of today's NHL games # API purportedly updates every 60 seconds generates a scoreboard of today's NHL games # Today's date # Yesterday's date # Convert the scraped page to text and trim # Create JSON object # extract useful info from JSON # Show today's games # Show the game number of current 7-game series, # if it's playoff time # Different displays for different states of game: # Game from yesterday, ex: YESTERDAY (FINAL 2nd OT) # Game from today finished, ex: TODAY (FINAL 2nd OT) # Upcoming game, ex: TUESDAY 4/21, 7:00 PM MDT) # Last 5 minutes of game and all of overtime, # eg. (1:59 3rd PERIOD) in *red* font # Any other time in game # eg. (10:34 1st PERIOD) # Highlight the winner of finished games in blue, games underway in green: # Away team wins # Home team wins # Game underway # Perform the sleep only if we're not currently testing os-adaptive screen wipe modify place names from the values in JSON modify team names from the values in JSON get local timezone process the arguments provided at runtime # colorama # Originally forked from <NAME>'s NHL-Scores - https://github.com/jtf323/NHL-Scores | 2.94339 | 3 |
simulation/utils/machine_learning/cycle_gan/image_translator.py | KITcar-Team/kitcar-gazebo-simulation | 13 | 6631313 | import argparse
import os
import pathlib
import cv2
import numpy as np
import torch
from PIL import Image
from simulation.utils.machine_learning.data.base_dataset import get_transform
from simulation.utils.machine_learning.data.image_operations import tensor2im
from simulation.utils.machine_learning.models import resnet_generator
from simulation.utils.machine_learning.models.helper import get_norm_layer, init_net
from .configs.test_options import CycleGANTestOptions, WassersteinCycleGANTestOptions
from .models import generator
from .models.cycle_gan_model import CycleGANModel
from .models.wcycle_gan import WassersteinCycleGANModel
class ImageTranslator:
"""Implementation of a simple ROS interface to translate simulated to "real" images."""
def __init__(self, use_wasserstein=True):
"""Initialize the ImageTranslator class.
Use default test options but could be via command-line. Load and setup the model
"""
opt = WassersteinCycleGANTestOptions if use_wasserstein else CycleGANTestOptions
opt.checkpoints_dir = os.path.join(
pathlib.Path(__file__).parent.absolute(), opt.checkpoints_dir
)
tf_properties = {
"load_size": opt.load_size,
"crop_size": opt.crop_size,
"preprocess": opt.preprocess,
"mask": os.path.join(os.path.dirname(__file__), opt.mask),
"no_flip": True,
"grayscale": True,
}
self.transform = get_transform(**tf_properties)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if opt.is_wgan:
netg_b_to_a = resnet_generator.ResnetGenerator(
opt.input_nc,
opt.output_nc,
opt.ngf,
get_norm_layer(opt.norm),
dilations=opt.dilations,
conv_layers_in_block=opt.conv_layers_in_block,
)
else:
netg_b_to_a = generator.create_generator(
opt.input_nc,
opt.output_nc,
opt.ngf,
opt.netg,
opt.norm,
not opt.no_dropout,
opt.activation,
opt.conv_layers_in_block,
opt.dilations,
)
netg_b_to_a = init_net(netg_b_to_a, opt.init_type, opt.init_gain, self.device)
ModelClass = CycleGANModel if not opt.is_wgan else WassersteinCycleGANModel
self.model = ModelClass.from_dict(
netg_a_to_b=None, netg_b_to_a=netg_b_to_a, **opt.to_dict()
)
self.model.networks.load(
os.path.join(opt.checkpoints_dir, opt.name, f"{opt.epoch}_net_"),
device=self.device,
)
self.model.eval()
def __call__(
self,
image: np.ndarray,
f_keep_pixels: float = 0,
f_keep_colored_pixels: float = 0,
) -> np.ndarray:
"""Translate an image to a "fake real" image by using the loaded model.
Args:
image: Image to be translated to "fake real"
f_keep_pixels: Factor of original pixels that are kept
f_keep_colored_pixels: Factor of colored pixels that are kept
Returns:
Translated image.
"""
# Store shape
h, w, c = image.shape
img_np = image
# Apply transformations
image: torch.Tensor = self.transform(Image.fromarray(image))
image = image.to(self.device)
# Copy the numpy array because it's not writeable otherwise
# Bring into shape [1,1,h,w]
image.unsqueeze_(0)
# Inference
result = self.model.networks.g_b_to_a.forward(image).detach()
# From [-1,1] to [0,256]
result = tensor2im(result, to_rgb=False)
# Resize to the size the input image has
result = cv2.resize(result, dsize=(w, h), interpolation=cv2.INTER_LINEAR)
if f_keep_pixels > 0:
grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
colored_pxls = f_keep_pixels * np.ones((h, w))
result = (1 - f_keep_pixels) * result + f_keep_pixels * grey_img
if f_keep_colored_pixels > 0:
grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
colored_pxls = f_keep_colored_pixels * np.ones((h, w))
colored_pxls[img_np[:, :, 0] == img_np[:, :, 1]] = 0
result = (
np.ones_like(colored_pxls) - colored_pxls
) * result + colored_pxls * grey_img
return result.astype(np.uint8)
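# Illustrative single-image usage (an assumption; the original only ships the
# batch loop under __main__ below):
#   translator = ImageTranslator(use_wasserstein=True)
#   fake_real = translator(sim_img)                    # sim_img: HxWx3 uint8 array
#   blended = translator(sim_img, f_keep_pixels=0.2)   # keep 20% of the original intensities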
if __name__ == "__main__":
"""Run GAN over all files in folder."""
parser = argparse.ArgumentParser(description="Extract images from a ROS bag.")
parser.add_argument("--input_dir", help="Directory with input images.")
parser.add_argument("--output_dir", help="Directory for output images.")
parser.add_argument(
"--gan_type",
type=str,
default="default",
help="Decide whether to use Wasserstein gan or default gan [default, wgan]",
)
args = parser.parse_args()
    GAN = ImageTranslator(use_wasserstein=(args.gan_type == "wgan"))  # map CLI choice to the boolean flag expected by __init__
files = [
file
for file in os.listdir(args.input_dir)
if os.path.isfile(os.path.join(args.input_dir, file))
and file.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".gif"))
]
os.makedirs(args.output_dir, exist_ok=True)
for i, file in enumerate(files):
input_file_path = os.path.join(args.input_dir, file)
output_file_path = os.path.join(args.output_dir, file)
img_np = np.array(Image.open(input_file_path))
img_np = cv2.cvtColor(img_np, cv2.COLOR_GRAY2BGR)
translated_image = GAN(img_np)
cv2.imwrite(output_file_path, translated_image)
print(f"Processing: {100 * i / len(files):.2f}%")
| import argparse
import os
import pathlib
import cv2
import numpy as np
import torch
from PIL import Image
from simulation.utils.machine_learning.data.base_dataset import get_transform
from simulation.utils.machine_learning.data.image_operations import tensor2im
from simulation.utils.machine_learning.models import resnet_generator
from simulation.utils.machine_learning.models.helper import get_norm_layer, init_net
from .configs.test_options import CycleGANTestOptions, WassersteinCycleGANTestOptions
from .models import generator
from .models.cycle_gan_model import CycleGANModel
from .models.wcycle_gan import WassersteinCycleGANModel
class ImageTranslator:
"""Implementation of a simple ROS interface to translate simulated to "real" images."""
def __init__(self, use_wasserstein=True):
"""Initialize the ImageTranslator class.
        Use the default test options (could also be set via the command line). Load and set up the model.
"""
opt = WassersteinCycleGANTestOptions if use_wasserstein else CycleGANTestOptions
opt.checkpoints_dir = os.path.join(
pathlib.Path(__file__).parent.absolute(), opt.checkpoints_dir
)
tf_properties = {
"load_size": opt.load_size,
"crop_size": opt.crop_size,
"preprocess": opt.preprocess,
"mask": os.path.join(os.path.dirname(__file__), opt.mask),
"no_flip": True,
"grayscale": True,
}
self.transform = get_transform(**tf_properties)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if opt.is_wgan:
netg_b_to_a = resnet_generator.ResnetGenerator(
opt.input_nc,
opt.output_nc,
opt.ngf,
get_norm_layer(opt.norm),
dilations=opt.dilations,
conv_layers_in_block=opt.conv_layers_in_block,
)
else:
netg_b_to_a = generator.create_generator(
opt.input_nc,
opt.output_nc,
opt.ngf,
opt.netg,
opt.norm,
not opt.no_dropout,
opt.activation,
opt.conv_layers_in_block,
opt.dilations,
)
netg_b_to_a = init_net(netg_b_to_a, opt.init_type, opt.init_gain, self.device)
ModelClass = CycleGANModel if not opt.is_wgan else WassersteinCycleGANModel
self.model = ModelClass.from_dict(
netg_a_to_b=None, netg_b_to_a=netg_b_to_a, **opt.to_dict()
)
self.model.networks.load(
os.path.join(opt.checkpoints_dir, opt.name, f"{opt.epoch}_net_"),
device=self.device,
)
self.model.eval()
def __call__(
self,
image: np.ndarray,
f_keep_pixels: float = 0,
f_keep_colored_pixels: float = 0,
) -> np.ndarray:
"""Translate an image to a "fake real" image by using the loaded model.
Args:
image: Image to be translated to "fake real"
f_keep_pixels: Factor of original pixels that are kept
f_keep_colored_pixels: Factor of colored pixels that are kept
Returns:
Translated image.
"""
# Store shape
h, w, c = image.shape
img_np = image
# Apply transformations
image: torch.Tensor = self.transform(Image.fromarray(image))
image = image.to(self.device)
# Copy the numpy array because it's not writeable otherwise
# Bring into shape [1,1,h,w]
image.unsqueeze_(0)
# Inference
result = self.model.networks.g_b_to_a.forward(image).detach()
# From [-1,1] to [0,256]
result = tensor2im(result, to_rgb=False)
# Resize to the size the input image has
result = cv2.resize(result, dsize=(w, h), interpolation=cv2.INTER_LINEAR)
if f_keep_pixels > 0:
grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
colored_pxls = f_keep_pixels * np.ones((h, w))
result = (1 - f_keep_pixels) * result + f_keep_pixels * grey_img
if f_keep_colored_pixels > 0:
grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
colored_pxls = f_keep_colored_pixels * np.ones((h, w))
colored_pxls[img_np[:, :, 0] == img_np[:, :, 1]] = 0
result = (
np.ones_like(colored_pxls) - colored_pxls
) * result + colored_pxls * grey_img
return result.astype(np.uint8)
if __name__ == "__main__":
"""Run GAN over all files in folder."""
parser = argparse.ArgumentParser(description="Extract images from a ROS bag.")
parser.add_argument("--input_dir", help="Directory with input images.")
parser.add_argument("--output_dir", help="Directory for output images.")
parser.add_argument(
"--gan_type",
type=str,
default="default",
help="Decide whether to use Wasserstein gan or default gan [default, wgan]",
)
args = parser.parse_args()
    GAN = ImageTranslator(use_wasserstein=(args.gan_type == "wgan"))  # map CLI choice to the boolean flag expected by __init__
files = [
file
for file in os.listdir(args.input_dir)
if os.path.isfile(os.path.join(args.input_dir, file))
and file.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".gif"))
]
os.makedirs(args.output_dir, exist_ok=True)
for i, file in enumerate(files):
input_file_path = os.path.join(args.input_dir, file)
output_file_path = os.path.join(args.output_dir, file)
img_np = np.array(Image.open(input_file_path))
img_np = cv2.cvtColor(img_np, cv2.COLOR_GRAY2BGR)
translated_image = GAN(img_np)
cv2.imwrite(output_file_path, translated_image)
print(f"Processing: {100 * i / len(files):.2f}%")
| en | 0.83975 | Implementation of a simple ROS interface to translate simulated to "real" images. Initialize the ImageTranslator class. Use default test options but could be via command-line. Load and setup the model Translate an image to a "fake real" image by using the loaded model. Args: image: Image to be translated to "fake real" f_keep_pixels: Factor of original pixels that are kept f_keep_colored_pixels: Factor of colored pixels that are kept Returns: Translated image. # Store shape # Apply transformations # Copy the numpy array because it's not writeable otherwise # Bring into shape [1,1,h,w] # Inference # From [-1,1] to [0,256] # Resize to the size the input image has Run GAN over all files in folder. | 2.25051 | 2 |
src/cnn_class2/tf_resnet_first_layers.py | JouniVatanen/NLP-and-Deep-Learning | 1 | 6631314 | # https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
# Let's go up to the end of the first conv block
# to make sure everything has been loaded correctly
# compared to keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock
# NOTE: dependent on your Keras version
# this script used 2.1.1
# [<keras.engine.topology.InputLayer at 0x112fe4358>,
# <keras.layers.convolutional.Conv2D at 0x112fe46a0>,
# <keras.layers.normalization.BatchNormalization at 0x112fe4630>,
# <keras.layers.core.Activation at 0x112fe4eb8>,
# <keras.layers.pooling.MaxPooling2D at 0x10ed4be48>,
# <keras.layers.convolutional.Conv2D at 0x1130723c8>,
# <keras.layers.normalization.BatchNormalization at 0x113064710>,
# <keras.layers.core.Activation at 0x113092dd8>,
# <keras.layers.convolutional.Conv2D at 0x11309e908>,
# <keras.layers.normalization.BatchNormalization at 0x11308a550>,
# <keras.layers.core.Activation at 0x11312ac88>,
# <keras.layers.convolutional.Conv2D at 0x1131207b8>,
# <keras.layers.convolutional.Conv2D at 0x1131b8da0>,
# <keras.layers.normalization.BatchNormalization at 0x113115550>,
# <keras.layers.normalization.BatchNormalization at 0x1131a01d0>,
# <keras.layers.merge.Add at 0x11322f0f0>,
# <keras.layers.core.Activation at 0x113246cf8>]
# define some additional layers so they have a forward function
class ReLULayer:
def forward(self, X):
return tf.nn.relu(X)
def get_params(self):
return []
class MaxPoolLayer:
def __init__(self, dim):
self.dim = dim
def forward(self, X):
return tf.nn.max_pool(
X,
ksize=[1, self.dim, self.dim, 1],
strides=[1, 2, 2, 1],
padding='VALID'
)
def get_params(self):
return []
class PartialResNet:
def __init__(self):
self.layers = [
# before conv block
ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'),
BatchNormLayer(64),
ReLULayer(),
MaxPoolLayer(dim=3),
# conv block
ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1),
]
self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
self.output = self.forward(self.input_)
def copyFromKerasLayers(self, layers):
self.layers[0].copyFromKerasLayers(layers[1])
self.layers[1].copyFromKerasLayers(layers[2])
self.layers[4].copyFromKerasLayers(layers[5:])
def forward(self, X):
for layer in self.layers:
X = layer.forward(X)
return X
def predict(self, X):
assert(self.session is not None)
return self.session.run(
self.output,
feed_dict={self.input_: X}
)
def set_session(self, session):
self.session = session
self.layers[0].session = session
self.layers[1].session = session
self.layers[4].set_session(session)
def get_params(self):
params = []
for layer in self.layers:
            params += layer.get_params()
        return params
if __name__ == '__main__':
# you can also set weights to None, it doesn't matter
resnet = ResNet50(weights='imagenet')
# you can determine the correct layer
# by looking at resnet.layers in the console
partial_model = Model(
inputs=resnet.input,
outputs=resnet.layers[16].output
)
print(partial_model.summary())
# for layer in partial_model.layers:
# layer.trainable = False
my_partial_resnet = PartialResNet()
# make a fake image
X = np.random.random((1, 224, 224, 3))
# get keras output
keras_output = partial_model.predict(X)
# get my model output
init = tf.variables_initializer(my_partial_resnet.get_params())
# note: starting a new session messes up the Keras model
session = keras.backend.get_session()
my_partial_resnet.set_session(session)
session.run(init)
# first, just make sure we can get any output
first_output = my_partial_resnet.predict(X)
print("first_output.shape:", first_output.shape)
# copy params from Keras model
my_partial_resnet.copyFromKerasLayers(partial_model.layers)
# compare the 2 models
output = my_partial_resnet.predict(X)
diff = np.abs(output - keras_output).sum()
if diff < 1e-10:
print("Everything's great!")
else:
print("diff = %s" % diff)
| # https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
# Let's go up to the end of the first conv block
# to make sure everything has been loaded correctly
# compared to keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock
# NOTE: dependent on your Keras version
# this script used 2.1.1
# [<keras.engine.topology.InputLayer at 0x112fe4358>,
# <keras.layers.convolutional.Conv2D at 0x112fe46a0>,
# <keras.layers.normalization.BatchNormalization at 0x112fe4630>,
# <keras.layers.core.Activation at 0x112fe4eb8>,
# <keras.layers.pooling.MaxPooling2D at 0x10ed4be48>,
# <keras.layers.convolutional.Conv2D at 0x1130723c8>,
# <keras.layers.normalization.BatchNormalization at 0x113064710>,
# <keras.layers.core.Activation at 0x113092dd8>,
# <keras.layers.convolutional.Conv2D at 0x11309e908>,
# <keras.layers.normalization.BatchNormalization at 0x11308a550>,
# <keras.layers.core.Activation at 0x11312ac88>,
# <keras.layers.convolutional.Conv2D at 0x1131207b8>,
# <keras.layers.convolutional.Conv2D at 0x1131b8da0>,
# <keras.layers.normalization.BatchNormalization at 0x113115550>,
# <keras.layers.normalization.BatchNormalization at 0x1131a01d0>,
# <keras.layers.merge.Add at 0x11322f0f0>,
# <keras.layers.core.Activation at 0x113246cf8>]
# define some additional layers so they have a forward function
class ReLULayer:
def forward(self, X):
return tf.nn.relu(X)
def get_params(self):
return []
class MaxPoolLayer:
def __init__(self, dim):
self.dim = dim
def forward(self, X):
return tf.nn.max_pool(
X,
ksize=[1, self.dim, self.dim, 1],
strides=[1, 2, 2, 1],
padding='VALID'
)
def get_params(self):
return []
class PartialResNet:
def __init__(self):
self.layers = [
# before conv block
ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'),
BatchNormLayer(64),
ReLULayer(),
MaxPoolLayer(dim=3),
# conv block
ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1),
]
self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
self.output = self.forward(self.input_)
def copyFromKerasLayers(self, layers):
self.layers[0].copyFromKerasLayers(layers[1])
self.layers[1].copyFromKerasLayers(layers[2])
self.layers[4].copyFromKerasLayers(layers[5:])
def forward(self, X):
for layer in self.layers:
X = layer.forward(X)
return X
def predict(self, X):
assert(self.session is not None)
return self.session.run(
self.output,
feed_dict={self.input_: X}
)
def set_session(self, session):
self.session = session
self.layers[0].session = session
self.layers[1].session = session
self.layers[4].set_session(session)
def get_params(self):
params = []
for layer in self.layers:
            params += layer.get_params()
        return params
if __name__ == '__main__':
# you can also set weights to None, it doesn't matter
resnet = ResNet50(weights='imagenet')
# you can determine the correct layer
# by looking at resnet.layers in the console
partial_model = Model(
inputs=resnet.input,
outputs=resnet.layers[16].output
)
print(partial_model.summary())
# for layer in partial_model.layers:
# layer.trainable = False
my_partial_resnet = PartialResNet()
# make a fake image
X = np.random.random((1, 224, 224, 3))
# get keras output
keras_output = partial_model.predict(X)
# get my model output
init = tf.variables_initializer(my_partial_resnet.get_params())
# note: starting a new session messes up the Keras model
session = keras.backend.get_session()
my_partial_resnet.set_session(session)
session.run(init)
# first, just make sure we can get any output
first_output = my_partial_resnet.predict(X)
print("first_output.shape:", first_output.shape)
# copy params from Keras model
my_partial_resnet.copyFromKerasLayers(partial_model.layers)
# compare the 2 models
output = my_partial_resnet.predict(X)
diff = np.abs(output - keras_output).sum()
if diff < 1e-10:
print("Everything's great!")
else:
print("diff = %s" % diff)
| en | 0.710429 | # https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision # Note: you may need to update your version of future # sudo pip install -U future # Let's go up to the end of the first conv block # to make sure everything has been loaded correctly # compared to keras # NOTE: dependent on your Keras version # this script used 2.1.1 # [<keras.engine.topology.InputLayer at 0x112fe4358>, # <keras.layers.convolutional.Conv2D at 0x112fe46a0>, # <keras.layers.normalization.BatchNormalization at 0x112fe4630>, # <keras.layers.core.Activation at 0x112fe4eb8>, # <keras.layers.pooling.MaxPooling2D at 0x10ed4be48>, # <keras.layers.convolutional.Conv2D at 0x1130723c8>, # <keras.layers.normalization.BatchNormalization at 0x113064710>, # <keras.layers.core.Activation at 0x113092dd8>, # <keras.layers.convolutional.Conv2D at 0x11309e908>, # <keras.layers.normalization.BatchNormalization at 0x11308a550>, # <keras.layers.core.Activation at 0x11312ac88>, # <keras.layers.convolutional.Conv2D at 0x1131207b8>, # <keras.layers.convolutional.Conv2D at 0x1131b8da0>, # <keras.layers.normalization.BatchNormalization at 0x113115550>, # <keras.layers.normalization.BatchNormalization at 0x1131a01d0>, # <keras.layers.merge.Add at 0x11322f0f0>, # <keras.layers.core.Activation at 0x113246cf8>] # define some additional layers so they have a forward function # before conv block # conv block # you can also set weights to None, it doesn't matter # you can determine the correct layer # by looking at resnet.layers in the console # for layer in partial_model.layers: # layer.trainable = False # make a fake image # get keras output # get my model output # note: starting a new session messes up the Keras model # first, just make sure we can get any output # copy params from Keras model # compare the 2 models | 2.954269 | 3 |
configs/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_Flat_lmPbr_SO/FlowNet512_1.5AugCosyAAEGray_Flat_Pbr_11_glue.py | THU-DA-6D-Pose-Group/self6dpp | 33 | 6631315 | <gh_stars>10-100
_base_ = ["./FlowNet512_1.5AugCosyAAEGray_Flat_Pbr_01_ape.py"]
OUTPUT_DIR = "output/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_Flat_lmPbr_SO/glue"
DATASETS = dict(TRAIN=("lm_pbr_glue_train",), TEST=("lm_real_glue_test",))
# bbnc9
# objects glue Avg(1)
# ad_2 20.95 20.95
# ad_5 59.46 59.46
# ad_10 89.58 89.58
# rete_2 5.79 5.79
# rete_5 70.17 70.17
# rete_10 97.30 97.30
# re_2 8.11 8.11
# re_5 73.46 73.46
# re_10 97.39 97.39
# te_2 62.64 62.64
# te_5 94.31 94.31
# te_10 99.81 99.81
# proj_2 19.11 19.11
# proj_5 92.28 92.28
# proj_10 99.61 99.61
# re 4.32 4.32
# te 0.02 0.02
# init by mlBCE
# objects glue Avg(1)
# ad_2 21.43 21.43
# ad_5 63.90 63.90
# ad_10 90.73 90.73
# rete_2 7.24 7.24
# rete_5 74.03 74.03
# rete_10 98.17 98.17
# re_2 9.27 9.27
# re_5 76.45 76.45
# re_10 98.26 98.26
# te_2 64.58 64.58
# te_5 95.75 95.75
# te_10 99.81 99.81
# proj_2 21.04 21.04
# proj_5 93.73 93.73
# proj_10 99.90 99.90
# re 3.97 3.97
# te 0.02 0.02
| _base_ = ["./FlowNet512_1.5AugCosyAAEGray_Flat_Pbr_01_ape.py"]
OUTPUT_DIR = "output/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_Flat_lmPbr_SO/glue"
DATASETS = dict(TRAIN=("lm_pbr_glue_train",), TEST=("lm_real_glue_test",))
# bbnc9
# objects glue Avg(1)
# ad_2 20.95 20.95
# ad_5 59.46 59.46
# ad_10 89.58 89.58
# rete_2 5.79 5.79
# rete_5 70.17 70.17
# rete_10 97.30 97.30
# re_2 8.11 8.11
# re_5 73.46 73.46
# re_10 97.39 97.39
# te_2 62.64 62.64
# te_5 94.31 94.31
# te_10 99.81 99.81
# proj_2 19.11 19.11
# proj_5 92.28 92.28
# proj_10 99.61 99.61
# re 4.32 4.32
# te 0.02 0.02
# init by mlBCE
# objects glue Avg(1)
# ad_2 21.43 21.43
# ad_5 63.90 63.90
# ad_10 90.73 90.73
# rete_2 7.24 7.24
# rete_5 74.03 74.03
# rete_10 98.17 98.17
# re_2 9.27 9.27
# re_5 76.45 76.45
# re_10 98.26 98.26
# te_2 64.58 64.58
# te_5 95.75 95.75
# te_10 99.81 99.81
# proj_2 21.04 21.04
# proj_5 93.73 93.73
# proj_10 99.90 99.90
# re 3.97 3.97
# te 0.02 0.02 | en | 0.399192 | # bbnc9 # objects glue Avg(1) # ad_2 20.95 20.95 # ad_5 59.46 59.46 # ad_10 89.58 89.58 # rete_2 5.79 5.79 # rete_5 70.17 70.17 # rete_10 97.30 97.30 # re_2 8.11 8.11 # re_5 73.46 73.46 # re_10 97.39 97.39 # te_2 62.64 62.64 # te_5 94.31 94.31 # te_10 99.81 99.81 # proj_2 19.11 19.11 # proj_5 92.28 92.28 # proj_10 99.61 99.61 # re 4.32 4.32 # te 0.02 0.02 # init by mlBCE # objects glue Avg(1) # ad_2 21.43 21.43 # ad_5 63.90 63.90 # ad_10 90.73 90.73 # rete_2 7.24 7.24 # rete_5 74.03 74.03 # rete_10 98.17 98.17 # re_2 9.27 9.27 # re_5 76.45 76.45 # re_10 98.26 98.26 # te_2 64.58 64.58 # te_5 95.75 95.75 # te_10 99.81 99.81 # proj_2 21.04 21.04 # proj_5 93.73 93.73 # proj_10 99.90 99.90 # re 3.97 3.97 # te 0.02 0.02 | 1.457994 | 1 |
async_v20/endpoints/annotations.py | gshklover/async_v20 | 23 | 6631316 | <reponame>gshklover/async_v20<gh_stars>10-100
from ..definitions.types import ClientExtensions
from ..definitions.primitives import InstrumentName
from ..definitions.types import TransactionID
from ..definitions.types import DateTime
__all__ = ['Alias', 'AlignmentTimezone', 'Authorization', 'Count', 'DailyAlignment', 'FromTime',
'FromTransactionID', 'Ids', 'IncludeFirstQuery', 'Instruments', 'LastTransactionID',
'LongClientExtensions', 'LongUnits', 'PageSize', 'ShortClientExtensions', 'ShortUnits',
'Smooth', 'Snapshot', 'SinceTransactionID',
'ToTime', 'ToTransactionID', 'TradeClientExtensions', 'Type', 'Units', 'UserSpecifier']
class Bool(object):
def __new__(cls, arg):
return bool(arg)
class Authorization(str):
"""Contains OANDA's v20 API authorization token"""
pass
class Instruments(str):
pass
class Alias(str):
pass
class Count(int):
"""The number of candlesticks to return in the reponse.
Count should not be specified if both the start and end
parameters are provided, as the time range combined
    with the granularity will determine the number of
candlesticks to return. [default=500, maximum=5000]"""
def __new__(cls, value=500):
if not 0 < value <= 5000:
raise ValueError(f'Count: MUST be within range(1,5001). Supplied {value}')
return super().__new__(cls, value)
class Smooth(Bool):
"""A flag that controls whether the candlestick is
'smoothed' or not. A smoothed candlestick uses the
previous candle’s close price as its open price,
while an unsmoothed candlestick uses the first price
from its time range as its open price. [default=False]"""
def __new__(cls, value=False):
return super().__new__(cls, value)
class IncludeFirstQuery(Bool):
"""A flag that controls whether the candlestick that is covered by the
from time should be included in the results. This flag enables clients
to use the timestamp of the last completed candlestick received to poll
for future candlesticks but avoid receiving the previous candlestick
repeatedly. [default=True]"""
def __new__(cls, value=True):
return super().__new__(cls, value)
class DailyAlignment(int):
"""The hour of the day (in the specified timezone)
to use for granularities that have daily alignments.
[default=17, minimum=0, maximum=23]"""
def __new__(cls, value=17):
if not 0 <= value <= 23:
raise ValueError(f'DailyAlignment: Must be within range(24). Supplied: {value}')
return super().__new__(cls, value)
class AlignmentTimezone(str):
"""The timezone to use for the dailyAlignment parameter.
Candlesticks with daily alignment will be aligned to the
dailyAlignment hour within the alignmentTimezone.
[default=America/New_York]"""
# TODO find out what are the valid time zones
def __new__(cls, value='America/New_York'):
return super().__new__(cls, value)
class Ids(str):
pass
class LongUnits(str):
"""Indication of how much of the long Position to closeout. Either the
string "ALL", the string "NONE", or a DecimalNumber representing how many
units of the long position to close using a PositionCloseout MarketOrder.
The units specified must always be positive.
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class ShortUnits(str):
""" Indication of how much of the short Position to closeout. Either the
string "ALL", the string "NONE", or a DecimalNumber representing how many
units of the short position to close using a PositionCloseout
MarketOrder. The units specified must always be positive.
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class Snapshot(Bool):
"""Flag that enables/disables the sending of a pricing snapshot
when initially connecting to the stream. [default=True]"""
def __new__(cls, value=True):
return super().__new__(cls, value)
class PageSize(int):
"""The number of Transactions to include in each page
of the results. [default=100, maximum=1000]"""
def __new__(cls, value=100):
if not 0 < value <= 1000:
            raise ValueError(f'PageSize: Must be within range(1, 1001). Supplied: {value}')
return super().__new__(cls, value)
class Type(str):
pass
class UserSpecifier(str):
pass
class FromTime(DateTime):
"""A DateTime to be used as the starting period of a query"""
pass
class ToTime(DateTime):
"""A DateTime to be used as the ending period of a query"""
pass
class TradeClientExtensions(ClientExtensions):
pass
class LongClientExtensions(ClientExtensions):
"""The client extensions to add to the MarketOrder used to close the long
position
"""
pass
class ShortClientExtensions(ClientExtensions):
"""The client extensions to add to the MarketOrder used to close the short
position"""
pass
class Units(str):
"""Indication of how much of the Trade to close. Either the string "ALL"
(indicating that all of the Trade should be closed), or a DecimalNumber
representing the number of units of the open Trade to Close using a
TradeClose MarketOrder. The units specified must always be positive, and
the magnitude of the value cannot exceed the magnitude of the Trade’s
open units
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class LastTransactionID(TransactionID):
"""Contains the most recent TransactionID"""
pass
class SinceTransactionID(TransactionID):
"""The account changes to get Since LastTransactionID for account_changes() method"""
pass
class FromTransactionID(TransactionID):
"""A TransactionID to be used as the starting period of a query"""
pass
class ToTransactionID(TransactionID):
"""A TransactionID to be used as the ending period of a query"""
pass
class ServiceID(str):
"""The specifier of the service to get"""
pass
class ServiceListID(str):
"""Identification string of service list to get"""
pass
class Start(str):
"""Only show events which started after this date, inclusive.
Suggested format RFC 2822 or RFC 1123"""
pass
class End(str):
"""Only show events which started before this date, inclusive.
Suggested format RFC 2822 or RFC 1123"""
pass
class EventSid(str):
"""The SID of the event to get"""
pass
class StatusID(str):
"""The ID of the status to get"""
pass
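# Illustrative behaviour of the range-checked annotations above (added as a
# sketch; not part of the original module):
#   Count()            -> 500          (default)
#   Count(5000)        -> 5000         (upper bound is inclusive)
#   Count(6000)        -> ValueError   (outside 1..5000)
#   DailyAlignment(25) -> ValueError   (outside 0..23)
#   Units()            -> 'ALL'        (close the whole Trade by default)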
| from ..definitions.types import ClientExtensions
from ..definitions.primitives import InstrumentName
from ..definitions.types import TransactionID
from ..definitions.types import DateTime
__all__ = ['Alias', 'AlignmentTimezone', 'Authorization', 'Count', 'DailyAlignment', 'FromTime',
'FromTransactionID', 'Ids', 'IncludeFirstQuery', 'Instruments', 'LastTransactionID',
'LongClientExtensions', 'LongUnits', 'PageSize', 'ShortClientExtensions', 'ShortUnits',
'Smooth', 'Snapshot', 'SinceTransactionID',
'ToTime', 'ToTransactionID', 'TradeClientExtensions', 'Type', 'Units', 'UserSpecifier']
class Bool(object):
def __new__(cls, arg):
return bool(arg)
class Authorization(str):
"""Contains OANDA's v20 API authorization token"""
pass
class Instruments(str):
pass
class Alias(str):
pass
class Count(int):
"""The number of candlesticks to return in the reponse.
Count should not be specified if both the start and end
parameters are provided, as the time range combined
    with the granularity will determine the number of
candlesticks to return. [default=500, maximum=5000]"""
def __new__(cls, value=500):
if not 0 < value <= 5000:
raise ValueError(f'Count: MUST be within range(1,5001). Supplied {value}')
return super().__new__(cls, value)
class Smooth(Bool):
"""A flag that controls whether the candlestick is
'smoothed' or not. A smoothed candlestick uses the
previous candle’s close price as its open price,
while an unsmoothed candlestick uses the first price
from its time range as its open price. [default=False]"""
def __new__(cls, value=False):
return super().__new__(cls, value)
class IncludeFirstQuery(Bool):
"""A flag that controls whether the candlestick that is covered by the
from time should be included in the results. This flag enables clients
to use the timestamp of the last completed candlestick received to poll
for future candlesticks but avoid receiving the previous candlestick
repeatedly. [default=True]"""
def __new__(cls, value=True):
return super().__new__(cls, value)
class DailyAlignment(int):
"""The hour of the day (in the specified timezone)
to use for granularities that have daily alignments.
[default=17, minimum=0, maximum=23]"""
def __new__(cls, value=17):
if not 0 <= value <= 23:
raise ValueError(f'DailyAlignment: Must be within range(24). Supplied: {value}')
return super().__new__(cls, value)
class AlignmentTimezone(str):
"""The timezone to use for the dailyAlignment parameter.
Candlesticks with daily alignment will be aligned to the
dailyAlignment hour within the alignmentTimezone.
[default=America/New_York]"""
# TODO find out what are the valid time zones
def __new__(cls, value='America/New_York'):
return super().__new__(cls, value)
class Ids(str):
pass
class LongUnits(str):
"""Indication of how much of the long Position to closeout. Either the
string "ALL", the string "NONE", or a DecimalNumber representing how many
units of the long position to close using a PositionCloseout MarketOrder.
The units specified must always be positive.
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class ShortUnits(str):
""" Indication of how much of the short Position to closeout. Either the
string "ALL", the string "NONE", or a DecimalNumber representing how many
units of the short position to close using a PositionCloseout
MarketOrder. The units specified must always be positive.
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class Snapshot(Bool):
"""Flag that enables/disables the sending of a pricing snapshot
when initially connecting to the stream. [default=True]"""
def __new__(cls, value=True):
return super().__new__(cls, value)
class PageSize(int):
"""The number of Transactions to include in each page
of the results. [default=100, maximum=1000]"""
def __new__(cls, value=100):
if not 0 < value <= 1000:
            raise ValueError(f'PageSize: Must be within range(1, 1001). Supplied: {value}')
return super().__new__(cls, value)
class Type(str):
pass
class UserSpecifier(str):
pass
class FromTime(DateTime):
"""A DateTime to be used as the starting period of a query"""
pass
class ToTime(DateTime):
"""A DateTime to be used as the ending period of a query"""
pass
class TradeClientExtensions(ClientExtensions):
pass
class LongClientExtensions(ClientExtensions):
"""The client extensions to add to the MarketOrder used to close the long
position
"""
pass
class ShortClientExtensions(ClientExtensions):
"""The client extensions to add to the MarketOrder used to close the short
position"""
pass
class Units(str):
"""Indication of how much of the Trade to close. Either the string "ALL"
(indicating that all of the Trade should be closed), or a DecimalNumber
representing the number of units of the open Trade to Close using a
TradeClose MarketOrder. The units specified must always be positive, and
the magnitude of the value cannot exceed the magnitude of the Trade’s
open units
"""
def __new__(cls, value='ALL'):
return super().__new__(cls, value)
class LastTransactionID(TransactionID):
"""Contains the most recent TransactionID"""
pass
class SinceTransactionID(TransactionID):
"""The account changes to get Since LastTransactionID for account_changes() method"""
pass
class FromTransactionID(TransactionID):
"""A TransactionID to be used as the starting period of a query"""
pass
class ToTransactionID(TransactionID):
"""A TransactionID to be used as the ending period of a query"""
pass
class ServiceID(str):
"""The specifier of the service to get"""
pass
class ServiceListID(str):
"""Identification string of service list to get"""
pass
class Start(str):
"""Only show events which started after this date, inclusive.
Suggested format RFC 2822 or RFC 1123"""
pass
class End(str):
"""Only show events which started before this date, inclusive.
Suggested format RFC 2822 or RFC 1123"""
pass
class EventSid(str):
"""The SID of the event to get"""
pass
class StatusID(str):
"""The ID of the status to get"""
pass | en | 0.845138 | Contains OANDA's v20 API authorization token The number of candlesticks to return in the reponse. Count should not be specified if both the start and end parameters are provided, as the time range combined with the graularity will determine the number of candlesticks to return. [default=500, maximum=5000] A flag that controls whether the candlestick is 'smoothed' or not. A smoothed candlestick uses the previous candle’s close price as its open price, while an unsmoothed candlestick uses the first price from its time range as its open price. [default=False] A flag that controls whether the candlestick that is covered by the from time should be included in the results. This flag enables clients to use the timestamp of the last completed candlestick received to poll for future candlesticks but avoid receiving the previous candlestick repeatedly. [default=True] The hour of the day (in the specified timezone) to use for granularities that have daily alignments. [default=17, minimum=0, maximum=23] The timezone to use for the dailyAlignment parameter. Candlesticks with daily alignment will be aligned to the dailyAlignment hour within the alignmentTimezone. [default=America/New_York] # TODO find out what are the valid time zones Indication of how much of the long Position to closeout. Either the string "ALL", the string "NONE", or a DecimalNumber representing how many units of the long position to close using a PositionCloseout MarketOrder. The units specified must always be positive. Indication of how much of the short Position to closeout. Either the string "ALL", the string "NONE", or a DecimalNumber representing how many units of the short position to close using a PositionCloseout MarketOrder. The units specified must always be positive. Flag that enables/disables the sending of a pricing snapshot when initially connecting to the stream. [default=True] The number of Transactions to include in each page of the results. [default=100, maximum=1000] A DateTime to be used as the starting period of a query A DateTime to be used as the ending period of a query The client extensions to add to the MarketOrder used to close the long position The client extensions to add to the MarketOrder used to close the short position Indication of how much of the Trade to close. Either the string "ALL" (indicating that all of the Trade should be closed), or a DecimalNumber representing the number of units of the open Trade to Close using a TradeClose MarketOrder. The units specified must always be positive, and the magnitude of the value cannot exceed the magnitude of the Trade’s open units Contains the most recent TransactionID The account changes to get Since LastTransactionID for account_changes() method A TransactionID to be used as the starting period of a query A TransactionID to be used as the ending period of a query The specifier of the service to get Identification string of service list to get Only show events which started after this date, inclusive. Suggested format RFC 2822 or RFC 1123 Only show events which started before this date, inclusive. Suggested format RFC 2822 or RFC 1123 The SID of the event to get The ID of the status to get | 2.31923 | 2 |
pygpsnmea/gui/positionstab.py | tww-software/py_gps_nmea | 0 | 6631317 | <gh_stars>0
"""
tab to display a table of all the positions we have recorded
"""
import tkinter
class PosRepTab(tkinter.ttk.Frame):
"""
    tab to display all the recorded position reports (latitude, longitude and time)
Note:
basically a tab with a table inside
Args:
tabcontrol(tkinter.ttk.Notebook): ttk notebook to add this tab to
Attributes:
autoscroll(tkinter.BooleanVar): if true autoscroll as new positions
are added
autoscrollchk(tkinter.Checkbutton): checkbox for autoscroll
tabs(tkinter.ttk.Notebook): other tabs in the GUI
counter(int): number of positions
tree(tkinter.ttk.Treeview): table of positions
"""
def __init__(self, tabcontrol):
tkinter.ttk.Frame.__init__(self, tabcontrol)
self.autoscroll = tkinter.BooleanVar()
self.autoscroll.set(1)
self.autoscrollchk = tkinter.Checkbutton(
self, text='autoscroll as new positions are added',
var=self.autoscroll)
self.autoscrollchk.select()
self.autoscrollchk.pack(side=tkinter.TOP)
self.tabs = tabcontrol
self.counter = 0
self.tree = tkinter.ttk.Treeview(self)
verticalscrollbar = tkinter.ttk.Scrollbar(
self, orient=tkinter.VERTICAL, command=self.tree.yview)
verticalscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
horizontalscrollbar = tkinter.ttk.Scrollbar(
self, orient=tkinter.HORIZONTAL, command=self.tree.xview)
horizontalscrollbar.pack(side=tkinter.BOTTOM, fill=tkinter.X)
self.tree.configure(yscrollcommand=verticalscrollbar.set,
xscrollcommand=horizontalscrollbar.set)
self.create_message_table()
def create_message_table(self):
"""
        draw a large table in the positions tab of all the position reports we have
"""
self.tree.delete(*self.tree.get_children())
headers = ['Position No', 'Latitude', 'Longitude', 'Timestamp']
self.tree["columns"] = headers
for column in headers:
self.tree.column(column, width=200, minwidth=70,
stretch=tkinter.YES)
self.tree.heading(column, text=column, anchor=tkinter.W)
self.tree.pack(side=tkinter.TOP, fill='both', expand=tkinter.TRUE)
self.tree['show'] = 'headings'
def add_new_line(self, line):
"""
add a new line to the tree table and scroll down to it
Args:
line(list): items in line are pos no, lat, lon, time
"""
self.tree.insert('', self.counter, values=line)
self.counter += 1
if self.autoscroll.get() == 1:
self.tree.yview_moveto(1)
def clear(self):
"""
clear the tree of all data
"""
self.tree.delete(*self.tree.get_children())
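# Illustrative embedding sketch (assumed; not part of the original module):
#   root = tkinter.Tk()
#   tabs = tkinter.ttk.Notebook(root)
#   postab = PosRepTab(tabs)
#   tabs.add(postab, text='Positions')
#   tabs.pack(expand=1, fill='both')
#   postab.add_new_line([1, '51.5074', '-0.1278', '2020-01-01 12:00:00'])
#   root.mainloop()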
| """
tab to display a table of all the positions we have recorded
"""
import tkinter
class PosRepTab(tkinter.ttk.Frame):
"""
    tab to display all the recorded position reports (latitude, longitude and time)
Note:
basically a tab with a table inside
Args:
tabcontrol(tkinter.ttk.Notebook): ttk notebook to add this tab to
Attributes:
autoscroll(tkinter.BooleanVar): if true autoscroll as new positions
are added
autoscrollchk(tkinter.Checkbutton): checkbox for autoscroll
tabs(tkinter.ttk.Notebook): other tabs in the GUI
counter(int): number of positions
tree(tkinter.ttk.Treeview): table of positions
"""
def __init__(self, tabcontrol):
tkinter.ttk.Frame.__init__(self, tabcontrol)
self.autoscroll = tkinter.BooleanVar()
self.autoscroll.set(1)
self.autoscrollchk = tkinter.Checkbutton(
self, text='autoscroll as new positions are added',
var=self.autoscroll)
self.autoscrollchk.select()
self.autoscrollchk.pack(side=tkinter.TOP)
self.tabs = tabcontrol
self.counter = 0
self.tree = tkinter.ttk.Treeview(self)
verticalscrollbar = tkinter.ttk.Scrollbar(
self, orient=tkinter.VERTICAL, command=self.tree.yview)
verticalscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
horizontalscrollbar = tkinter.ttk.Scrollbar(
self, orient=tkinter.HORIZONTAL, command=self.tree.xview)
horizontalscrollbar.pack(side=tkinter.BOTTOM, fill=tkinter.X)
self.tree.configure(yscrollcommand=verticalscrollbar.set,
xscrollcommand=horizontalscrollbar.set)
self.create_message_table()
def create_message_table(self):
"""
draw a large table in positions tab of all the NMEA sentences we have
"""
self.tree.delete(*self.tree.get_children())
headers = ['Position No', 'Latitude', 'Longitude', 'Timestamp']
self.tree["columns"] = headers
for column in headers:
self.tree.column(column, width=200, minwidth=70,
stretch=tkinter.YES)
self.tree.heading(column, text=column, anchor=tkinter.W)
self.tree.pack(side=tkinter.TOP, fill='both', expand=tkinter.TRUE)
self.tree['show'] = 'headings'
def add_new_line(self, line):
"""
add a new line to the tree table and scroll down to it
Args:
line(list): items in line are pos no, lat, lon, time
"""
self.tree.insert('', self.counter, values=line)
self.counter += 1
if self.autoscroll.get() == 1:
self.tree.yview_moveto(1)
def clear(self):
"""
clear the tree of all data
"""
self.tree.delete(*self.tree.get_children()) | en | 0.603557 | tab to display a table of all the positions we have recorded tab to display all the NMEA Sentences and descriptions + times Note: basically a tab with a table inside Args: tabcontrol(tkinter.ttk.Notebook): ttk notebook to add this tab to Attributes: autoscroll(tkinter.BooleanVar): if true autoscroll as new positions are added autoscrollchk(tkinter.Checkbutton): checkbox for autoscroll tabs(tkinter.ttk.Notebook): other tabs in the GUI counter(int): number of positions tree(tkinter.ttk.Treeview): table of positions draw a large table in positions tab of all the NMEA sentences we have add a new line to the tree table and scroll down to it Args: line(list): items in line are pos no, lat, lon, time clear the tree of all data | 3.243286 | 3 |
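A minimal usage sketch for the PosRepTab class above, not part of the original file: it assumes the pygpsnmea package is importable, and the window title and the single sample row are purely illustrative.

import tkinter
import tkinter.ttk

from pygpsnmea.gui.positionstab import PosRepTab

root = tkinter.Tk()
root.title('position report demo')
notebook = tkinter.ttk.Notebook(root)
postab = PosRepTab(notebook)            # the tab builds its own Treeview and scrollbars
notebook.add(postab, text='Positions')
notebook.pack(fill='both', expand=True)
postab.add_new_line([1, '53.3498N', '6.2603W', '2020-01-01 12:00:00'])  # pos no, lat, lon, time
root.mainloop()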
loam/cli.py | amorison/loam | 0 | 6631318 |
"""Definition of CLI manager."""
from __future__ import annotations
from dataclasses import fields
import argparse
import copy
import pathlib
import typing
import warnings
from types import MappingProxyType
from . import error, _internal
if typing.TYPE_CHECKING:
from typing import Dict, List, Any, Optional, Mapping, TextIO, Union
from argparse import ArgumentParser, Namespace
from os import PathLike
from .base import Section, ConfigBase
BLK = ' \\\n' # cutting line in scripts
def _names(section: Section, option: str) -> List[str]:
"""List of cli strings for a given option."""
entry = section.meta_(option).entry
action = entry.cli_kwargs.get('action')
if action is _internal.Switch:
names = [f'-{option}', f'+{option}']
short = entry.cli_short
if short is not None:
names.append(f'-{short}')
names.append(f'+{short}')
else:
names = [f'--{option}']
short = entry.cli_short
if short is not None:
names.append(f'-{short}')
return names
class Subcmd:
"""Metadata of sub commands.
Attributes:
help: short description of the sub command.
sections: configuration sections used by the subcommand.
defaults: default value of options associated to the subcommand.
"""
def __init__(self, help_msg: str, *sections: str, **defaults: Any):
self.help = help_msg
self.sections = sections
self.defaults = defaults
class CLIManager:
"""CLI manager.
Args:
config_: the :class:`~loam.base.ConfigBase` holding option definitions.
common_: special subcommand, used to define the general description
of the CLI tool as well as configuration sections used by every
subcommand.
bare_: special subcommand, use it to define the configuration
sections that should be used when you call your CLI tool
without any subcommand.
subcmds: all the subcommands of your CLI tool. The name of each
*subcommand* is the name of the keyword argument passed on to
this function.
"""
def __init__(self, config_: ConfigBase,
common_: Optional[Subcmd] = None,
bare_: Optional[Subcmd] = None,
**subcmds: Subcmd):
self._conf = config_
self._subcmds = {}
for sub_name, sub_meta in subcmds.items():
if sub_name.isidentifier():
self._subcmds[sub_name] = sub_meta
else:
raise error.SubcmdError(sub_name)
self._common = common_ if common_ is not None else Subcmd('')
self._bare = bare_
# dict of dict [command][option] = section
self._opt_cmds: Dict[str, Dict[str, str]] = {}
# same as above but for bare command only [option] = section
self._opt_bare: Dict[str, str] = {}
if self.bare is not None:
self._cmd_opts_solver(None)
for cmd_name in self.subcmds:
self._opt_cmds[cmd_name] = {}
self._cmd_opts_solver(cmd_name)
self._parser = self._build_parser()
@property
def common(self) -> Subcmd:
"""Subcmd describing sections common to all subcommands."""
return self._common
@property
def bare(self) -> Optional[Subcmd]:
"""Subcmd used when the CLI tool is invoked without subcommand."""
return self._bare
@property
def subcmds(self) -> Mapping[str, Subcmd]:
"""Subcommands description."""
return MappingProxyType(self._subcmds)
def sections_list(self, cmd: Optional[str] = None) -> List[str]:
"""List of config sections used by a command.
Args:
cmd: command name, set to ``None`` or ``''`` for the bare command.
Returns:
list of configuration sections used by that command.
"""
sections = list(self.common.sections)
if not cmd:
if self.bare is not None:
sections.extend(self.bare.sections)
return sections
return []
sections.extend(self.subcmds[cmd].sections)
if hasattr(self._conf, cmd):
sections.append(cmd)
return sections
def _cmd_opts_solver(self, cmd_name: Optional[str]) -> None:
"""Scan options related to one command and enrich _opt_cmds."""
sections = self.sections_list(cmd_name)
cmd_dict = self._opt_cmds[cmd_name] if cmd_name else self._opt_bare
for sct in reversed(sections):
section: Section = getattr(self._conf, sct)
for fld in fields(section):
opt = fld.name
if not section.meta_(opt).entry.in_cli:
continue
if opt not in cmd_dict:
cmd_dict[opt] = sct
else:
warnings.warn(
'Command <{0}>: {1}.{2} shadowed by {3}.{2}'.format(
cmd_name, sct, opt, cmd_dict[opt]),
error.LoamWarning, stacklevel=4)
def _add_options_to_parser(self, opts_dict: Mapping[str, str],
parser: ArgumentParser) -> None:
"""Add options to a parser."""
for opt, sct in opts_dict.items():
section: Section = getattr(self._conf, sct)
entry = section.meta_(opt).entry
kwargs = copy.deepcopy(entry.cli_kwargs)
action = kwargs.get('action')
if action is _internal.Switch:
kwargs.update(nargs=0)
kwargs.update(help=entry.doc)
kwargs.setdefault('default', getattr(section, opt))
parser.add_argument(*_names(section, opt), **kwargs)
def _build_parser(self) -> ArgumentParser:
"""Build command line argument parser.
Returns:
the command line argument parser.
"""
main_parser = argparse.ArgumentParser(description=self.common.help,
prefix_chars='-+')
self._add_options_to_parser(self._opt_bare, main_parser)
main_parser.set_defaults(**self.common.defaults)
if self.bare is not None:
main_parser.set_defaults(**self.bare.defaults)
subparsers = main_parser.add_subparsers(dest='loam_sub_name')
for cmd_name, meta in self.subcmds.items():
kwargs = {'prefix_chars': '+-', 'help': meta.help}
dummy_parser = subparsers.add_parser(cmd_name, **kwargs)
self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)
dummy_parser.set_defaults(**meta.defaults)
return main_parser
def parse_args(self, arglist: Optional[List[str]] = None) -> Namespace:
"""Parse arguments and update options accordingly.
Args:
arglist: list of arguments to parse. If set to None,
``sys.argv[1:]`` is used.
Returns:
the argument namespace returned by the
:class:`argparse.ArgumentParser`.
"""
args = self._parser.parse_args(args=arglist)
sub_cmd = args.loam_sub_name
if sub_cmd is None:
for opt, sct in self._opt_bare.items():
section: Section = getattr(self._conf, sct)
val = getattr(args, opt, None)
section.cast_and_set_(opt, val)
else:
for opt, sct in self._opt_cmds[sub_cmd].items():
section = getattr(self._conf, sct)
val = getattr(args, opt, None)
section.cast_and_set_(opt, val)
return args
def _zsh_comp_command(self, zcf: TextIO, cmd: Optional[str],
grouping: bool, add_help: bool = True) -> None:
"""Write zsh _arguments compdef for a given command.
Args:
zcf: zsh compdef file.
cmd: command name, set to None or '' for bare command.
grouping: group options (zsh>=5.4).
add_help: add an help option.
"""
if add_help:
if grouping:
print("+ '(help)'", end=BLK, file=zcf)
print("'--help[show help message]'", end=BLK, file=zcf)
print("'-h[show help message]'", end=BLK, file=zcf)
# could deal with duplicate by iterating in reverse and keep set of
# already defined opts.
no_comp = ('store_true', 'store_false')
cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
for opt, sct in cmd_dict.items():
section: Section = getattr(self._conf, sct)
entry = section.meta_(opt).entry
comprule = entry.cli_zsh_comprule
if entry.cli_kwargs.get('action') == 'append':
grpfmt, optfmt = "+ '{}'", "'*{}[{}]{}'"
if comprule is None:
comprule = ''
else:
grpfmt, optfmt = "+ '({})'", "'{}[{}]{}'"
if entry.cli_kwargs.get('action') in no_comp \
or entry.cli_kwargs.get('nargs') == 0:
comprule = None
if comprule is None:
compstr = ''
elif comprule == '':
optfmt = optfmt.replace('[', '=[')
compstr = ': :( )'
else:
optfmt = optfmt.replace('[', '=[')
compstr = f': :{comprule}'
if grouping:
print(grpfmt.format(opt), end=BLK, file=zcf)
for name in _names(section, opt):
print(optfmt.format(name, entry.doc.replace("'", "'\"'\"'"),
compstr), end=BLK, file=zcf)
def zsh_complete(self, path: Union[str, PathLike], cmd: str, *cmds: str,
sourceable: bool = False,
force_grouping: bool = False) -> None:
"""Write zsh compdef script.
Args:
path: desired path of the compdef script.
cmd: command name that should be completed.
cmds: extra command names that should be completed.
sourceable: if True, the generated file will contain an explicit
call to ``compdef``, which means it can be sourced to activate
CLI completion.
force_grouping: if True, assume zsh supports grouping of options.
Otherwise, loam will attempt to check whether zsh >= 5.4.
"""
grouping = force_grouping or _internal.zsh_version() >= (5, 4)
path = pathlib.Path(path)
firstline = ['#compdef', cmd]
firstline.extend(cmds)
subcmds = list(self.subcmds.keys())
with path.open('w') as zcf:
print(*firstline, end='\n\n', file=zcf)
# main function
print(f'function _{cmd} {{', file=zcf)
print('local line', file=zcf)
print('_arguments -C', end=BLK, file=zcf)
if subcmds:
# list of subcommands and their description
substrs = [rf"{sub}\:'{self.subcmds[sub].help}'"
for sub in subcmds]
print('"1:Commands:(({}))"'.format(' '.join(substrs)),
end=BLK, file=zcf)
self._zsh_comp_command(zcf, None, grouping)
if subcmds:
print("'*::arg:->args'", file=zcf)
print('case $line[1] in', file=zcf)
for sub in subcmds:
print(f'{sub}) _{cmd}_{sub} ;;', file=zcf)
print('esac', file=zcf)
print('}', file=zcf)
# all subcommand completion handlers
for sub in subcmds:
print(f'\nfunction _{cmd}_{sub} {{', file=zcf)
print('_arguments', end=BLK, file=zcf)
self._zsh_comp_command(zcf, sub, grouping)
print('}', file=zcf)
if sourceable:
print(f'\ncompdef _{cmd} {cmd}', *cmds, file=zcf)
def _bash_comp_command(self, cmd: Optional[str],
add_help: bool = True) -> List[str]:
"""Build a list of all options for a given command.
Args:
cmd: command name, set to None or '' for bare command.
add_help: add an help option.
Returns:
list of CLI options strings.
"""
out = ['-h', '--help'] if add_help else []
cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
for opt, sct in cmd_dict.items():
section: Section = getattr(self._conf, sct)
out.extend(_names(section, opt))
return out
def bash_complete(self, path: Union[str, PathLike], cmd: str,
*cmds: str) -> None:
"""Write bash complete script.
Args:
path: desired path of the complete script.
cmd: command name that should be completed.
cmds: extra command names that should be completed.
"""
path = pathlib.Path(path)
subcmds = list(self.subcmds.keys())
with path.open('w') as bcf:
# main function
print(f'_{cmd}() {{', file=bcf)
print('COMPREPLY=()', file=bcf)
print(r'local cur=${COMP_WORDS[COMP_CWORD]}', end='\n\n', file=bcf)
optstr = ' '.join(self._bash_comp_command(None))
print(f'local options="{optstr}"', end='\n\n', file=bcf)
if subcmds:
print('local commands="{}"'.format(' '.join(subcmds)),
file=bcf)
print('declare -A suboptions', file=bcf)
for sub in subcmds:
optstr = ' '.join(self._bash_comp_command(sub))
print(f'suboptions[{sub}]="{optstr}"', file=bcf)
condstr = 'if'
for sub in subcmds:
print(condstr, r'[[ "${COMP_LINE}" == *"', sub, '"* ]] ; then',
file=bcf)
print(r'COMPREPLY=( `compgen -W "${suboptions[', sub,
r']}" -- ${cur}` )', sep='', file=bcf)
condstr = 'elif'
print(condstr, r'[[ ${cur} == -* ]] ; then', file=bcf)
print(r'COMPREPLY=( `compgen -W "${options}" -- ${cur}`)',
file=bcf)
if subcmds:
print(r'else', file=bcf)
print(r'COMPREPLY=( `compgen -W "${commands}" -- ${cur}`)',
file=bcf)
print('fi', file=bcf)
print('}', end='\n\n', file=bcf)
print(f'complete -F _{cmd} {cmd}', *cmds, file=bcf)
| """Definition of CLI manager."""
from __future__ import annotations
from dataclasses import fields
import argparse
import copy
import pathlib
import typing
import warnings
from types import MappingProxyType
from . import error, _internal
if typing.TYPE_CHECKING:
from typing import Dict, List, Any, Optional, Mapping, TextIO, Union
from argparse import ArgumentParser, Namespace
from os import PathLike
from .base import Section, ConfigBase
BLK = ' \\\n' # cutting line in scripts
def _names(section: Section, option: str) -> List[str]:
"""List of cli strings for a given option."""
entry = section.meta_(option).entry
action = entry.cli_kwargs.get('action')
if action is _internal.Switch:
names = [f'-{option}', f'+{option}']
short = entry.cli_short
if short is not None:
names.append(f'-{short}')
names.append(f'+{short}')
else:
names = [f'--{option}']
short = entry.cli_short
if short is not None:
names.append(f'-{short}')
return names
class Subcmd:
"""Metadata of sub commands.
Attributes:
help: short description of the sub command.
sections: configuration sections used by the subcommand.
defaults: default value of options associated to the subcommand.
"""
def __init__(self, help_msg: str, *sections: str, **defaults: Any):
self.help = help_msg
self.sections = sections
self.defaults = defaults
class CLIManager:
"""CLI manager.
Args:
config_: the :class:`~loam.base.ConfigBase` holding option definitions.
common_: special subcommand, used to define the general description
of the CLI tool as well as configuration sections used by every
subcommand.
bare_: special subcommand, use it to define the configuration
sections that should be used when you call your CLI tool
without any subcommand.
subcmds: all the subcommands of your CLI tool. The name of each
*subcommand* is the name of the keyword argument passed on to
this function.
"""
def __init__(self, config_: ConfigBase,
common_: Optional[Subcmd] = None,
bare_: Optional[Subcmd] = None,
**subcmds: Subcmd):
self._conf = config_
self._subcmds = {}
for sub_name, sub_meta in subcmds.items():
if sub_name.isidentifier():
self._subcmds[sub_name] = sub_meta
else:
raise error.SubcmdError(sub_name)
self._common = common_ if common_ is not None else Subcmd('')
self._bare = bare_
# dict of dict [command][option] = section
self._opt_cmds: Dict[str, Dict[str, str]] = {}
# same as above but for bare command only [option] = section
self._opt_bare: Dict[str, str] = {}
if self.bare is not None:
self._cmd_opts_solver(None)
for cmd_name in self.subcmds:
self._opt_cmds[cmd_name] = {}
self._cmd_opts_solver(cmd_name)
self._parser = self._build_parser()
@property
def common(self) -> Subcmd:
"""Subcmd describing sections common to all subcommands."""
return self._common
@property
def bare(self) -> Optional[Subcmd]:
"""Subcmd used when the CLI tool is invoked without subcommand."""
return self._bare
@property
def subcmds(self) -> Mapping[str, Subcmd]:
"""Subcommands description."""
return MappingProxyType(self._subcmds)
def sections_list(self, cmd: Optional[str] = None) -> List[str]:
"""List of config sections used by a command.
Args:
cmd: command name, set to ``None`` or ``''`` for the bare command.
Returns:
list of configuration sections used by that command.
"""
sections = list(self.common.sections)
if not cmd:
if self.bare is not None:
sections.extend(self.bare.sections)
return sections
return []
sections.extend(self.subcmds[cmd].sections)
if hasattr(self._conf, cmd):
sections.append(cmd)
return sections
def _cmd_opts_solver(self, cmd_name: Optional[str]) -> None:
"""Scan options related to one command and enrich _opt_cmds."""
sections = self.sections_list(cmd_name)
cmd_dict = self._opt_cmds[cmd_name] if cmd_name else self._opt_bare
for sct in reversed(sections):
section: Section = getattr(self._conf, sct)
for fld in fields(section):
opt = fld.name
if not section.meta_(opt).entry.in_cli:
continue
if opt not in cmd_dict:
cmd_dict[opt] = sct
else:
warnings.warn(
'Command <{0}>: {1}.{2} shadowed by {3}.{2}'.format(
cmd_name, sct, opt, cmd_dict[opt]),
error.LoamWarning, stacklevel=4)
def _add_options_to_parser(self, opts_dict: Mapping[str, str],
parser: ArgumentParser) -> None:
"""Add options to a parser."""
for opt, sct in opts_dict.items():
section: Section = getattr(self._conf, sct)
entry = section.meta_(opt).entry
kwargs = copy.deepcopy(entry.cli_kwargs)
action = kwargs.get('action')
if action is _internal.Switch:
kwargs.update(nargs=0)
kwargs.update(help=entry.doc)
kwargs.setdefault('default', getattr(section, opt))
parser.add_argument(*_names(section, opt), **kwargs)
def _build_parser(self) -> ArgumentParser:
"""Build command line argument parser.
Returns:
the command line argument parser.
"""
main_parser = argparse.ArgumentParser(description=self.common.help,
prefix_chars='-+')
self._add_options_to_parser(self._opt_bare, main_parser)
main_parser.set_defaults(**self.common.defaults)
if self.bare is not None:
main_parser.set_defaults(**self.bare.defaults)
subparsers = main_parser.add_subparsers(dest='loam_sub_name')
for cmd_name, meta in self.subcmds.items():
kwargs = {'prefix_chars': '+-', 'help': meta.help}
dummy_parser = subparsers.add_parser(cmd_name, **kwargs)
self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)
dummy_parser.set_defaults(**meta.defaults)
return main_parser
def parse_args(self, arglist: Optional[List[str]] = None) -> Namespace:
"""Parse arguments and update options accordingly.
Args:
arglist: list of arguments to parse. If set to None,
``sys.argv[1:]`` is used.
Returns:
the argument namespace returned by the
:class:`argparse.ArgumentParser`.
"""
args = self._parser.parse_args(args=arglist)
sub_cmd = args.loam_sub_name
if sub_cmd is None:
for opt, sct in self._opt_bare.items():
section: Section = getattr(self._conf, sct)
val = getattr(args, opt, None)
section.cast_and_set_(opt, val)
else:
for opt, sct in self._opt_cmds[sub_cmd].items():
section = getattr(self._conf, sct)
val = getattr(args, opt, None)
section.cast_and_set_(opt, val)
return args
def _zsh_comp_command(self, zcf: TextIO, cmd: Optional[str],
grouping: bool, add_help: bool = True) -> None:
"""Write zsh _arguments compdef for a given command.
Args:
zcf: zsh compdef file.
cmd: command name, set to None or '' for bare command.
grouping: group options (zsh>=5.4).
add_help: add an help option.
"""
if add_help:
if grouping:
print("+ '(help)'", end=BLK, file=zcf)
print("'--help[show help message]'", end=BLK, file=zcf)
print("'-h[show help message]'", end=BLK, file=zcf)
# could deal with duplicate by iterating in reverse and keep set of
# already defined opts.
no_comp = ('store_true', 'store_false')
cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
for opt, sct in cmd_dict.items():
section: Section = getattr(self._conf, sct)
entry = section.meta_(opt).entry
comprule = entry.cli_zsh_comprule
if entry.cli_kwargs.get('action') == 'append':
grpfmt, optfmt = "+ '{}'", "'*{}[{}]{}'"
if comprule is None:
comprule = ''
else:
grpfmt, optfmt = "+ '({})'", "'{}[{}]{}'"
if entry.cli_kwargs.get('action') in no_comp \
or entry.cli_kwargs.get('nargs') == 0:
comprule = None
if comprule is None:
compstr = ''
elif comprule == '':
optfmt = optfmt.replace('[', '=[')
compstr = ': :( )'
else:
optfmt = optfmt.replace('[', '=[')
compstr = f': :{comprule}'
if grouping:
print(grpfmt.format(opt), end=BLK, file=zcf)
for name in _names(section, opt):
print(optfmt.format(name, entry.doc.replace("'", "'\"'\"'"),
compstr), end=BLK, file=zcf)
def zsh_complete(self, path: Union[str, PathLike], cmd: str, *cmds: str,
sourceable: bool = False,
force_grouping: bool = False) -> None:
"""Write zsh compdef script.
Args:
path: desired path of the compdef script.
cmd: command name that should be completed.
cmds: extra command names that should be completed.
sourceable: if True, the generated file will contain an explicit
call to ``compdef``, which means it can be sourced to activate
CLI completion.
force_grouping: if True, assume zsh supports grouping of options.
Otherwise, loam will attempt to check whether zsh >= 5.4.
"""
grouping = force_grouping or _internal.zsh_version() >= (5, 4)
path = pathlib.Path(path)
firstline = ['#compdef', cmd]
firstline.extend(cmds)
subcmds = list(self.subcmds.keys())
with path.open('w') as zcf:
print(*firstline, end='\n\n', file=zcf)
# main function
print(f'function _{cmd} {{', file=zcf)
print('local line', file=zcf)
print('_arguments -C', end=BLK, file=zcf)
if subcmds:
# list of subcommands and their description
substrs = [rf"{sub}\:'{self.subcmds[sub].help}'"
for sub in subcmds]
print('"1:Commands:(({}))"'.format(' '.join(substrs)),
end=BLK, file=zcf)
self._zsh_comp_command(zcf, None, grouping)
if subcmds:
print("'*::arg:->args'", file=zcf)
print('case $line[1] in', file=zcf)
for sub in subcmds:
print(f'{sub}) _{cmd}_{sub} ;;', file=zcf)
print('esac', file=zcf)
print('}', file=zcf)
# all subcommand completion handlers
for sub in subcmds:
print(f'\nfunction _{cmd}_{sub} {{', file=zcf)
print('_arguments', end=BLK, file=zcf)
self._zsh_comp_command(zcf, sub, grouping)
print('}', file=zcf)
if sourceable:
print(f'\ncompdef _{cmd} {cmd}', *cmds, file=zcf)
def _bash_comp_command(self, cmd: Optional[str],
add_help: bool = True) -> List[str]:
"""Build a list of all options for a given command.
Args:
cmd: command name, set to None or '' for bare command.
add_help: add an help option.
Returns:
list of CLI options strings.
"""
out = ['-h', '--help'] if add_help else []
cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
for opt, sct in cmd_dict.items():
section: Section = getattr(self._conf, sct)
out.extend(_names(section, opt))
return out
def bash_complete(self, path: Union[str, PathLike], cmd: str,
*cmds: str) -> None:
"""Write bash complete script.
Args:
path: desired path of the complete script.
cmd: command name that should be completed.
cmds: extra command names that should be completed.
"""
path = pathlib.Path(path)
subcmds = list(self.subcmds.keys())
with path.open('w') as bcf:
# main function
print(f'_{cmd}() {{', file=bcf)
print('COMPREPLY=()', file=bcf)
print(r'local cur=${COMP_WORDS[COMP_CWORD]}', end='\n\n', file=bcf)
optstr = ' '.join(self._bash_comp_command(None))
print(f'local options="{optstr}"', end='\n\n', file=bcf)
if subcmds:
print('local commands="{}"'.format(' '.join(subcmds)),
file=bcf)
print('declare -A suboptions', file=bcf)
for sub in subcmds:
optstr = ' '.join(self._bash_comp_command(sub))
print(f'suboptions[{sub}]="{optstr}"', file=bcf)
condstr = 'if'
for sub in subcmds:
print(condstr, r'[[ "${COMP_LINE}" == *"', sub, '"* ]] ; then',
file=bcf)
print(r'COMPREPLY=( `compgen -W "${suboptions[', sub,
r']}" -- ${cur}` )', sep='', file=bcf)
condstr = 'elif'
print(condstr, r'[[ ${cur} == -* ]] ; then', file=bcf)
print(r'COMPREPLY=( `compgen -W "${options}" -- ${cur}`)',
file=bcf)
if subcmds:
print(r'else', file=bcf)
print(r'COMPREPLY=( `compgen -W "${commands}" -- ${cur}`)',
file=bcf)
print('fi', file=bcf)
print('}', end='\n\n', file=bcf)
print(f'complete -F _{cmd} {cmd}', *cmds, file=bcf) | en | 0.784835 | Definition of CLI manager. # cutting line in scripts List of cli strings for a given option. Metadata of sub commands. Attributes: help: short description of the sub command. sections: configuration sections used by the subcommand. defaults: default value of options associated to the subcommand. CLI manager. Args: config_: the :class:`~loam.base.ConfigBase` holding option definitions. common_: special subcommand, used to define the general description of the CLI tool as well as configuration sections used by every subcommand. bare_: special subcommand, use it to define the configuration sections that should be used when you call your CLI tool without any subcommand. subcmds: all the subcommands of your CLI tool. The name of each *subcommand* is the name of the keyword argument passed on to this function. # dict of dict [command][option] = section # same as above but for bare command only [option] = section Subcmd describing sections common to all subcommands. Subcmd used when the CLI tool is invoked without subcommand. Subcommands description. List of config sections used by a command. Args: cmd: command name, set to ``None`` or ``''`` for the bare command. Returns: list of configuration sections used by that command. Scan options related to one command and enrich _opt_cmds. Add options to a parser. Build command line argument parser. Returns: the command line argument parser. Parse arguments and update options accordingly. Args: arglist: list of arguments to parse. If set to None, ``sys.argv[1:]`` is used. Returns: the argument namespace returned by the :class:`argparse.ArgumentParser`. Write zsh _arguments compdef for a given command. Args: zcf: zsh compdef file. cmd: command name, set to None or '' for bare command. grouping: group options (zsh>=5.4). add_help: add an help option. # could deal with duplicate by iterating in reverse and keep set of # already defined opts. Write zsh compdef script. Args: path: desired path of the compdef script. cmd: command name that should be completed. cmds: extra command names that should be completed. sourceable: if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion. force_grouping: if True, assume zsh supports grouping of options. Otherwise, loam will attempt to check whether zsh >= 5.4. # main function # list of subcommands and their description # all subcommand completion handlers Build a list of all options for a given command. Args: cmd: command name, set to None or '' for bare command. add_help: add an help option. Returns: list of CLI options strings. Write bash complete script. Args: path: desired path of the complete script. cmd: command name that should be completed. cmds: extra command names that should be completed. # main function | 2.537518 | 3 |
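A rough usage sketch of the CLIManager/Subcmd API defined above. MyConfig is a hypothetical loam.base.ConfigBase subclass with a section named 'core'; defining it belongs to loam.base and is not shown in this file, so the snippet only runs once such a class exists.

from loam.cli import CLIManager, Subcmd

conf = MyConfig()  # hypothetical ConfigBase subclass instance, see loam.base
climan = CLIManager(
    conf,
    common_=Subcmd('my tool does things'),
    bare_=Subcmd('', 'core'),            # sections used when no subcommand is given
    run=Subcmd('run the tool', 'core'),  # a 'run' subcommand exposing the 'core' section
)
args = climan.parse_args()
if args.loam_sub_name == 'run':
    # conf.core has been updated from the command line at this point
    pass
climan.bash_complete('mytool.sh', 'mytool')  # optionally emit a bash completion script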
tests/app/test_schemas.py | cds-snc/notifier-api | 41 | 6631319 |
import pytest
from marshmallow import ValidationError
from sqlalchemy import desc
from app.dao.provider_details_dao import dao_update_provider_details
from app.models import ProviderDetailsHistory
from tests.app.db import create_api_key
def test_job_schema_doesnt_return_notifications(sample_notification_with_job):
from app.schemas import job_schema
job = sample_notification_with_job.job
assert job.notifications.count() == 1
data, errors = job_schema.dump(job)
assert not errors
assert "notifications" not in data
def test_notification_schema_ignores_absent_api_key(sample_notification_with_job):
from app.schemas import notification_with_template_schema
data = notification_with_template_schema.dump(sample_notification_with_job).data
assert data["key_name"] is None
def test_notification_schema_adds_api_key_name(sample_notification):
from app.schemas import notification_with_template_schema
api_key = create_api_key(sample_notification.service, key_name="Test key")
sample_notification.api_key = api_key
data = notification_with_template_schema.dump(sample_notification).data
assert data["key_name"] == "Test key"
@pytest.mark.parametrize(
"schema_name",
[
"notification_with_template_schema",
"notification_schema",
"notification_with_template_schema",
"notification_with_personalisation_schema",
],
)
def test_notification_schema_has_correct_status(sample_notification, schema_name):
from app import schemas
data = getattr(schemas, schema_name).dump(sample_notification).data
assert data["status"] == sample_notification.status
@pytest.mark.parametrize(
"user_attribute, user_value",
[
("name", "New User"),
("email_address", "<EMAIL>"),
("mobile_number", "+16502532222"),
("blocked", False),
],
)
def test_user_update_schema_accepts_valid_attribute_pairs(user_attribute, user_value):
update_dict = {user_attribute: user_value}
from app.schemas import user_update_schema_load_json
data, errors = user_update_schema_load_json.load(update_dict)
assert not errors
@pytest.mark.parametrize(
"user_attribute, user_value",
[
("name", None),
("name", ""),
("email_address", "<EMAIL>"),
("mobile_number", "+44077009"),
],
)
def test_user_update_schema_rejects_invalid_attribute_pairs(user_attribute, user_value):
from app.schemas import user_update_schema_load_json
update_dict = {user_attribute: user_value}
with pytest.raises(ValidationError):
data, errors = user_update_schema_load_json.load(update_dict)
@pytest.mark.parametrize(
"user_attribute",
[
"id",
"updated_at",
"created_at",
"user_to_service",
"_password",
"verify_codes",
"logged_in_at",
"password_changed_at",
"failed_login_count",
"state",
"platform_admin",
],
)
def test_user_update_schema_rejects_disallowed_attribute_keys(user_attribute):
update_dict = {user_attribute: "not important"}
from app.schemas import user_update_schema_load_json
with pytest.raises(ValidationError) as excinfo:
data, errors = user_update_schema_load_json.load(update_dict)
assert excinfo.value.messages["_schema"][0] == "Unknown field name {}".format(user_attribute)
def test_provider_details_schema_returns_user_details(mocker, sample_user, current_sms_provider):
from app.schemas import provider_details_schema
mocker.patch("app.provider_details.switch_providers.get_user_by_id", return_value=sample_user)
current_sms_provider.created_by = sample_user
data = provider_details_schema.dump(current_sms_provider).data
assert sorted(data["created_by"].keys()) == sorted(["id", "email_address", "name"])
def test_provider_details_history_schema_returns_user_details(
mocker, sample_user, restore_provider_details, current_sms_provider
):
from app.schemas import provider_details_schema
mocker.patch("app.provider_details.switch_providers.get_user_by_id", return_value=sample_user)
current_sms_provider.created_by_id = sample_user.id
data = provider_details_schema.dump(current_sms_provider).data
dao_update_provider_details(current_sms_provider)
current_sms_provider_in_history = (
ProviderDetailsHistory.query.filter(ProviderDetailsHistory.id == current_sms_provider.id)
.order_by(desc(ProviderDetailsHistory.version))
.first()
)
data = provider_details_schema.dump(current_sms_provider_in_history).data
assert sorted(data["created_by"].keys()) == sorted(["id", "email_address", "name"])
| import pytest
from marshmallow import ValidationError
from sqlalchemy import desc
from app.dao.provider_details_dao import dao_update_provider_details
from app.models import ProviderDetailsHistory
from tests.app.db import create_api_key
def test_job_schema_doesnt_return_notifications(sample_notification_with_job):
from app.schemas import job_schema
job = sample_notification_with_job.job
assert job.notifications.count() == 1
data, errors = job_schema.dump(job)
assert not errors
assert "notifications" not in data
def test_notification_schema_ignores_absent_api_key(sample_notification_with_job):
from app.schemas import notification_with_template_schema
data = notification_with_template_schema.dump(sample_notification_with_job).data
assert data["key_name"] is None
def test_notification_schema_adds_api_key_name(sample_notification):
from app.schemas import notification_with_template_schema
api_key = create_api_key(sample_notification.service, key_name="Test key")
sample_notification.api_key = api_key
data = notification_with_template_schema.dump(sample_notification).data
assert data["key_name"] == "Test key"
@pytest.mark.parametrize(
"schema_name",
[
"notification_with_template_schema",
"notification_schema",
"notification_with_template_schema",
"notification_with_personalisation_schema",
],
)
def test_notification_schema_has_correct_status(sample_notification, schema_name):
from app import schemas
data = getattr(schemas, schema_name).dump(sample_notification).data
assert data["status"] == sample_notification.status
@pytest.mark.parametrize(
"user_attribute, user_value",
[
("name", "New User"),
("email_address", "<EMAIL>"),
("mobile_number", "+16502532222"),
("blocked", False),
],
)
def test_user_update_schema_accepts_valid_attribute_pairs(user_attribute, user_value):
update_dict = {user_attribute: user_value}
from app.schemas import user_update_schema_load_json
data, errors = user_update_schema_load_json.load(update_dict)
assert not errors
@pytest.mark.parametrize(
"user_attribute, user_value",
[
("name", None),
("name", ""),
("email_address", "<EMAIL>"),
("mobile_number", "+44077009"),
],
)
def test_user_update_schema_rejects_invalid_attribute_pairs(user_attribute, user_value):
from app.schemas import user_update_schema_load_json
update_dict = {user_attribute: user_value}
with pytest.raises(ValidationError):
data, errors = user_update_schema_load_json.load(update_dict)
@pytest.mark.parametrize(
"user_attribute",
[
"id",
"updated_at",
"created_at",
"user_to_service",
"_password",
"verify_codes",
"logged_in_at",
"password_changed_at",
"failed_login_count",
"state",
"platform_admin",
],
)
def test_user_update_schema_rejects_disallowed_attribute_keys(user_attribute):
update_dict = {user_attribute: "not important"}
from app.schemas import user_update_schema_load_json
with pytest.raises(ValidationError) as excinfo:
data, errors = user_update_schema_load_json.load(update_dict)
assert excinfo.value.messages["_schema"][0] == "Unknown field name {}".format(user_attribute)
def test_provider_details_schema_returns_user_details(mocker, sample_user, current_sms_provider):
from app.schemas import provider_details_schema
mocker.patch("app.provider_details.switch_providers.get_user_by_id", return_value=sample_user)
current_sms_provider.created_by = sample_user
data = provider_details_schema.dump(current_sms_provider).data
assert sorted(data["created_by"].keys()) == sorted(["id", "email_address", "name"])
def test_provider_details_history_schema_returns_user_details(
mocker, sample_user, restore_provider_details, current_sms_provider
):
from app.schemas import provider_details_schema
mocker.patch("app.provider_details.switch_providers.get_user_by_id", return_value=sample_user)
current_sms_provider.created_by_id = sample_user.id
data = provider_details_schema.dump(current_sms_provider).data
dao_update_provider_details(current_sms_provider)
current_sms_provider_in_history = (
ProviderDetailsHistory.query.filter(ProviderDetailsHistory.id == current_sms_provider.id)
.order_by(desc(ProviderDetailsHistory.version))
.first()
)
data = provider_details_schema.dump(current_sms_provider_in_history).data
assert sorted(data["created_by"].keys()) == sorted(["id", "email_address", "name"]) | none | 1 | 2.079191 | 2 |
|
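The tests above depend on the marshmallow 2.x convention where dump() and load() return a (data, errors) pair instead of raising by default. A small self-contained sketch of that pattern, unrelated to the notification schemas themselves:

from marshmallow import Schema, fields

class UserUpdateSchema(Schema):
    name = fields.String(required=True)
    email_address = fields.Email()

schema = UserUpdateSchema()
data, errors = schema.load({'name': 'New User', 'email_address': 'user@example.com'})
assert not errors

data, errors = schema.load({'email_address': 'not-an-email'})
assert 'email_address' in errors  # invalid input shows up in errors rather than raising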
chat/tests_apps.py | helmetwearer/dating-app | 0 | 6631320 |
from django.apps import apps
from django.test import TestCase
from .apps import ChatConfig
class test_chatconfig(TestCase):
def test_app(self):
self.assertEqual("chat", ChatConfig.name)
self.assertEqual("chat", apps.get_app_config("chat").name) | <filename>chat/tests_apps.py
from django.apps import apps
from django.test import TestCase
from .apps import ChatConfig
class test_chatconfig(TestCase):
def test_app(self):
self.assertEqual("chat", ChatConfig.name)
self.assertEqual("chat", apps.get_app_config("chat").name) | none | 1 | 2.162179 | 2 |
|
server.py | Ailol/Simple-FTP-server | 1 | 6631321 |
import argparse
import os
from network.sftp import Sftp
class Server(object):
"""
Simple server implementation
SFTP holds all the magic
Starts the server up by arguments given by the user.
"""
def __init__(self, *args):
try:
self.sftp = Sftp(args[0], args[1])
except BaseException:
raise ValueError('hepp')
def connect(self):
self.sftp.setup_connection()
def list_content(self):
for file in os.listdir("./server_disk"):
print(file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A simple way to transfer encrypted files')
parser.add_argument(
'host',
type=str,
help='Enter host (example: localhost)',
default='localhost')
parser.add_argument(
'port',
type=int,
help='Enter port(8000->)',
default=8080)
args = parser.parse_args()
srv = Server(args.host, args.port)
srv.connect()
 |
import argparse
import os
from network.sftp import Sftp
class Server(object):
"""
Simple server implementation
SFTP holds all the magic
Starts the server up by arguments given by the user.
"""
def __init__(self, *args):
try:
self.sftp = Sftp(args[0], args[1])
except BaseException:
raise ValueError('hepp')
def connect(self):
self.sftp.setup_connection()
def list_content(self):
for file in os.listdir("./server_disk"):
print(file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A simple way to transfer encrypted files')
parser.add_argument(
'host',
type=str,
help='Enter host (example: localhost)',
default='localhost')
parser.add_argument(
'port',
type=int,
help='Enter port(8000->)',
default=8080)
args = parser.parse_args()
srv = Server(args.host, args.port)
srv.connect()
| en | 0.657727 | Simple server implementation SFTP holds all the magic Starts the server up by arguments given by the user. | 3.479287 | 3 |
Face_Enhancement/models/networks/__init__.py | abdullahselek/Bringing-Old-Photos-Back-to-Life | 1 | 6631322 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from models.networks.base_network import BaseNetwork
from models.networks.generator import *
from models.networks.encoder import *
import util.util as util
def find_network_using_name(target_network_name, filename):
target_class_name = target_network_name + filename
module_name = "models.networks." + filename
network = util.find_class_in_module(target_class_name, module_name)
assert issubclass(network, BaseNetwork), (
"Class %s should be a subclass of BaseNetwork" % network
)
return network
def modify_commandline_options(parser, is_train):
opt, _ = parser.parse_known_args()
netG_cls = find_network_using_name(opt.netG, "generator")
parser = netG_cls.modify_commandline_options(parser, is_train)
if is_train:
netD_cls = find_network_using_name(opt.netD, "discriminator")
parser = netD_cls.modify_commandline_options(parser, is_train)
netE_cls = find_network_using_name("conv", "encoder")
parser = netE_cls.modify_commandline_options(parser, is_train)
return parser
def create_network(cls, opt):
net = cls(opt)
net.print_network()
if len(opt.gpu_ids) > 0:
assert torch.cuda.is_available()
net.cuda()
net.init_weights(opt.init_type, opt.init_variance)
return net
def define_G(opt):
netG_cls = find_network_using_name(opt.netG, "generator")
return create_network(netG_cls, opt)
def define_D(opt):
netD_cls = find_network_using_name(opt.netD, "discriminator")
return create_network(netD_cls, opt)
def define_E(opt):
# there exists only one encoder type
netE_cls = find_network_using_name("conv", "encoder")
return create_network(netE_cls, opt)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from models.networks.base_network import BaseNetwork
from models.networks.generator import *
from models.networks.encoder import *
import util.util as util
def find_network_using_name(target_network_name, filename):
target_class_name = target_network_name + filename
module_name = "models.networks." + filename
network = util.find_class_in_module(target_class_name, module_name)
assert issubclass(network, BaseNetwork), (
"Class %s should be a subclass of BaseNetwork" % network
)
return network
def modify_commandline_options(parser, is_train):
opt, _ = parser.parse_known_args()
netG_cls = find_network_using_name(opt.netG, "generator")
parser = netG_cls.modify_commandline_options(parser, is_train)
if is_train:
netD_cls = find_network_using_name(opt.netD, "discriminator")
parser = netD_cls.modify_commandline_options(parser, is_train)
netE_cls = find_network_using_name("conv", "encoder")
parser = netE_cls.modify_commandline_options(parser, is_train)
return parser
def create_network(cls, opt):
net = cls(opt)
net.print_network()
if len(opt.gpu_ids) > 0:
assert torch.cuda.is_available()
net.cuda()
net.init_weights(opt.init_type, opt.init_variance)
return net
def define_G(opt):
netG_cls = find_network_using_name(opt.netG, "generator")
return create_network(netG_cls, opt)
def define_D(opt):
netD_cls = find_network_using_name(opt.netD, "discriminator")
return create_network(netD_cls, opt)
def define_E(opt):
# there exists only one encoder type
netE_cls = find_network_using_name("conv", "encoder")
return create_network(netE_cls, opt)
| en | 0.764855 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # there exists only one encoder type | 2.156622 | 2 |
contrib/cli_scripts/pe_console_group_to_nm.py | coxmediagroup/nodemeister | 0 | 6631323 | #!/usr/bin/env python
"""
Script to migrate a group, along with its classes and
parameters (but not nodes, parents, or child groups)
from Puppet Enterprise Console (or Dashboard?) to
NodeMeister.
Since Console/Dashboard doesn't have a real API, this
directly accesses the MySQL database.
"""
import MySQLdb
import MySQLdb.cursors # I don't like positional refs in DB cursors
import optparse
import sys
import requests
import json
VERBOSE = False
NOOP = False
def get_group_from_dashboard(cur, groupname):
sql = "SELECT * FROM node_groups WHERE name='%s'" % groupname
cur.execute(sql)
result = cur.fetchone()
group_id = result['id']
ret = {'params': {}, 'classes': []}
sql = "SELECT `key`,`value` FROM parameters WHERE parameterable_type = 'NodeGroup' AND parameterable_id=%d" % group_id
cur.execute(sql)
result = cur.fetchall()
for row in result:
ret['params'][row['key']] = row['value']
sql = "SELECT nc.name FROM node_group_class_memberships AS ng LEFT JOIN node_classes AS nc ON nc.id=ng.node_class_id WHERE node_group_id=%d" % group_id
cur.execute(sql)
result = cur.fetchall()
for row in result:
ret['classes'].append(row['name'])
return ret
def get_group_id(base_url, name):
    # base_url is expected in the form http://host/, matching the other REST helpers below
    r = requests.get("%senc/groups/" % base_url)
j = r.json()
for n in j:
if n['name'] == name:
return n['id']
return False
def add_group(base_url, name, description):
"""
adds a group to NodeMeister, retrns the ID of the added group or False on failure
"""
payload = {'name': name, 'description': description}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return get_group_id(base_url, name)
return False
def add_param_to_group(base_url, gid, pname, pval):
"""
adds a param to a group in NodeMeister, returns True on success or False on failure
"""
if pval.strip() == "" or pval == "" or pval == "''":
pval = None
payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/parameters/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return True
return False
def add_class_to_group(base_url, gid, classname, classparams=None):
"""
adds a class to a group in NodeMeister, returns True on success or False on failure
"""
payload = {'group': gid, 'classname': classname, 'classparams': classparams}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/classes/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return True
return False
def create_nodemeister_group(base_url, group, dash_group):
"""
Creates a group in nodemeister
"""
gid = get_group_id(base_url, group)
if gid is not False:
print("ERROR: group %s already exists in NodeMeister with id %d." % (group, gid))
return False
# ok, try adding the group
gid = add_group(base_url, group, "imported by pe_console_group_to_nm.py")
if gid is False:
print("ERROR adding group in Nodemeister.")
return False
else:
print("Group added to NodeMeister with id %d" % gid)
ok = True
# add the params
for p in dash_group['params']:
res = add_param_to_group(base_url, gid, p, dash_group['params'][p])
if not res:
print("ERROR adding param %s with value '%s' to group %d" % (p, dash_group['params'][p], gid))
ok = False
if VERBOSE:
print("\tadded param %s with value '%s' to group %d" % (p, dash_group['params'][p], gid))
for c in dash_group['classes']:
res = add_class_to_group(base_url, gid, c)
if not res:
print("ERROR adding class %s to group %d" % (c, gid))
ok = False
if VERBOSE:
print("\tadded class %s to group %d" % (c, gid))
if ok is False:
return False
return gid
def main():
p = optparse.OptionParser()
p.add_option('-g', '--group', dest='group',
help='group name to get from dashboard')
p.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='verbose output')
p.add_option('-t', '--noop', dest='noop', default=False, action='store_true',
help='just print what would be done, do not update NodeMeister')
p.add_option('-n', '--nodemeister', dest='nodemeister', action='store', type='string',
help='nodemeister base URL in form http://host/')
options, args = p.parse_args()
    global VERBOSE, NOOP
    VERBOSE = options.verbose
    NOOP = options.noop
if not options.group:
print("ERROR: you must specify a group to get with -g|--group")
sys.exit(1)
if not options.nodemeister:
print("ERROR: You must specify NodeMeister Base URL with -n|--nodemeister")
sys.exit(1)
conn = MySQLdb.connect (host = "127.0.0.1",
user = "puppetenterprise",
passwd = "<PASSWORD>",
db = "puppetenterprise",
cursorclass=MySQLdb.cursors.DictCursor)
cur = conn.cursor()
dash_group = get_group_from_dashboard(cur, options.group)
print("Classes:")
for c in dash_group['classes']:
print(" - %s" % c)
print("\nParameters:")
for p in dash_group['params']:
print(" - %s : '%s'" % (p, dash_group['params'][p]))
if not options.noop:
res = create_nodemeister_group(options.nodemeister, options.group, dash_group)
if res is False:
print("Error.")
sys.exit(1)
else:
print("Ok, group created with ID %d" % res)
else:
print("NOOP - doing nothing.")
return 0
if __name__ == "__main__":
main()
| #!/usr/bin/env python
"""
Script to migrate a group, along with its classes and
parameters (but not nodes, parents, or child groups)
from Puppet Enterprise Console (or Dashboard?) to
NodeMeister.
Since Console/Dashboard doesn't have a real API, this
directly accesses the MySQL database.
"""
import MySQLdb
import MySQLdb.cursors # I don't like positional refs in DB cursors
import optparse
import sys
import requests
import json
VERBOSE = False
NOOP = False
def get_group_from_dashboard(cur, groupname):
sql = "SELECT * FROM node_groups WHERE name='%s'" % groupname
cur.execute(sql)
result = cur.fetchone()
group_id = result['id']
ret = {'params': {}, 'classes': []}
sql = "SELECT `key`,`value` FROM parameters WHERE parameterable_type = 'NodeGroup' AND parameterable_id=%d" % group_id
cur.execute(sql)
result = cur.fetchall()
for row in result:
ret['params'][row['key']] = row['value']
sql = "SELECT nc.name FROM node_group_class_memberships AS ng LEFT JOIN node_classes AS nc ON nc.id=ng.node_class_id WHERE node_group_id=%d" % group_id
cur.execute(sql)
result = cur.fetchall()
for row in result:
ret['classes'].append(row['name'])
return ret
def get_group_id(base_url, name):
    # base_url is expected in the form http://host/, matching the other REST helpers below
    r = requests.get("%senc/groups/" % base_url)
j = r.json()
for n in j:
if n['name'] == name:
return n['id']
return False
def add_group(base_url, name, description):
"""
adds a group to NodeMeister, retrns the ID of the added group or False on failure
"""
payload = {'name': name, 'description': description}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return get_group_id(base_url, name)
return False
def add_param_to_group(base_url, gid, pname, pval):
"""
adds a param to a group in NodeMeister, returns True on success or False on failure
"""
if pval.strip() == "" or pval == "" or pval == "''":
pval = None
payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/parameters/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return True
return False
def add_class_to_group(base_url, gid, classname, classparams=None):
"""
adds a class to a group in NodeMeister, returns True on success or False on failure
"""
payload = {'group': gid, 'classname': classname, 'classparams': classparams}
headers = {'content-type': 'application/json'}
r = requests.post("%senc/classes/groups/" % base_url, data=json.dumps(payload), headers=headers)
if r.status_code == 201:
return True
return False
def create_nodemeister_group(base_url, group, dash_group):
"""
Creates a group in nodemeister
"""
gid = get_group_id(base_url, group)
if gid is not False:
print("ERROR: group %s already exists in NodeMeister with id %d." % (group, gid))
return False
# ok, try adding the group
gid = add_group(base_url, group, "imported by pe_console_group_to_nm.py")
if gid is False:
print("ERROR adding group in Nodemeister.")
return False
else:
print("Group added to NodeMeister with id %d" % gid)
ok = True
# add the params
for p in dash_group['params']:
res = add_param_to_group(base_url, gid, p, dash_group['params'][p])
if not res:
print("ERROR adding param %s with value '%s' to group %d" % (p, dash_group['params'][p], gid))
ok = False
if VERBOSE:
print("\tadded param %s with value '%s' to group %d" % (p, dash_group['params'][p], gid))
for c in dash_group['classes']:
res = add_class_to_group(base_url, gid, c)
if not res:
print("ERROR adding class %s to group %d" % (c, gid))
ok = False
if VERBOSE:
print("\tadded class %s to group %d" % (c, gid))
if ok is False:
return False
return gid
def main():
p = optparse.OptionParser()
p.add_option('-g', '--group', dest='group',
help='group name to get from dashboard')
p.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='verbose output')
p.add_option('-t', '--noop', dest='noop', default=False, action='store_true',
help='just print what would be done, do not update NodeMeister')
p.add_option('-n', '--nodemeister', dest='nodemeister', action='store', type='string',
help='nodemeister base URL in form http://host/')
options, args = p.parse_args()
    global VERBOSE, NOOP
    VERBOSE = options.verbose
    NOOP = options.noop
if not options.group:
print("ERROR: you must specify a group to get with -g|--group")
sys.exit(1)
if not options.nodemeister:
print("ERROR: You must specify NodeMeister Base URL with -n|--nodemeister")
sys.exit(1)
conn = MySQLdb.connect (host = "127.0.0.1",
user = "puppetenterprise",
passwd = "<PASSWORD>",
db = "puppetenterprise",
cursorclass=MySQLdb.cursors.DictCursor)
cur = conn.cursor()
dash_group = get_group_from_dashboard(cur, options.group)
print("Classes:")
for c in dash_group['classes']:
print(" - %s" % c)
print("\nParameters:")
for p in dash_group['params']:
print(" - %s : '%s'" % (p, dash_group['params'][p]))
if not options.noop:
res = create_nodemeister_group(options.nodemeister, options.group, dash_group)
if res is False:
print("Error.")
sys.exit(1)
else:
print("Ok, group created with ID %d" % res)
else:
print("NOOP - doing nothing.")
return 0
if __name__ == "__main__":
main()
| en | 0.741592 | #!/usr/bin/env python Script to migrate a group, along with its classes and parameters (but not nodes, parents, or child groups) from Puppet Enterprise Console (or Dashboard?) to NodeMeister. Since Console/Dashboard doesn't have a real API, this directly accesses the MySQL database. # I don't like positional refs in DB cursors adds a group to NodeMeister, retrns the ID of the added group or False on failure adds a param to a group in NodeMeister, returns True on success or False on failure adds a class to a group in NodeMeister, returns True on success or False on failure Creates a group in nodemeister # ok, try adding the group # add the params | 2.700965 | 3 |
rmgpy/rmg/inputTest.py | speth/RMG-Py | 1 | 6631324 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import unittest
from rmgpy.rmg.main import RMG
from rmgpy.rmg import input as inp
###################################################
def setUpModule():
"""
A method that is run before the class.
"""
# set-up RMG object and get global rmg object in input.py file
# so methods can be tested
global rmg
rmg = RMG()
inp.setGlobalRMG(rmg)
def tearDownModule():
# remove RMG object
global rmg
rmg = None
class TestInputDatabase(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.database
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.reactionLibraries = None
def testImportingDatabaseReactionLibrariesFromString(self):
"""
Test that we can import Reaction Libraries using the non-tuple form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=['test'])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromFalseTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple False form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',False)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromTrueTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple True form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',True)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertTrue(rmg.reactionLibraries[0][1])
class TestInputMLEstimator(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.mlEstimator
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.ml_estimator = None
def testMLEstimator(self):
"""
Test that we can input.
"""
from rmgpy.ml.estimator import MLEstimator
global rmg
# add database properties to RMG
inp.mlEstimator(thermo=True)
self.assertIsInstance(rmg.ml_estimator, MLEstimator)
self.assertIsInstance(rmg.ml_settings, dict)
class TestInputThemoCentralDatabase(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.thermoCentralDatabase
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.thermoCentralDatabase = None
def testThemoCentralDatabase(self):
"""
Test that we can input.
"""
global rmg
# add database properties to RMG
inp.thermoCentralDatabase(
host='some_host',
port=0,
username='some_usr',
password='<PASSWORD>',
application='some_app'
)
self.assertEqual(rmg.thermoCentralDatabase.host, 'some_host')
self.assertEqual(rmg.thermoCentralDatabase.port, 0)
self.assertEqual(rmg.thermoCentralDatabase.username, 'some_usr')
self.assertEqual(rmg.thermoCentralDatabase.password, '<PASSWORD>')
self.assertEqual(rmg.thermoCentralDatabase.application, 'some_app')
self.assertEqual(rmg.thermoCentralDatabase.client, None)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import unittest
from rmgpy.rmg.main import RMG
from rmgpy.rmg import input as inp
###################################################
def setUpModule():
"""
A module-level fixture that is run once before any tests in this module.
"""
# set-up RMG object and get global rmg object in input.py file
# so methods can be tested
global rmg
rmg = RMG()
inp.setGlobalRMG(rmg)
def tearDownModule():
# remove RMG object
global rmg
rmg = None
class TestInputDatabase(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.database
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.reactionLibraries = None
def testImportingDatabaseReactionLibrariesFromString(self):
"""
Test that we can import Reaction Libraries using the non-tuple form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=['test'])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromFalseTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple False form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',False)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromTrueTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple True form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',True)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertTrue(rmg.reactionLibraries[0][1])
class TestInputMLEstimator(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.mlEstimator
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.ml_estimator = None
def testMLEstimator(self):
"""
Test that we can input.
"""
from rmgpy.ml.estimator import MLEstimator
global rmg
# add database properties to RMG
inp.mlEstimator(thermo=True)
self.assertIsInstance(rmg.ml_estimator, MLEstimator)
self.assertIsInstance(rmg.ml_settings, dict)
class TestInputThemoCentralDatabase(unittest.TestCase):
"""
Contains unit tests rmgpy.rmg.input.thermoCentralDatabase
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.thermoCentralDatabase = None
def testThemoCentralDatabase(self):
"""
Test that we can input.
"""
global rmg
# add database properties to RMG
inp.thermoCentralDatabase(
host='some_host',
port=0,
username='some_usr',
password='<PASSWORD>',
application='some_app'
)
self.assertEqual(rmg.thermoCentralDatabase.host, 'some_host')
self.assertEqual(rmg.thermoCentralDatabase.port, 0)
self.assertEqual(rmg.thermoCentralDatabase.username, 'some_usr')
self.assertEqual(rmg.thermoCentralDatabase.password, '<PASSWORD>')
self.assertEqual(rmg.thermoCentralDatabase.application, 'some_app')
self.assertEqual(rmg.thermoCentralDatabase.client, None)
if __name__ == '__main__':
unittest.main() | en | 0.586151 | #!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2018 Prof. <NAME> (<EMAIL>), # # Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### ################################################### A method that is run before the class. # set-up RMG object and get global rmg object in input.py file # so methods can be tested # remove RMG object Contains unit tests rmgpy.rmg.input.database # remove the reactionLibraries value Test that we can import Reaction Libraries using the non-tuple form. # add database properties to RMG Test that we can import Reaction Libraries using the Tuple False form. # add database properties to RMG Test that we can import Reaction Libraries using the Tuple True form. # add database properties to RMG Contains unit tests rmgpy.rmg.input.mlEstimator # remove the reactionLibraries value Test that we can input. # add database properties to RMG Contains unit tests rmgpy.rmg.input.thermoCentralDatabase # remove the reactionLibraries value Test that we can input. # add database properties to RMG | 1.675696 | 2 |
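The test module above leans on unittest's module-level fixtures to share a single RMG object across test classes. As a reminder of the pattern, unittest calls setUpModule and tearDownModule with no arguments, so they must be plain functions; a generic, self-contained sketch (the shared dict stands in for the RMG object):

import unittest

shared = None  # module-level state shared by every test in the module

def setUpModule():
    # called once, with no arguments, before any test in this module
    global shared
    shared = {"ready": True}

def tearDownModule():
    # called once after all tests in this module have run
    global shared
    shared = None

class TestSharedState(unittest.TestCase):
    def test_fixture_ran(self):
        self.assertTrue(shared["ready"])

if __name__ == '__main__':
    unittest.main()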
plib/leds.py | slowrunner/GoPiLgc | 0 | 6631325 | #!/usr/bin/python3
# File: leds.py
#
# Methods:
# leds.all_on(egpg) turn two red blinker leds on and two "eyes" on bright white
# leds.all_off(egpg) turn two red blinker leds off and two "eyes" off
# wifi_blinker_on(egpg,color=RED) make wifi led blink on/off,on/off...
# pass colors as leds.YELLOW or leds.GREEN ...
# wifi_blinker_off(egpg) turn wifi blinker off
#
# Usage:
# import leds
# egpg=easygopigo3.EasyGoPiGo3()
# leds.all_on(egpg)
# leds.all_off(egpg)
# leds.wifi_blinker_on(egpg,color=leds.ORANGE)
# leds.wifi_blinker_off(egpg)
#
# or from command line:
# > ./leds.py performs test on/off
# > ./leds.py -s on turns leds on
# > ./leds.py -s off turns leds off
# > ./leds.py -c 5 turn all leds green
import easygopigo3
import sys
sys.path.insert(1,"/home/pi/GoPiLgc/plib")
# import myconfig
from time import sleep
import argparse
import threading
WHITE_BRIGHT = (255, 255, 255) # color 0
RED = (255, 0, 0) # color 1
ORANGE = (255, 125, 0) # color 2
YELLOW = (255, 255, 0) # color 3
YELLOW_GREEN = (125, 255, 0) # color 4
GREEN = (0, 255, 0) # color 5
TURQUOISE = (0, 255, 125) # color 6
CYAN = (0, 255, 255) # color 7 light blue
CYAN_BLUE = (0, 125, 255) # color 8
BLUE = (0, 0, 255) # color 9
VIOLET = (125, 0, 255) # color 10
MAGENTA = (255, 0, 255) # color 11
MAGENTA_RED = (255, 0, 125) # color 12
COLOR_LIST = [WHITE_BRIGHT, RED, ORANGE, YELLOW, YELLOW_GREEN, GREEN, TURQUOISE, CYAN, CYAN_BLUE, BLUE, VIOLET, MAGENTA, MAGENTA_RED]
def all_on(egpg=None):
egpg.blinker_on("left")
egpg.blinker_on("right")
egpg.led_on("left")
egpg.led_on("right")
egpg.set_eye_color(WHITE_BRIGHT)
egpg.open_eyes()
# can set wifi led to white, only if utils/wifi_led_off.sh has been run
egpg.set_led(egpg.LED_WIFI,255,255,255)
def all_off(egpg=None):
egpg.blinker_off("left")
egpg.blinker_off("right")
egpg.led_off("left")
egpg.led_off("right")
egpg.close_eyes()
# can turn wifi led off, only if utils/wifi_led_off.sh has been run
egpg.set_led(egpg.LED_WIFI,0,0,0)
def all_color(egpg=None, colornum=5):
if colornum < len(COLOR_LIST):
egpg.set_led(egpg.LED_WIFI,COLOR_LIST[colornum][0], COLOR_LIST[colornum][1], COLOR_LIST[colornum][2])
egpg.set_eye_color(COLOR_LIST[ colornum ])
egpg.open_eyes()
else:
print("ERROR: all_color({}) larger than {}".format(colornum,len(COLOR_LIST)))
def do_wifi_blinking(egpg,color=RED):
global wifi_blinker_thread_quit
try:
r,g,b = color
while wifi_blinker_thread_quit is not True:
egpg.set_led(egpg.LED_WIFI,r,g,b)
sleep(1)
egpg.set_led(egpg.LED_WIFI,0,0,0)
sleep(1)
except Exception as e:
print("do_wifi_blinking: Exception {}".format(str(e)))
raise e
# print("do_wifi_blinking() exiting")
wifi_blinker_thread_quit = False
wifi_blinker_thread = None
wifi_blinker_thread_quit = False
def wifi_blinker_on(egpg,color=RED):
global wifi_blinker_thread,wifi_blinker_thread_quit
if wifi_blinker_thread:
pass
else: # need to start thread
wifi_blinker_thread_quit = False
wifi_blinker_thread = threading.Thread(target=do_wifi_blinking, args=(egpg,color,), daemon=True)
wifi_blinker_thread.start()
def wifi_blinker_off(egpg):
global wifi_blinker_thread,wifi_blinker_thread_quit
if wifi_blinker_thread:
wifi_blinker_thread_quit = True # tell thread to quit
# wifi_blinker_thread.join() # wait for thread to quit
timer = 0
while wifi_blinker_thread_quit and (timer < 5):
sleep(1)
timer+=1
wifi_blinker_thread_quit = False
wifi_blinker_thread = None
else:
pass
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--set", action="store", default=None, help="set all leds 'on' or 'off'")
ap.add_argument("-c", "--colornum", type=int, action="store", default=None, help="set all leds to color ")
args = vars(ap.parse_args())
egpg = easygopigo3.EasyGoPiGo3(use_mutex=True)
# myconfig.setParameters(egpg)
set = args["set"]
colornumber = args['colornum']
if set==None:
if colornumber == None:
print("leds.py: Test all_on()")
all_on(egpg)
sleep(5)
print("leds.py: Test all_off()")
all_off(egpg)
sleep(5)
print("leds.py: Test all_color() (green)")
all_color(egpg)
sleep(5)
else:
all_color(egpg,colornumber)
elif set=='on':
all_on(egpg)
else:
all_off(egpg)
print("Test Wifi Blinker")
wifi_blinker_on(egpg,color=RED)
sleep(10)
wifi_blinker_off(egpg)
all_off(egpg)
if (__name__ == '__main__'): main()
| #!/usr/bin/python3
# File: leds.py
#
# Methods:
# leds.all_on(egpg) turn two red blinker leds on and two "eyes" on bright white
# leds.all_off(egpg) turn two red blinker leds off and two "eyes" off
# wifi_blinker_on(egpg,color=RED) make wifi led blink on/off,on/off...
# pass colors as leds.YELLOW or leds.GREEN ...
# wifi_blinker_off(egpg) turn wifi blinker off
#
# Usage:
# import leds
# egpg=easygopigo3.EasyGoPiGo3()
# leds.all_on(egpg)
# leds.all_off(egpg)
# leds.wifi_blinker_on(egpg,color=leds.ORANGE)
# leds.wifi_blinker_off(egpg)
#
# or from command line:
# > ./leds.py performs test on/off
# > ./leds.py -s on turns leds on
# > ./leds.py -s off turns leds off
# > ./leds.py -c 5 turn all leds green
import easygopigo3
import sys
sys.path.insert(1,"/home/pi/GoPiLgc/plib")
# import myconfig
from time import sleep
import argparse
import threading
WHITE_BRIGHT = (255, 255, 255) # color 0
RED = (255, 0, 0) # color 1
ORANGE = (255, 125, 0) # color 2
YELLOW = (255, 255, 0) # color 3
YELLOW_GREEN = (125, 255, 0) # color 4
GREEN = (0, 255, 0) # color 5
TURQUOISE = (0, 255, 125) # color 6
CYAN = (0, 255, 255) # color 7 light blue
CYAN_BLUE = (0, 125, 255) # color 8
BLUE = (0, 0, 255) # color 9
VIOLET = (125, 0, 255) # color 10
MAGENTA = (255, 0, 255) # color 11
MAGENTA_RED = (255, 0, 125) # color 12
COLOR_LIST = [WHITE_BRIGHT, RED, ORANGE, YELLOW, YELLOW_GREEN, GREEN, TURQUOISE, CYAN, CYAN_BLUE, BLUE, VIOLET, MAGENTA, MAGENTA_RED]
def all_on(egpg=None):
egpg.blinker_on("left")
egpg.blinker_on("right")
egpg.led_on("left")
egpg.led_on("right")
egpg.set_eye_color(WHITE_BRIGHT)
egpg.open_eyes()
# can set wifi led to white, only if utils/wifi_led_off.sh has been run
egpg.set_led(egpg.LED_WIFI,255,255,255)
def all_off(egpg=None):
egpg.blinker_off("left")
egpg.blinker_off("right")
egpg.led_off("left")
egpg.led_off("right")
egpg.close_eyes()
# can turn wifi led off, only if utils/wifi_led_off.sh has been run
egpg.set_led(egpg.LED_WIFI,0,0,0)
def all_color(egpg=None, colornum=5):
if colornum < len(COLOR_LIST):
egpg.set_led(egpg.LED_WIFI,COLOR_LIST[colornum][0], COLOR_LIST[colornum][1], COLOR_LIST[colornum][2])
egpg.set_eye_color(COLOR_LIST[ colornum ])
egpg.open_eyes()
else:
print("ERROR: all_color({}) larger than {}".format(colornum,len(COLOR_LIST)))
def do_wifi_blinking(egpg,color=RED):
global wifi_blinker_thread_quit
try:
r,g,b = color
while wifi_blinker_thread_quit is not True:
egpg.set_led(egpg.LED_WIFI,r,g,b)
sleep(1)
egpg.set_led(egpg.LED_WIFI,0,0,0)
sleep(1)
except Exception as e:
print("do_wifi_blinking: Exception {}".format(str(e)))
raise e
# print("do_wifi_blinking() exiting")
wifi_blinker_thread_quit = False
wifi_blinker_thread = None
wifi_blinker_thread_quit = False
def wifi_blinker_on(egpg,color=RED):
global wifi_blinker_thread,wifi_blinker_thread_quit
if wifi_blinker_thread:
pass
else: # need to start thread
wifi_blinker_thread_quit = False
wifi_blinker_thread = threading.Thread(target=do_wifi_blinking, args=(egpg,color,), daemon=True)
wifi_blinker_thread.start()
def wifi_blinker_off(egpg):
global wifi_blinker_thread,wifi_blinker_thread_quit
if wifi_blinker_thread:
wifi_blinker_thread_quit = True # tell thread to quit
# wifi_blinker_thread.join() # wait for thread to quit
timer = 0
while wifi_blinker_thread_quit and (timer < 5):
sleep(1)
timer+=1
wifi_blinker_thread_quit = False
wifi_blinker_thread = None
else:
pass
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--set", action="store", default=None, help="set all leds 'on' or 'off'")
ap.add_argument("-c", "--colornum", type=int, action="store", default=None, help="set all leds to color ")
args = vars(ap.parse_args())
egpg = easygopigo3.EasyGoPiGo3(use_mutex=True)
# myconfig.setParameters(egpg)
set = args["set"]
colornumber = args['colornum']
if set==None:
if colornumber == None:
print("leds.py: Test all_on()")
all_on(egpg)
sleep(5)
print("leds.py: Test all_off()")
all_off(egpg)
sleep(5)
print("leds.py: Test all_color() (green)")
all_color(egpg)
sleep(5)
else:
all_color(egpg,colornumber)
elif set=='on':
all_on(egpg)
else:
all_off(egpg)
print("Test Wifi Blinker")
wifi_blinker_on(egpg,color=RED)
sleep(10)
wifi_blinker_off(egpg)
all_off(egpg)
if (__name__ == '__main__'): main()
| en | 0.621432 | #!/usr/bin/python3 # File: leds.py # # Methods: # leds.all_on(egpg) turn two red blinker leds on and two "eyes" on bright white # leds.all_off(egpg) turn two red blinker leds off and two "eyes" off # wifi_blinker_on(egpg,color=RED) make wifi led blink on/off,on/off... # pass colors as leds.YELLOW or leds.GREEN ... # wifi_blinker_off(egpg) turn wifi blinker off # # Usage: # import leds # egpg=easygopigo3.EasyGoPiGo3() # leds.all_on(egpg) # leds.all_off(egpg) # leds.wifi_blinker_on(egpg,color=leds.ORANGE) # leds.wifi_blinker_off(egpg) # # or from command line: # > ./leds.py performs test on/off # > ./leds.py -s on turns leds on # > ./leds.py -s off turns leds off # > ./leds.py -c 5 turn all leds green # import myconfig # color 0 # color 1 # color 2 # color 3 # color 4 # color 5 # color 6 # color 7 light blue # color 8 # color 9 # color 10 # color 11 # color 12 # can set wifi led to white, only if utils/wifi_led_off.sh has been run # can turn wifi led off, only if utils/wifi_led_off.sh has been run # print("do_wifi_blinking() exiting") # need to start thread # tell thread to quit # wifi_blinker_thread.join() # wait for thread to quit # myconfig.setParameters(egpg) | 3.437557 | 3 |
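leds.py starts a daemon thread for the WiFi blinker and stops it by flipping a module-level quit flag that the worker resets as an acknowledgement. A generic sketch of the same start/stop idea using threading.Event, which avoids the polling loop; the print call is a stand-in for the hardware LED toggle:

import threading
import time

_stop = threading.Event()
_worker = None

def _blink(period):
    on = False
    while not _stop.is_set():
        on = not on
        print("LED", "on" if on else "off")  # stand-in for egpg.set_led(...)
        _stop.wait(period)                   # wakes early when stop is requested

def start_blinker(period=1.0):
    global _worker
    if _worker is None or not _worker.is_alive():
        _stop.clear()
        _worker = threading.Thread(target=_blink, args=(period,), daemon=True)
        _worker.start()

def stop_blinker(timeout=5.0):
    global _worker
    if _worker is not None:
        _stop.set()              # signal the worker to exit
        _worker.join(timeout)    # wait for it instead of polling a flag
        _worker = None

if __name__ == '__main__':
    start_blinker(0.5)
    time.sleep(3)
    stop_blinker()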
aispace/models/question_answer/bento_services/bert_for_qa_with_impossible_service.py | SmileGoat/AiSpace | 32 | 6631326 | <gh_stars>10-100
# !/usr/bin/env python
# coding=utf-8
# @Time : 2020/4/25 18:08
# @Author : <EMAIL>
# @File : bert_for_qa_service.py
__all__ = [
"BertQAWithImpossibleService"
]
import os, sys
from collections import defaultdict
import tensorflow as tf
from copy import deepcopy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../" * 4)))
from bentoml import api, env, BentoService, artifacts
from bentoml.artifact import TensorflowSavedModelArtifact, PickleArtifact
from bentoml.handlers import JsonHandler
import numpy as np
from scipy.special import softmax, expit
from aispace.datasets.tokenizer import BertTokenizer
from aispace.utils.hparams import Hparams
from aispace.utils.str_utils import uuid_maker, preprocess_text, compute_md5_hash
@artifacts([
TensorflowSavedModelArtifact('model'),
PickleArtifact('tokenizer'),
PickleArtifact("hparams"),
])
@env(auto_pip_dependencies=True)
class BertQAWithImpossibleService(BentoService):
def preprocessing(self, parsed_json):
unique_id = 100000
for one_json in parsed_json:
n_best_size = one_json.get('n_best_size', 5)
threshold = one_json.get('threshold', 0.5)
max_answer_length = one_json.get("max_answer_length", 64)
max_query_length = one_json.get("max_query_length", 64)
doc_stride = one_json.get("doc_stride", 128)
question_text = one_json.get("question_text", "")
trigger = one_json.get("trigger", "")
role = one_json.get("role", "")
event_type = one_json.get("event_type", "")
para_text = one_json.get("context", "")
# if question_text == "" or para_text == "":
if trigger == "" or role == "" or event_type == "" or para_text == "":
# unique_id = uuid_maker()
print("[WARRING] query or context is empty!")
item = {
"unique_id": unique_id,
"qas_id": unique_id,
"question_text": question_text,
"context_text": para_text,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'threshold': threshold
}
yield item
if self.artifacts.hparams.dataset.tokenizer.do_lower_case:
# question_text = question_text.lower()
trigger = trigger.lower()
role = role.lower()
event_type = event_type.lower()
# query_tokens = self.artifacts.tokenizer.tokenize(question_text)
# query_tokens = query_tokens[: max_query_length]
trigger_tokens = self.artifacts.tokenizer.tokenize(trigger)
role_tokens = self.artifacts.tokenizer.tokenize(role)
event_type_tokens = self.artifacts.tokenizer.tokenize(event_type)
query_tokens = trigger_tokens + [self.artifacts.tokenizer.vocab.sep_token] + \
role_tokens + [self.artifacts.tokenizer.vocab.sep_token] + event_type_tokens
query_tokens = query_tokens[: max_query_length]
qas_id = one_json.get('qas_id', compute_md5_hash(self.artifacts.tokenizer.detokenizer(query_tokens) + para_text))
if self.artifacts.hparams.dataset.tokenizer.do_lower_case:
para_text = para_text.lower()
para_tokens = self.artifacts.tokenizer.tokenize(para_text)
"""
For getting token to raw char matching:
1) getting matching between token and tokenized text
2) getting matching between raw text and tokenized text
3) So, can get matching between token and raw
"""
# char idx to token idx
char2token_index = []
# token start idx to char idx
token2char_start_index = []
# token end idx to char idx
token2char_end_index = []
char_idx = 0
for i, token in enumerate(para_tokens):
char_len = len(token.replace("##", ''))
char2token_index.extend([i] * char_len)
token2char_start_index.append(char_idx)
char_idx += char_len
token2char_end_index.append(char_idx - 1)
tokenized_para_text = self.artifacts.tokenizer.detokenizer(para_tokens)
# matching between raw text and tokenized text
N, M = len(para_text), len(tokenized_para_text)
max_N, max_M = 1024, 1024
if N > max_N or M > max_M:
max_N = max(N, max_N)
max_M = max(M, max_M)
match_mapping, mismatch = self._generate_match_mapping(para_text, tokenized_para_text, N, M, max_N, max_M)
# raw idx to tokenized char idx
raw2tokenized_char_index = [None] * (N + 1)
# tokenized char idx to raw idx
tokenized2raw_char_index = [None] * (M + 1)
i, j = N - 1, M - 1
while i >= 0 and j >= 0:
if (i, j) not in match_mapping:
break
# if 324 == i or 353 == j:
# print()
if match_mapping[(i, j)] == 2:
raw2tokenized_char_index[i] = j
tokenized2raw_char_index[j] = i
i, j = i - 1, j - 1
elif match_mapping[(i, j)] == 1:
j = j - 1
else:
i = i - 1
if all(v is None for v in raw2tokenized_char_index) or mismatch:
print("[WARRING] raw and tokenized paragraph mismatch detected")
# unique_id = uuid_maker()
item = {
"unique_id": unique_id,
"qas_id": qas_id,
"question_text": question_text,
"context_text": para_text,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'threshold': threshold
}
yield item
# token start idx to raw char idx
token2char_raw_start_index = []
# token end idx to raw char idx
token2char_raw_end_index = []
for idx in range(len(para_tokens)):
# token char idx
start_pos = token2char_start_index[idx]
end_pos = token2char_end_index[idx]
# raw char idx
raw_start_pos = self._convert_tokenized_index(tokenized2raw_char_index, start_pos, N, is_start=True)
raw_end_pos = self._convert_tokenized_index(tokenized2raw_char_index, end_pos, N, is_start=False)
# matching between token and raw char idx
token2char_raw_start_index.append(raw_start_pos)
token2char_raw_end_index.append(raw_end_pos)
max_para_length = self.artifacts.hparams.dataset.tokenizer.max_len - len(query_tokens) - 3
total_para_length = len(para_tokens)
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
doc_spans = []
para_start = 0
while para_start < total_para_length:
para_length = total_para_length - para_start
if para_length > max_para_length:
para_length = max_para_length
doc_spans.append({
"start": para_start,
"length": para_length
})
if para_start + para_length == total_para_length:
break
para_start += min(para_length, doc_stride)
for (doc_idx, doc_span) in enumerate(doc_spans):
doc_token2char_raw_start_index = []
doc_token2char_raw_end_index = []
doc_token2doc_index = {}
for i in range(doc_span['length']):
token_idx = doc_span["start"] + i
doc_token2char_raw_start_index.append(token2char_raw_start_index[token_idx])
doc_token2char_raw_end_index.append(token2char_raw_end_index[token_idx])
best_doc_idx = self._find_max_context(doc_spans, token_idx)
doc_token2doc_index[i] = (best_doc_idx == doc_idx)
encode_info = \
self.artifacts.tokenizer.encode(
query_tokens,
para_tokens[doc_span['start']: doc_span['start'] + doc_span['length']],
return_mask=True,
return_offset=True,
return_cls_index=True)
input_ids, segment_ids, input_mask, p_mask, q_mask, offset, cls_idx = \
encode_info['input_ids'], encode_info['segment_ids'], encode_info['input_mask'], \
encode_info['b_mask'], encode_info['a_mask'], encode_info['b_offset'], encode_info['cls_index']
# unique_id = uuid_maker()
# p_mask[cls_idx] = 1
item = {
"unique_id": unique_id,
"qas_id": qas_id,
"question_text": question_text,
"context_text": para_text,
"doc_token2char_raw_start_index": doc_token2char_raw_start_index,
"doc_token2char_raw_end_index": doc_token2char_raw_end_index,
'doc_token2doc_index': doc_token2doc_index,
"input_ids": input_ids,
"token_type_ids": segment_ids,
"attention_mask": input_mask,
"p_mask": p_mask,
'offset': offset,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'cls_idx': cls_idx,
'threshold': threshold
}
unique_id += 1
yield item
@api(JsonHandler)
def qa_predict(self, parsed_json):
input_data = {
"input_ids": [], "token_type_ids": [], "attention_mask": [], "p_mask": [], "unique_id": [], "start_position": []
}
no_answer_response = {
'predict_text': "",
'start_prob': 0.,
'end_prob': 0.,
'predict_score': 0.
}
pre_input_data = self.preprocessing(parsed_json)
qas_id_2_examples = defaultdict(list)
unique_id_to_example = defaultdict()
qas_ids = []
for itm in pre_input_data:
qas_ids.append(itm['qas_id'])
if 'input_ids' not in itm:
continue
qas_id_2_examples[itm['qas_id']].append(itm)
unique_id_to_example[itm['unique_id']] = itm
input_data['input_ids'].append(itm['input_ids'])
input_data['token_type_ids'].append(itm['token_type_ids'])
input_data['attention_mask'].append(itm['attention_mask'])
input_data['p_mask'].append(itm['p_mask'])
# input_data['offset'].append(itm['offset'])
# input_data['cls_idx'].append(itm['cls_idx'])
input_data['unique_id'].append(itm['unique_id'])
input_data['start_position'].append(0)
if not input_data['input_ids']:
print("[WARRING] Preprocessing some thing wrong!")
return [no_answer_response]
input_data['input_ids'] = tf.constant(input_data['input_ids'], name="input_ids")
input_data['token_type_ids'] = tf.constant(input_data['token_type_ids'], name="token_type_ids")
input_data['attention_mask'] = tf.constant(input_data['attention_mask'], name="attention_mask")
input_data['p_mask'] = tf.constant(input_data['p_mask'], name="p_mask")
input_data['unique_id'] = tf.constant(input_data['unique_id'], dtype=tf.float32, name="unique_id")
input_data['start_position'] = tf.constant(input_data['start_position'], name="start_position")
start_top_res, end_top_res, answer_prob, unique_id_res = self.artifacts.model(input_data, training=False)
start_top_log_prob, start_top_index = start_top_res.numpy()[:, :, 0], start_top_res.numpy()[:, :, 1].astype(np.int) # [b, k]
end_top_log_prob, end_top_index = end_top_res.numpy()[:, :, :, 0], end_top_res.numpy()[:, :, :, 1].astype(np.int) # [b, k, k]
unique_id_res = unique_id_res.numpy().astype(np.int)
start_n_top, end_n_top = start_top_index.shape[-1], end_top_index.shape[-1]
unique_id_2_result = {}
for i in range(end_top_index.shape[0]):
unique_id = unique_id_res[i]
itm = {
'unique_id': unique_id,
'start_top_log_prob': start_top_log_prob[i],
'start_top_index': start_top_index[i],
'end_top_log_prob': end_top_log_prob[i],
'end_top_index': end_top_index[i],
'is_impossible_prob': answer_prob.numpy()[i][0]
}
unique_id_2_result[unique_id] = itm
answers = []
no_answer_response = {
'predict_text': "",
'span_start': -1,
'start_prob': 0.,
'span_end': -1,
'end_prob': 0.,
'predict_score': 0.,
'is_impossible': 1,
'is_impossible_prob': 1.
}
for qas_id in qas_ids:
examples = qas_id_2_examples.get(qas_id, [])
if not examples:
answers.append(no_answer_response)
continue
max_answer_length, n_best_size, threshold \
= examples[0].get('max_answer_length'), \
examples[0].get('n_best_size'), \
examples[0].get('threshold')
example_all_predicts = []
for example in examples:
cur_unique_id = example['unique_id']
if cur_unique_id not in unique_id_2_result:
continue
cur_result = unique_id_2_result.get(cur_unique_id)
cur_start_top_log_prob = cur_result['start_top_log_prob']
cur_start_top_index = cur_result['start_top_index']
cur_end_top_log_prob = cur_result['end_top_log_prob']
cur_end_top_index = cur_result['end_top_index']
is_impossible = int(cur_result['is_impossible_prob'] >= threshold)
cur_p_mask = example['p_mask']
for i in range(start_n_top):
start_prob = cur_start_top_log_prob[i]
start_index = cur_start_top_index[i]
if not cur_p_mask[start_index]:
continue
for j in range(end_n_top):
end_prob = cur_end_top_log_prob[i, j]
end_index = cur_end_top_index[i, j]
if not cur_p_mask[end_index]:
continue
answer_length = end_index - start_index + 1
if end_index < start_index or answer_length > max_answer_length:
continue
itm = {
'unique_id': cur_unique_id,
'start_prob': start_prob,
'start_index': start_index,
'end_prob': end_prob,
'end_index': end_index,
'predict_score': np.log(start_prob) + np.log(end_prob),
'cls_idx': example['cls_idx'],
'is_impossible': is_impossible,
'is_impossible_prob': cur_result['is_impossible_prob']
}
example_all_predicts.append(itm)
example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True)
example_top_predicts = []
is_visited = set()
for example_predict in example_all_predicts:
if len(example_top_predicts) >= n_best_size:
break
# if example_predict['start_prob'] < threshold or example_predict['end_prob'] < threshold:
# predict_text = ""
# predict_start = -1
# predict_end = -1
# else:
example_feature = unique_id_to_example[example_predict['unique_id']]
predict_start = example_feature['doc_token2char_raw_start_index'][
example_predict['start_index'] - example_feature['offset']]
predict_end = example_feature['doc_token2char_raw_end_index'][
example_predict['end_index'] - example_feature['offset']]
predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip()
if predict_text in is_visited:
continue
itm = {
'predict_text': predict_text,
'span_start': predict_start,
'start_prob': example_predict['start_prob'],
'span_end': predict_end,
'end_prob': example_predict['end_prob'],
'predict_score': example_predict['predict_score'],
'is_impossible': example_predict['is_impossible'],
'is_impossible_prob': example_predict['is_impossible_prob']
}
example_top_predicts.append(itm)
if len(example_top_predicts) == 0:
example_top_predicts.append(
no_answer_response
)
example_best_predict = example_top_predicts[0]
answers.append(example_best_predict)
return answers
def _generate_match_mapping(self,
para_text,
tokenized_para_text,
N,
M,
max_N,
max_M):
"""Generate match mapping for raw and tokenized paragraph"""
def _lcs_match(para_text,
tokenized_para_text,
N,
M,
max_N,
max_M,
max_dist):
"""longest common sub-sequence
f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
unlike standard LCS, this is specifically optimized for the setting
because the mismatch between sentence pieces and original text will be small
"""
f = np.zeros((max_N, max_M), dtype=np.float32)
g = {}
for i in range(N):
# if i == 324:
# print()
for j in range(i - max_dist, i + max_dist):
# if j == 353:
# print()
if j >= M or j < 0:
continue
if i > 0:
g[(i, j)] = 0
f[i, j] = f[i - 1, j]
if j > 0 and f[i, j - 1] > f[i, j]:
g[(i, j)] = 1
f[i, j] = f[i, j - 1]
f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
raw_char = preprocess_text(para_text[i], self.artifacts.hparams.dataset.tokenizer.do_lower_case, remove_space=False, keep_accents=True)
tokenized_char = tokenized_para_text[j]
if raw_char == tokenized_char and f_prev + 1 > f[i, j]:
g[(i, j)] = 2
f[i, j] = f_prev + 1
return f, g
max_dist = abs(N - M) + 10
for _ in range(2):
lcs_matrix, match_mapping = _lcs_match(para_text, tokenized_para_text, N, M, max_N, max_M, max_dist)
if lcs_matrix[N - 1, M - 1] > 0.8 * N:
break
max_dist *= 2
mismatch = lcs_matrix[N - 1, M - 1] < 0.8 * N
if lcs_matrix[N - 1, M - 1] == min(M, N):
mismatch = False
return match_mapping, mismatch
def _convert_tokenized_index(self,
index,
pos,
M=None,
is_start=True):
"""Convert index for tokenized text"""
if index[pos] is not None:
return index[pos]
N = len(index)
rear = pos
while rear < N - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if M is not None and index[front] < M - 1:
if is_start:
return index[front] + 1
else:
return M - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
def _find_max_context(self,
doc_spans,
token_idx):
"""Check if this is the 'max context' doc span for the token.
Because of the sliding window approach taken to scoring documents, a single
token can appear in multiple documents. E.g.
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
"""
best_doc_score = None
best_doc_idx = None
for (doc_idx, doc_span) in enumerate(doc_spans):
doc_start = doc_span["start"]
doc_length = doc_span["length"]
doc_end = doc_start + doc_length - 1
if token_idx < doc_start or token_idx > doc_end:
continue
left_context_length = token_idx - doc_start
right_context_length = doc_end - token_idx
doc_score = min(left_context_length, right_context_length) + 0.01 * doc_length
if best_doc_score is None or doc_score > best_doc_score:
best_doc_score = doc_score
best_doc_idx = doc_idx
return best_doc_idx
def _improve_answer_start(self, para_text, answer, raw_answer_start):
answer = answer.lower().strip()
real_start = para_text.find(answer)
if real_start != -1:
return real_start, answer
else:
return raw_answer_start, answer
def _is_english(self, word: str) -> bool:
"""
Checks whether `word` is an English word.
Note: this function is not standard and should be considered for BERT
tokenization only. See the comments for more details.
:param word:
:return:
"""
flag = True
for c in word:
if 'a' <= c <= 'z' or 'A' <= c <= 'Z' or c == '#':
continue
else:
flag = False
break
return flag | # !/usr/bin/env python
# coding=utf-8
# @Time : 2020/4/25 18:08
# @Author : <EMAIL>
# @File : bert_for_qa_service.py
__all__ = [
"BertQAWithImpossibleService"
]
import os, sys
from collections import defaultdict
import tensorflow as tf
from copy import deepcopy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../" * 4)))
from bentoml import api, env, BentoService, artifacts
from bentoml.artifact import TensorflowSavedModelArtifact, PickleArtifact
from bentoml.handlers import JsonHandler
import numpy as np
from scipy.special import softmax, expit
from aispace.datasets.tokenizer import BertTokenizer
from aispace.utils.hparams import Hparams
from aispace.utils.str_utils import uuid_maker, preprocess_text, compute_md5_hash
@artifacts([
TensorflowSavedModelArtifact('model'),
PickleArtifact('tokenizer'),
PickleArtifact("hparams"),
])
@env(auto_pip_dependencies=True)
class BertQAWithImpossibleService(BentoService):
def preprocessing(self, parsed_json):
unique_id = 100000
for one_json in parsed_json:
n_best_size = one_json.get('n_best_size', 5)
threshold = one_json.get('threshold', 0.5)
max_answer_length = one_json.get("max_answer_length", 64)
max_query_length = one_json.get("max_query_length", 64)
doc_stride = one_json.get("doc_stride", 128)
question_text = one_json.get("question_text", "")
trigger = one_json.get("trigger", "")
role = one_json.get("role", "")
event_type = one_json.get("event_type", "")
para_text = one_json.get("context", "")
# if question_text == "" or para_text == "":
if trigger == "" or role == "" or event_type == "" or para_text == "":
# unique_id = uuid_maker()
print("[WARRING] query or context is empty!")
item = {
"unique_id": unique_id,
"qas_id": unique_id,
"question_text": question_text,
"context_text": para_text,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'threshold': threshold
}
yield item
if self.artifacts.hparams.dataset.tokenizer.do_lower_case:
# question_text = question_text.lower()
trigger = trigger.lower()
role = role.lower()
event_type = event_type.lower()
# query_tokens = self.artifacts.tokenizer.tokenize(question_text)
# query_tokens = query_tokens[: max_query_length]
trigger_tokens = self.artifacts.tokenizer.tokenize(trigger)
role_tokens = self.artifacts.tokenizer.tokenize(role)
event_type_tokens = self.artifacts.tokenizer.tokenize(event_type)
query_tokens = trigger_tokens + [self.artifacts.tokenizer.vocab.sep_token] + \
role_tokens + [self.artifacts.tokenizer.vocab.sep_token] + event_type_tokens
query_tokens = query_tokens[: max_query_length]
qas_id = one_json.get('qas_id', compute_md5_hash(self.artifacts.tokenizer.detokenizer(query_tokens) + para_text))
if self.artifacts.hparams.dataset.tokenizer.do_lower_case:
para_text = para_text.lower()
para_tokens = self.artifacts.tokenizer.tokenize(para_text)
"""
For getting token to raw char matching:
1) getting matching between token and tokenized text
2) getting matching between raw text and tokenized text
3) So, can get matching between token and raw
"""
# char idx to token idx
char2token_index = []
# token start idx to char idx
token2char_start_index = []
# token end idx to char idx
token2char_end_index = []
char_idx = 0
for i, token in enumerate(para_tokens):
char_len = len(token.replace("##", ''))
char2token_index.extend([i] * char_len)
token2char_start_index.append(char_idx)
char_idx += char_len
token2char_end_index.append(char_idx - 1)
tokenized_para_text = self.artifacts.tokenizer.detokenizer(para_tokens)
# matching between raw text and tokenized text
N, M = len(para_text), len(tokenized_para_text)
max_N, max_M = 1024, 1024
if N > max_N or M > max_M:
max_N = max(N, max_N)
max_M = max(M, max_M)
match_mapping, mismatch = self._generate_match_mapping(para_text, tokenized_para_text, N, M, max_N, max_M)
# raw idx to tokenized char idx
raw2tokenized_char_index = [None] * (N + 1)
# tokenized char idx to raw idx
tokenized2raw_char_index = [None] * (M + 1)
i, j = N - 1, M - 1
while i >= 0 and j >= 0:
if (i, j) not in match_mapping:
break
# if 324 == i or 353 == j:
# print()
if match_mapping[(i, j)] == 2:
raw2tokenized_char_index[i] = j
tokenized2raw_char_index[j] = i
i, j = i - 1, j - 1
elif match_mapping[(i, j)] == 1:
j = j - 1
else:
i = i - 1
if all(v is None for v in raw2tokenized_char_index) or mismatch:
print("[WARRING] raw and tokenized paragraph mismatch detected")
# unique_id = uuid_maker()
item = {
"unique_id": unique_id,
"qas_id": qas_id,
"question_text": question_text,
"context_text": para_text,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'threshold': threshold
}
yield item
# token start idx to raw char idx
token2char_raw_start_index = []
# token end idx to raw char idx
token2char_raw_end_index = []
for idx in range(len(para_tokens)):
# token char idx
start_pos = token2char_start_index[idx]
end_pos = token2char_end_index[idx]
# raw char idx
raw_start_pos = self._convert_tokenized_index(tokenized2raw_char_index, start_pos, N, is_start=True)
raw_end_pos = self._convert_tokenized_index(tokenized2raw_char_index, end_pos, N, is_start=False)
# matching between token and raw char idx
token2char_raw_start_index.append(raw_start_pos)
token2char_raw_end_index.append(raw_end_pos)
max_para_length = self.artifacts.hparams.dataset.tokenizer.max_len - len(query_tokens) - 3
total_para_length = len(para_tokens)
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
doc_spans = []
para_start = 0
while para_start < total_para_length:
para_length = total_para_length - para_start
if para_length > max_para_length:
para_length = max_para_length
doc_spans.append({
"start": para_start,
"length": para_length
})
if para_start + para_length == total_para_length:
break
para_start += min(para_length, doc_stride)
for (doc_idx, doc_span) in enumerate(doc_spans):
doc_token2char_raw_start_index = []
doc_token2char_raw_end_index = []
doc_token2doc_index = {}
for i in range(doc_span['length']):
token_idx = doc_span["start"] + i
doc_token2char_raw_start_index.append(token2char_raw_start_index[token_idx])
doc_token2char_raw_end_index.append(token2char_raw_end_index[token_idx])
best_doc_idx = self._find_max_context(doc_spans, token_idx)
doc_token2doc_index[i] = (best_doc_idx == doc_idx)
encode_info = \
self.artifacts.tokenizer.encode(
query_tokens,
para_tokens[doc_span['start']: doc_span['start'] + doc_span['length']],
return_mask=True,
return_offset=True,
return_cls_index=True)
input_ids, segment_ids, input_mask, p_mask, q_mask, offset, cls_idx = \
encode_info['input_ids'], encode_info['segment_ids'], encode_info['input_mask'], \
encode_info['b_mask'], encode_info['a_mask'], encode_info['b_offset'], encode_info['cls_index']
# unique_id = uuid_maker()
# p_mask[cls_idx] = 1
item = {
"unique_id": unique_id,
"qas_id": qas_id,
"question_text": question_text,
"context_text": para_text,
"doc_token2char_raw_start_index": doc_token2char_raw_start_index,
"doc_token2char_raw_end_index": doc_token2char_raw_end_index,
'doc_token2doc_index': doc_token2doc_index,
"input_ids": input_ids,
"token_type_ids": segment_ids,
"attention_mask": input_mask,
"p_mask": p_mask,
'offset': offset,
'n_best_size': n_best_size,
'max_answer_length': max_answer_length,
'cls_idx': cls_idx,
'threshold': threshold
}
unique_id += 1
yield item
@api(JsonHandler)
def qa_predict(self, parsed_json):
input_data = {
"input_ids": [], "token_type_ids": [], "attention_mask": [], "p_mask": [], "unique_id": [], "start_position": []
}
no_answer_response = {
'predict_text': "",
'start_prob': 0.,
'end_prob': 0.,
'predict_score': 0.
}
pre_input_data = self.preprocessing(parsed_json)
qas_id_2_examples = defaultdict(list)
unique_id_to_example = defaultdict()
qas_ids = []
for itm in pre_input_data:
qas_ids.append(itm['qas_id'])
if 'input_ids' not in itm:
continue
qas_id_2_examples[itm['qas_id']].append(itm)
unique_id_to_example[itm['unique_id']] = itm
input_data['input_ids'].append(itm['input_ids'])
input_data['token_type_ids'].append(itm['token_type_ids'])
input_data['attention_mask'].append(itm['attention_mask'])
input_data['p_mask'].append(itm['p_mask'])
# input_data['offset'].append(itm['offset'])
# input_data['cls_idx'].append(itm['cls_idx'])
input_data['unique_id'].append(itm['unique_id'])
input_data['start_position'].append(0)
if not input_data['input_ids']:
print("[WARRING] Preprocessing some thing wrong!")
return [no_answer_response]
input_data['input_ids'] = tf.constant(input_data['input_ids'], name="input_ids")
input_data['token_type_ids'] = tf.constant(input_data['token_type_ids'], name="token_type_ids")
input_data['attention_mask'] = tf.constant(input_data['attention_mask'], name="attention_mask")
input_data['p_mask'] = tf.constant(input_data['p_mask'], name="p_mask")
input_data['unique_id'] = tf.constant(input_data['unique_id'], dtype=tf.float32, name="unique_id")
input_data['start_position'] = tf.constant(input_data['start_position'], name="start_position")
start_top_res, end_top_res, answer_prob, unique_id_res = self.artifacts.model(input_data, training=False)
start_top_log_prob, start_top_index = start_top_res.numpy()[:, :, 0], start_top_res.numpy()[:, :, 1].astype(np.int) # [b, k]
end_top_log_prob, end_top_index = end_top_res.numpy()[:, :, :, 0], end_top_res.numpy()[:, :, :, 1].astype(np.int) # [b, k, k]
unique_id_res = unique_id_res.numpy().astype(np.int)
start_n_top, end_n_top = start_top_index.shape[-1], end_top_index.shape[-1]
unique_id_2_result = {}
for i in range(end_top_index.shape[0]):
unique_id = unique_id_res[i]
itm = {
'unique_id': unique_id,
'start_top_log_prob': start_top_log_prob[i],
'start_top_index': start_top_index[i],
'end_top_log_prob': end_top_log_prob[i],
'end_top_index': end_top_index[i],
'is_impossible_prob': answer_prob.numpy()[i][0]
}
unique_id_2_result[unique_id] = itm
answers = []
no_answer_response = {
'predict_text': "",
'span_start': -1,
'start_prob': 0.,
'span_end': -1,
'end_prob': 0.,
'predict_score': 0.,
'is_impossible': 1,
'is_impossible_prob': 1.
}
for qas_id in qas_ids:
examples = qas_id_2_examples.get(qas_id, [])
if not examples:
answers.append(no_answer_response)
continue
max_answer_length, n_best_size, threshold \
= examples[0].get('max_answer_length'), \
examples[0].get('n_best_size'), \
examples[0].get('threshold')
example_all_predicts = []
for example in examples:
cur_unique_id = example['unique_id']
if cur_unique_id not in unique_id_2_result:
continue
cur_result = unique_id_2_result.get(cur_unique_id)
cur_start_top_log_prob = cur_result['start_top_log_prob']
cur_start_top_index = cur_result['start_top_index']
cur_end_top_log_prob = cur_result['end_top_log_prob']
cur_end_top_index = cur_result['end_top_index']
is_impossible = int(cur_result['is_impossible_prob'] >= threshold)
cur_p_mask = example['p_mask']
for i in range(start_n_top):
start_prob = cur_start_top_log_prob[i]
start_index = cur_start_top_index[i]
if not cur_p_mask[start_index]:
continue
for j in range(end_n_top):
end_prob = cur_end_top_log_prob[i, j]
end_index = cur_end_top_index[i, j]
if not cur_p_mask[end_index]:
continue
answer_length = end_index - start_index + 1
if end_index < start_index or answer_length > max_answer_length:
continue
itm = {
'unique_id': cur_unique_id,
'start_prob': start_prob,
'start_index': start_index,
'end_prob': end_prob,
'end_index': end_index,
'predict_score': np.log(start_prob) + np.log(end_prob),
'cls_idx': example['cls_idx'],
'is_impossible': is_impossible,
'is_impossible_prob': cur_result['is_impossible_prob']
}
example_all_predicts.append(itm)
example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True)
example_top_predicts = []
is_visited = set()
for example_predict in example_all_predicts:
if len(example_top_predicts) >= n_best_size:
break
# if example_predict['start_prob'] < threshold or example_predict['end_prob'] < threshold:
# predict_text = ""
# predict_start = -1
# predict_end = -1
# else:
example_feature = unique_id_to_example[example_predict['unique_id']]
predict_start = example_feature['doc_token2char_raw_start_index'][
example_predict['start_index'] - example_feature['offset']]
predict_end = example_feature['doc_token2char_raw_end_index'][
example_predict['end_index'] - example_feature['offset']]
predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip()
if predict_text in is_visited:
continue
itm = {
'predict_text': predict_text,
'span_start': predict_start,
'start_prob': example_predict['start_prob'],
'span_end': predict_end,
'end_prob': example_predict['end_prob'],
'predict_score': example_predict['predict_score'],
'is_impossible': example_predict['is_impossible'],
'is_impossible_prob': example_predict['is_impossible_prob']
}
example_top_predicts.append(itm)
if len(example_top_predicts) == 0:
example_top_predicts.append(
no_answer_response
)
example_best_predict = example_top_predicts[0]
answers.append(example_best_predict)
return answers
def _generate_match_mapping(self,
para_text,
tokenized_para_text,
N,
M,
max_N,
max_M):
"""Generate match mapping for raw and tokenized paragraph"""
def _lcs_match(para_text,
tokenized_para_text,
N,
M,
max_N,
max_M,
max_dist):
"""longest common sub-sequence
f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
unlike standard LCS, this is specifically optimized for the setting
because the mismatch between sentence pieces and original text will be small
"""
f = np.zeros((max_N, max_M), dtype=np.float32)
g = {}
for i in range(N):
# if i == 324:
# print()
for j in range(i - max_dist, i + max_dist):
# if j == 353:
# print()
if j >= M or j < 0:
continue
if i > 0:
g[(i, j)] = 0
f[i, j] = f[i - 1, j]
if j > 0 and f[i, j - 1] > f[i, j]:
g[(i, j)] = 1
f[i, j] = f[i, j - 1]
f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
raw_char = preprocess_text(para_text[i], self.artifacts.hparams.dataset.tokenizer.do_lower_case, remove_space=False, keep_accents=True)
tokenized_char = tokenized_para_text[j]
if raw_char == tokenized_char and f_prev + 1 > f[i, j]:
g[(i, j)] = 2
f[i, j] = f_prev + 1
return f, g
max_dist = abs(N - M) + 10
for _ in range(2):
lcs_matrix, match_mapping = _lcs_match(para_text, tokenized_para_text, N, M, max_N, max_M, max_dist)
if lcs_matrix[N - 1, M - 1] > 0.8 * N:
break
max_dist *= 2
mismatch = lcs_matrix[N - 1, M - 1] < 0.8 * N
if lcs_matrix[N - 1, M - 1] == min(M, N):
mismatch = False
return match_mapping, mismatch
def _convert_tokenized_index(self,
index,
pos,
M=None,
is_start=True):
"""Convert index for tokenized text"""
if index[pos] is not None:
return index[pos]
N = len(index)
rear = pos
while rear < N - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if M is not None and index[front] < M - 1:
if is_start:
return index[front] + 1
else:
return M - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
def _find_max_context(self,
doc_spans,
token_idx):
"""Check if this is the 'max context' doc span for the token.
Because of the sliding window approach taken to scoring documents, a single
token can appear in multiple documents. E.g.
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
"""
best_doc_score = None
best_doc_idx = None
for (doc_idx, doc_span) in enumerate(doc_spans):
doc_start = doc_span["start"]
doc_length = doc_span["length"]
doc_end = doc_start + doc_length - 1
if token_idx < doc_start or token_idx > doc_end:
continue
left_context_length = token_idx - doc_start
right_context_length = doc_end - token_idx
doc_score = min(left_context_length, right_context_length) + 0.01 * doc_length
if best_doc_score is None or doc_score > best_doc_score:
best_doc_score = doc_score
best_doc_idx = doc_idx
return best_doc_idx
def _improve_answer_start(self, para_text, answer, raw_answer_start):
answer = answer.lower().strip()
real_start = para_text.find(answer)
if real_start != -1:
return real_start, answer
else:
return raw_answer_start, answer
def _is_english(self, word: str) -> bool:
"""
Checks whether `word` is an English word.
Note: this function is not standard and should be considered for BERT
tokenization only. See the comments for more details.
:param word:
:return:
"""
flag = True
for c in word:
if 'a' <= c <= 'z' or 'A' <= c <= 'Z' or c == '#':
continue
else:
flag = False
break
return flag | en | 0.744612 | # !/usr/bin/env python # coding=utf-8 # @Time : 2020/4/25 18:08 # @Author : <EMAIL> # @File : bert_for_qa_service.py # if question_text == "" or para_text == "": # unique_id = uuid_maker() # question_text = question_text.lower() # query_tokens = self.artifacts.tokenizer.tokenize(question_text) # query_tokens = query_tokens[: max_query_length] For getting token to raw char matching: 1) getting matching between token and tokenized text 2) getting matching between raw text and tokenized text 3) So, can get matching between token and raw # char idx to token idx # token start idx to char idx # token end idx to char idx #", '')) # matching between raw text and tokenized text # raw idx to tokenized char idx # tokenized char idx to raw idx # if 324 == i or 353 == j: # print() # unique_id = uuid_maker() # token start idx to raw char idx # token end idx to raw char idx # token char idx # raw char idx # matching between token and raw char idx # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. # unique_id = uuid_maker() # p_mask[cls_idx] = 1 # input_data['offset'].append(itm['offset']) # input_data['cls_idx'].append(itm['cls_idx']) # [b, k] # [b, k, k] # if example_predict['start_prob'] < threshold or example_predict['end_prob'] < threshold: # predict_text = "" # predict_start = -1 # predict_end = -1 # else: Generate match mapping for raw and tokenized paragraph longest common sub-sequence f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) unlike standard LCS, this is specifically optimized for the setting because the mismatch between sentence pieces and original text will be small # if i == 324: # print() # if j == 353: # print() Convert index for tokenized text Check if this is the 'max context' doc span for the token. Because of the sliding window approach taken to scoring documents, a single token can appear in multiple documents. E.g. Doc: the man went to the store and bought a gallon of milk Span A: the man went to the Span B: to the store and bought Span C: and bought a gallon of ... Now the word 'bought' will have two scores from spans B and C. We only want to consider the score with "maximum context", which we define as the *minimum* of its left and right context (the *sum* of left and right context will always be the same, of course). In the example the maximum context for 'bought' would be span C since it has 1 left context and 3 right context, while span B has 4 left context and 0 right context. Checks whether `word` is a english word. Note: this function is not standard and should be considered for BERT tokenization only. See the comments for more details. :param word: :return: | 1.996545 | 2 |
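The QA service above splits long paragraphs into overlapping doc spans and, for tokens that land in several spans, keeps only the span with "maximum context" as described in the _find_max_context docstring (minimum of left and right context plus 0.01 times the span length). A standalone sketch of those two pieces, with illustrative names rather than the service's own API:

def make_doc_spans(total_length, max_length, stride):
    # Sliding window over a token sequence, mirroring the doc_spans loop above.
    spans, start = [], 0
    while start < total_length:
        length = min(max_length, total_length - start)
        spans.append({"start": start, "length": length})
        if start + length == total_length:
            break
        start += min(length, stride)
    return spans

def find_max_context(doc_spans, token_idx):
    # Pick the span where token_idx has the most surrounding context.
    best_score, best_idx = None, None
    for idx, span in enumerate(doc_spans):
        start = span["start"]
        end = start + span["length"] - 1
        if not (start <= token_idx <= end):
            continue
        score = min(token_idx - start, end - token_idx) + 0.01 * span["length"]
        if best_score is None or score > best_score:
            best_score, best_idx = score, idx
    return best_idx

if __name__ == '__main__':
    spans = make_doc_spans(total_length=30, max_length=12, stride=6)
    print(spans)                        # overlapping spans covering tokens 0..29
    print(find_max_context(spans, 10))  # index of the span giving token 10 the most context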
full_cost/full_cost/utils/facturing.py | CEMES-CNRS/full_cost_git | 0 | 6631327 | import importlib
import os
import datetime
from openpyxl import load_workbook, utils
from django.db.models import Q
from openpyxl.styles import Border, Side, Alignment, Font
from openpyxl.utils.cell import get_column_letter
from io import BytesIO
import numpy as np
from datetime import date
from django.http import HttpResponse
from django.db.models import Max
from django.urls import reverse
from full_cost.utils.constants import get_activities_from_entity, get_subbillings_from_entity_short,\
get_subbillings_from_entity_long, get_entity_long, CNRS_PERCENTAGE
from lab.models import Extraction, Price
from full_cost import settings
def get_border(style=None, color='FF000000'):
return Border(left=Side(border_style=style, color=color),
right=Side(border_style=style, color=color),
top=Side(border_style=style, color=color),
bottom=Side(border_style=style, color=color),
diagonal=Side(border_style=style, color=color),
diagonal_direction=0,
outline=Side(border_style=style, color=color),
vertical=Side(border_style=style, color=color),
horizontal=Side(border_style=style, color=color)
)
alignment = Alignment(horizontal='center',
vertical='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
def as_text(value):
return str(value) if value is not None else ""
def set_columns_width(worksheet):
for column_cells in worksheet.columns:
length = max(len(as_text(cell.value)) for cell in column_cells)
worksheet.column_dimensions[utils.get_column_letter(column_cells[0].column)].width = length
def to_string(val):
return '{:.02f}'.format(val)
def calculate_wus(records_list, entity):
subbillings_long = get_subbillings_from_entity_long(entity)
Nwu = np.array([0. for idx in range(len(subbillings_long))])
for records in records_list:
for r in records:
if r.experiment.get_exp_type_display() in subbillings_long:
ind = subbillings_long.index(r.experiment.get_exp_type_display())
Nwutmp = np.array([r.wu if idx == ind else 0 for idx in range(len(subbillings_long))])
Nwu += Nwutmp
return Nwu
def populate_releve(records_list, project, entity, show_time=True):
subbilling_long = get_subbillings_from_entity_long(entity)
wb = load_workbook(filename=os.path.join(settings.STATIC_ROOT, 'template_facturation.xlsx'))
ws = wb.create_sheet('Relevé')
ws.append([None])
ws.append([None])
if entity == 'MECA' or entity == 'ELEC':
header = ['Date', 'Worker', 'Session']
else:
header = ['Date', 'Experiment', 'Session']
header.extend(subbilling_long)
ws.append(header)
Nwu = calculate_wus(records_list, entity)
for records in records_list:
records = records.order_by('experiment', 'date_from')
for r in records:
date_to = None
date_from = r.date_from.strftime('%d/%m/%Y')
time_to = None
time_from = None
if hasattr(r, 'time_from'):
if isinstance(r.time_from, datetime.time):
time_from = r.time_from.strftime('%H:%M:%S')
else:
time_from = r.get_time_from_display()
if hasattr(r, 'date_to'):
date_to = r.date_to.strftime('%d/%m/%Y')
if hasattr(r, 'time_to'):
if isinstance(r.time_to, datetime.time):
time_to = r.time_to.strftime('%H:%M:%S')
else:
time_to = r.get_time_to_display()
if date_to is not None:
session = f"du {date_from}-{time_from if show_time else ''} au {date_to}-{time_to if show_time else ''}"
else:
session = f"le {date_from}: {time_from if show_time else ''} - {time_to if show_time else ''}"
ind = subbilling_long.index(r.experiment.get_exp_type_display())
wus = [r.wu if idx == ind else None for idx in range(len(subbilling_long))]
row = [r.date_from, str(r.experiment.experiment), session]
row.extend(wus)
ws.append(row)
res = [None, None, 'Total:']
res.extend(Nwu)
ws.append(res)
last_cell = ws.calculate_dimension().split(':')[1]
cell = ws[last_cell]
letter = get_column_letter(cell.column)
icol = cell.column
irow = cell.row
cells = ws['A3':f'{letter}{irow}']
for row in cells:
for cell in row:
cell.border = get_border('medium')
cell.alignment = alignment
ws.append([None])
set_columns_width(ws)
ws['A1'] = f"Relevé des séances sur le projet: {str(project)}"
ws['A1'].font = Font(name='Times New Roman', size=10, bold=False,)
return wb, Nwu
def calculate_totals(project, records_list, entity):
wus = calculate_wus(records_list, entity)
subbilling_short = get_subbillings_from_entity_short(entity)
totals = [0]
for ind, bill in enumerate(subbilling_short):
price, tarification = get_project_price(project, entity, bill)
totals[0] += wus[ind]*price
return totals
def get_project_price(project, entity, bill):
    """Return (unit price, tariff label) for one sub-billing, based on the project's academic/national/CNRS status."""
if project.is_academic:
tarification = 'académique'
if project.is_national:
tarification += ' nationale'
price = Price.objects.get(price_entity=entity, price_category='T3ANR', price_name=bill).price
else:
tarification += ' internationale ou privée'
price = Price.objects.get(price_entity=entity, price_category='T3', price_name=bill).price
if not project.is_cnrs:
price += price * CNRS_PERCENTAGE / 100
tarification += ' non gérée par le CNRS'
else:
tarification += ' gérée par le CNRS'
else:
tarification = 'privée'
price = Price.objects.get(price_entity=entity, price_category='T1', price_name=bill).price
return price, tarification
def populate_facture(extraction_name, extraction, entity):
records_list = []
for act in get_activities_from_entity(entity):
records_list.append(getattr(extraction, f'{act}_record_related').all())
project = extraction.project
dates = [extraction.date_after.strftime('%d/%m/%Y'),
extraction.date_before.strftime('%d/%m/%Y'),]
wb, wus = populate_releve(records_list, project, entity)
ws = wb['Facture']
totals = calculate_totals(project, records_list, entity)
subbilling_short = get_subbillings_from_entity_short(entity)
subbilling_long = get_subbillings_from_entity_long(entity)
for ind, bill in enumerate(subbilling_short):
price, tarification = get_project_price(project, entity, bill)
row = [None, subbilling_long[ind], wus[ind], price, to_string(wus[ind]*price)]
ws.append(row)
ws.append([None, None, None, 'Total (€HT):', to_string(totals[0])])
letter = 'E'
irow = 24 + len(subbilling_long) + 1
cells = ws['C24':f'{letter}{24}']
for row in cells:
for cell in row:
cell.border = get_border('medium')
cell.alignment = alignment
cells = ws['B25':f'{letter}{irow}']
for ind in range(25, irow+1):
ws.row_dimensions[ind].height = 40
for row in cells:
for cell in row:
cell.border = get_border('medium')
cell.alignment = alignment
ws['C13'] = str(project.project_pi)
ws['C14'] = project.project_name
ws['C17'] = get_entity_long(entity)
ws['C20'] = dates[0]
ws['E20'] = dates[1]
ws['G6'] = date.today().strftime('%d/%m/%Y')
ws['C2'] = extraction_name
facture_object = '" et de "'.join(subbilling_long)
ws['C19'] = f'Séances de "{facture_object}"'
ws['B22'] = f'Tarification {tarification}'
return wb
def export_book(wb):
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
def generate_xlsx(extraction):
ext_id = extraction.creation_id
entity = extraction.billing
extraction_name = f"{entity} {date.today().strftime('%y')}-{ext_id:03d}"
wb = populate_facture(extraction_name, extraction, entity)
data = export_book(wb)
filename = f'extract_{extraction_name}.xlsx'
response = HttpResponse(content_type="application/vnd.ms-excel")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
response.write(data)
return response
def create_extraction(entity, records_list, project, filter):
    """Create and save an Extraction covering the filtered records, then attach each record to it."""
ext_id = Extraction.objects.all().filter(creation_date__year=date.today().year).aggregate(Max('creation_id'))['creation_id__max']
if ext_id is not None:
ext_id += 1
else:
ext_id = 0
totals = calculate_totals(project, records_list, entity)
total = totals[0]
ext = Extraction(project=project,
date_after=filter.form.cleaned_data['date_from'].start,
date_before=filter.form.cleaned_data['date_from'].stop,
creation_id=ext_id, amount=total, billing=entity)
ext.save()
for records in records_list:
for r in records:
r.extraction = ext
r.save()
    return ext | none | 1 | 2.056441 | 2 |
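A minimal usage sketch for the facturing module above (not part of the source file): the import path and the wrapper name are assumptions; only create_extraction and generate_xlsx come from the code shown.

from full_cost.utils import facturing  # assumed import path for the module above

def build_invoice_response(entity, records_list, project, record_filter):
    # Persist the extraction first so it gets a creation_id and a total amount,
    # then render it to an .xlsx HttpResponse ready to return from a Django view.
    extraction = facturing.create_extraction(entity, records_list, project, record_filter)
    return facturing.generate_xlsx(extraction)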
|
datafiles/tests/test_mapper.py | jacebrowning/datafiles | 151 | 6631328 | # pylint: disable=unused-variable
import platform
from dataclasses import dataclass
from pathlib import Path
import pytest
from datafiles.config import Meta
from datafiles.mapper import Mapper, create_mapper
@dataclass
class MyClass:
foobar: int
class MyField:
@classmethod
def to_preserialization_data(cls, python_value):
return python_value
def describe_mapper():
@pytest.fixture
def mapper():
return Mapper(
instance=MyClass(foobar=42),
attrs={},
pattern=None,
manual=Meta.datafile_manual,
defaults=Meta.datafile_defaults,
infer=Meta.datafile_infer,
)
def describe_path():
def is_none_when_no_pattern(expect, mapper):
expect(mapper.path).is_(None)
def is_relative_to_file_by_default(expect, mapper):
mapper._pattern = '../../tmp/sample.yml'
root = Path(__file__).parents[2]
expect(mapper.path) == root / 'tmp' / 'sample.yml'
def is_absolute_when_specified(expect, mapper):
mapper._pattern = '/private/tmp/sample.yml'
if platform.system() == 'Windows':
path = Path('C:/private/tmp/sample.yml')
else:
path = Path('/private/tmp/sample.yml')
expect(mapper.path) == path
def is_relative_to_cwd_when_specified(expect, mapper):
mapper._pattern = './foobar/sample.yml'
if platform.system() == 'Windows':
path = Path('foobar/sample.yml')
else:
path = Path.cwd() / 'foobar' / 'sample.yml'
expect(mapper.path) == path
def describe_relpath():
def when_cwd_is_parent(expect, mapper):
mapper._pattern = '../../tmp/sample.yml'
expect(mapper.relpath) == Path('tmp', 'sample.yml')
def when_cwd_is_sibling(expect, mapper):
mapper._pattern = '../../../tmp/sample.yml'
expect(mapper.relpath) == Path('..', 'tmp', 'sample.yml')
def describe_text():
def is_blank_when_no_attrs(expect, mapper):
expect(mapper.text) == ""
def is_yaml_by_default(expect, mapper):
mapper.attrs = {'foobar': MyField}
expect(mapper.text) == "foobar: 42\n"
def with_json_format(expect, mapper):
mapper._pattern = '_.json'
mapper.attrs = {'foobar': MyField}
expect(mapper.text) == '{\n "foobar": 42\n}'
def with_toml_format(expect, mapper):
mapper._pattern = '_.toml'
mapper.attrs = {'foobar': MyField}
expect(mapper.text) == "foobar = 42\n"
def with_no_format(expect, mapper):
mapper._pattern = '_'
mapper.attrs = {'foobar': MyField}
expect(mapper.text) == "foobar: 42\n"
def with_unknown_format(expect, mapper):
mapper._pattern = '_.xyz'
mapper.attrs = {'foobar': MyField}
with expect.raises(ValueError):
print(mapper.text)
def describe_load():
def it_requires_path(expect, mapper):
with expect.raises(RuntimeError):
mapper.load()
def describe_save():
def it_requires_path(expect, mapper):
with expect.raises(RuntimeError):
mapper.save()
def describe_create_mapper():
def it_reuses_existing_datafile(mocker, expect):
obj = mocker.Mock()
mapper = mocker.Mock()
obj.datafile = mapper
new_mapper = create_mapper(obj)
expect(new_mapper) == obj.datafile
 | en | 0.542122 | # pylint: disable=unused-variable | 2.047433 | 2 |
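A plain-pytest sketch of the same construction exercised by the `mapper` fixture above; it assumes only the Mapper signature and Meta attributes already used in the tests.

from dataclasses import dataclass

from datafiles.config import Meta
from datafiles.mapper import Mapper

@dataclass
class Sample:
    foobar: int

def test_path_is_none_without_pattern():
    # Mirrors the fixture above, but with a standard assert instead of expect().
    mapper = Mapper(
        instance=Sample(foobar=42),
        attrs={},
        pattern=None,
        manual=Meta.datafile_manual,
        defaults=Meta.datafile_defaults,
        infer=Meta.datafile_infer,
    )
    assert mapper.path is None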
code-example/ch03/vowels3.py | grahovsky/python-edu | 0 | 6631329 | <filename>code-example/ch03/vowels3.py
vowels = ['a', 'e', 'i', 'o', 'u']
word = input("Provide a word to search for vowels: ")
found = {}
for letter in word:
if letter in vowels:
found.setdefault(letter, 0)
found[letter] += 1
for vowel in sorted(found):
print(vowel, 'occurred', found[vowel], 'times.')
print()
for vowel in sorted(found, key=found.get, reverse=True):
print(vowel, 'occurred', found[vowel], 'times.')
 | none | 1 | 4.138385 | 4 |
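The same tally can be written with collections.Counter from the standard library; a short sketch of the frequency-sorted report, keeping the script's output format, follows.

from collections import Counter

vowels = {'a', 'e', 'i', 'o', 'u'}
word = input("Provide a word to search for vowels: ")
found = Counter(letter for letter in word if letter in vowels)
for vowel, count in found.most_common():  # most_common() yields the frequency-sorted loop
    print(vowel, 'occurred', count, 'times.')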
|
evaluation/evaluate_text_classification.py | dyrson11/NL-Augmenter | 0 | 6631330 | from tasks.TaskTypes import TaskType
import numpy as np
import enum
from datasets import load_dataset
from transformers import pipeline
from dataset import TextLineDataset, KeyValueDataset
import torch
# make this to work for three task.
class SENTIMENT_LABELS(enum.Enum):
NEGATIVE = 0
POSITIVE = 1
class NLI_LABELS(enum.Enum):
ENTAILMENT = 0
NEUTRAL = 1
CONTRADICTION = 2
class QQP_LABEL(enum.Enum):
NON_DUPLICATE = 0
DUPLICATE = 1
def _process_data(dataset_name, split):
if dataset_name in ["qqp", "sst2"]:
hf_dataset = load_dataset("glue", dataset_name, split=split)
elif dataset_name in ["clue"]:
hf_dataset = load_dataset(dataset_name, "cluewsc2020", split=split)
else:
hf_dataset = load_dataset(dataset_name, split=split)
if dataset_name == "imdb":
label_name = "label"
label_func = (
lambda x: SENTIMENT_LABELS.POSITIVE
if x == 1
else SENTIMENT_LABELS.NEGATIVE
)
instance_name = ["text"]
data_class = TextLineDataset
elif dataset_name == "sst2":
label_name = "label"
label_func = (
lambda x: SENTIMENT_LABELS.POSITIVE
if x == 1
else SENTIMENT_LABELS.NEGATIVE
)
instance_name = ["sentence"]
data_class = TextLineDataset
elif dataset_name == "clue":
label_name = "label"
label_func = (
lambda x: SENTIMENT_LABELS.POSITIVE
if x == 1
else SENTIMENT_LABELS.NEGATIVE
)
instance_name = ["text"]
data_class = TextLineDataset
elif dataset_name in ["multi_nli", "snli"]:
label_name = "label"
def label_func(d):
if d == 0:
return NLI_LABELS.ENTAILMENT
elif d == 1:
return NLI_LABELS.NEUTRAL
elif d == 2:
return NLI_LABELS.CONTRADICTION
instance_name = ["premise", "hypothesis"]
data_class = KeyValueDataset
elif dataset_name == "qqp":
label_name = "label"
instance_name = ["question1", "question2"]
def label_func(d):
if d == 1:
return QQP_LABEL.DUPLICATE
else:
return QQP_LABEL.NON_DUPLICATE
data_class = KeyValueDataset
datasets = data_class.from_huggingface(
hf_dataset,
fields=instance_name + [label_name],
task_type=TaskType.TEXT_CLASSIFICATION,
max_size=1000,
)
return datasets, label_func
def _process_data_with_training(dataset_name, split):
if dataset_name in ["qqp", "sst2"]:
train_dataset, hf_dataset = load_dataset(
"glue", dataset_name, split=split
)
else:
train_dataset, hf_dataset = load_dataset(dataset_name, split=split)
if dataset_name == "imdb":
label_name = "label"
label_func = (
lambda x: SENTIMENT_LABELS.POSITIVE
if x == 1
else SENTIMENT_LABELS.NEGATIVE
)
instance_name = ["text"]
data_class = TextLineDataset
elif dataset_name == "sst2":
label_name = "label"
label_func = (
lambda x: SENTIMENT_LABELS.POSITIVE
if x == 1
else SENTIMENT_LABELS.NEGATIVE
)
instance_name = ["sentence"]
data_class = TextLineDataset
elif dataset_name in ["multi_nli", "snli"]:
label_name = "label"
def label_func(d):
if d == 0:
return NLI_LABELS.ENTAILMENT
elif d == 1:
return NLI_LABELS.NEUTRAL
elif d == 2:
return NLI_LABELS.CONTRADICTION
instance_name = ["premise", "hypothesis"]
data_class = KeyValueDataset
elif dataset_name == "qqp":
label_name = "label"
instance_name = ["question1", "question2"]
def label_func(d):
if d == 1:
return QQP_LABEL.DUPLICATE
else:
return QQP_LABEL.NON_DUPLICATE
data_class = KeyValueDataset
datasets = data_class.from_huggingface_with_training(
train_dataset,
hf_dataset,
fields=instance_name + [label_name],
task_type=TaskType.TEXT_CLASSIFICATION,
max_size=1000,
)
return datasets, label_func
def _get_instance_by_keys(example):
if type(example) == str:
return example
elif len(example) == 1:
return example[0] if type(example[0]) == str else example[0][0]
else:
return tuple([e if type(e) == str else e[0] for e in example])
def _process_model_pred(model_name, pred):
if model_name == "aychang/roberta-base-imdb":
return (
SENTIMENT_LABELS.POSITIVE
if pred == "pos"
else SENTIMENT_LABELS.NEGATIVE
)
elif model_name in [
"textattack/roberta-base-imdb",
"textattack/roberta-base-SST-2",
"clue/roberta_chinese_base",
"clue/roberta_chinese_clue_large",
]:
return (
SENTIMENT_LABELS.POSITIVE
if pred == "LABEL_1"
else SENTIMENT_LABELS.NEGATIVE
)
elif model_name in [
"ji-xin/roberta_base-QQP-two_stage",
"textattack/bert-base-uncased-QQP",
]:
return (
QQP_LABEL.DUPLICATE
if pred == "LABEL_1"
else QQP_LABEL.NON_DUPLICATE
)
elif model_name == "roberta-large-mnli":
if pred == "CONTRADICTION":
return NLI_LABELS.CONTRADICTION
elif pred == "ENTAILMENT":
return NLI_LABELS.ENTAILMENT
else:
return NLI_LABELS.NEUTRAL
elif model_name == "textattack/bert-base-uncased-snli":
if pred == "LABEL_0":
return NLI_LABELS.CONTRADICTION
elif pred == "LABEL_1":
return NLI_LABELS.ENTAILMENT
else:
return NLI_LABELS.NEUTRAL
def evaluate_with_training(
operation,
evaluate_filter,
model_name,
dataset_name,
split="test[:20%]",
batch_size=8,
is_cuda=torch.cuda.is_available(),
):
if model_name is None:
model_name = "aychang/roberta-base-imdb"
if dataset_name is None:
dataset_name = "imdb"
print(
f"Loading <{dataset_name}> dataset to evaluate <{model_name}> model."
)
text_classification_pipeline = pipeline(
"sentiment-analysis",
model=model_name,
tokenizer=model_name,
device=0 if is_cuda else -1,
)
percent = f"[{split.split('[')[-1]}" if "[" in split else ""
if dataset_name == "multi_nli":
split = ("train", f"validation_matched{percent}")
elif dataset_name == "imdb":
split = ("train", split)
else:
split = ("train", f"validation{percent}")
print(split)
performance = {
"split": split,
"model_name": model_name,
"dataset_name": dataset_name,
}
dataset, label_func = _process_data_with_training(dataset_name, split)
print(
f"Here is the performance of the model {model_name} on the {split} split of the {dataset_name} dataset"
)
if evaluate_filter:
filtered_dataset = dataset.apply_filter(operation)
print("Here is the performance of the model on the filtered set")
accuracy, total = evaluate_dataset(
text_classification_pipeline,
filtered_dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["accuracy"] = accuracy
performance["no_of_examples"] = total
else:
accuracy, total = evaluate_dataset(
text_classification_pipeline,
dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["accuracy"] = accuracy
performance["no_of_examples"] = total
pt_dataset = dataset.apply_transformation(operation)
if pt_dataset is None:
print(f"No transformation applied.")
accuracy = 0
else:
print(
"Here is the performance of the model on the transformed set"
)
accuracy, _ = evaluate_dataset(
text_classification_pipeline,
pt_dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["pt_accuracy"] = accuracy
# (3) Execute perturbation
# (4) Execute the performance of the original set and the perturbed set
return performance
def evaluate(
operation,
evaluate_filter,
model_name,
dataset_name,
split="test[:20%]",
batch_size=8,
is_cuda=torch.cuda.is_available(),
):
if model_name is None:
model_name = "aychang/roberta-base-imdb"
if dataset_name is None:
dataset_name = "imdb"
print(
f"Loading <{dataset_name}> dataset to evaluate <{model_name}> model."
)
# For the roberta_chinese_base model, you have to call the tokenizer for BERT instead:
# https://huggingface.co/clue/roberta_chinese_base
if model_name in [
"clue/roberta_chinese_base",
"clue/roberta_chinese_clue_large",
]:
text_classification_pipeline = pipeline(
"sentiment-analysis",
model=model_name,
tokenizer="bert-base-chinese",
device=0 if is_cuda else -1,
)
else:
text_classification_pipeline = pipeline(
"sentiment-analysis",
model=model_name,
tokenizer=model_name,
device=0 if is_cuda else -1,
)
percent = f"[{split.split('[')[-1]}" if "[" in split else ""
if dataset_name == "multi_nli":
split = f"validation_matched{percent}"
elif dataset_name != "imdb":
split = f"validation{percent}"
performance = {
"model_name": model_name,
"split": split,
"dataset_name": dataset_name,
}
dataset, label_func = _process_data(dataset_name, split)
print(
f"Here is the performance of the model {model_name} on the {split} split of the {dataset_name} dataset"
)
if evaluate_filter:
filtered_dataset = dataset.apply_filter(operation)
print("Here is the performance of the model on the filtered set")
accuracy, total = evaluate_dataset(
text_classification_pipeline,
filtered_dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["accuracy"] = accuracy
performance["no_of_examples"] = total
else:
accuracy, total = evaluate_dataset(
text_classification_pipeline,
dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["accuracy"] = accuracy
performance["no_of_examples"] = total
pt_dataset = dataset.apply_transformation(operation)
if pt_dataset is None:
print(f"No transformation applied.")
accuracy = 0
else:
print(
"Here is the performance of the model on the transformed set"
)
accuracy, _ = evaluate_dataset(
text_classification_pipeline,
pt_dataset,
model_name,
label_func,
batch_size=batch_size,
)
performance["pt_accuracy"] = accuracy
# (3) Execute perturbation
# (4) Execute the performance of the original set and the perturbed set
return performance
def _get_model_pred(model, examples, batch_size):
all_preds = []
with torch.no_grad():
for e in range(0, len(examples), batch_size):
all_preds += model(examples[e : e + batch_size], truncation=True)
return [a["label"] for a in all_preds]
def evaluate_dataset(
text_classification_pipeline,
dataset,
model_name,
label_func,
batch_size=32,
):
accuracy = 0
total = 0
examples = [
_get_instance_by_keys(list(raw_text)[:-1]) for raw_text in dataset
]
labels = [label_func(list(raw_text)[-1]) for raw_text in dataset]
raw_preds = _get_model_pred(
text_classification_pipeline, examples, batch_size=batch_size
)
preds = [
_process_model_pred(model_name, raw_pred) for raw_pred in raw_preds
]
accuracy = np.round(100 * np.mean(np.array(labels) == np.array(preds)))
total = len(labels)
print(
f"The accuracy on this subset which has {total} examples = {accuracy}"
)
return accuracy, total
 | en | 0.783627 | # make this to work for three task. # (3) Execute perturbation # (4) Execute the performance of the original set and the perturbed set # For the roberta_chinese_base model, you have to call the tokenizer for BERT instead: # https://huggingface.co/clue/roberta_chinese_base # (3) Execute perturbation # (4) Execute the performance of the original set and the perturbed set | 2.521698 | 3 |
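An illustrative driver for the evaluate() helper above, meant to run from (or import from) that evaluation module. The transformation class and its import path are assumptions about the surrounding NL-Augmenter checkout; any operation object accepted by dataset.apply_transformation would be used the same way.

if __name__ == "__main__":
    # Assumed to exist in this NL-Augmenter checkout; swap in any transformation
    # accepted by dataset.apply_transformation above.
    from transformations.butter_fingers_perturbation.transformation import ButterFingersPerturbation

    results = evaluate(
        operation=ButterFingersPerturbation(),
        evaluate_filter=False,
        model_name="aychang/roberta-base-imdb",
        dataset_name="imdb",
        split="test[:20%]",
        batch_size=8,
    )
    print(results)  # e.g. {'model_name': ..., 'accuracy': ..., 'pt_accuracy': ..., 'no_of_examples': ...}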
tinder_py/tinder/entities/update.py | hc5aleksandrov/autotinder | 0 | 6631331 | <reponame>hc5aleksandrov/autotinder
from typing import List
class NewMessage:
"""
Container for a new message holding the message and match id.
"""
__slots__ = ["message_id", "match_id"]
def __init__(self, message_id: str, match_id: str):
self.message_id: str = message_id
self.match_id: str = match_id
class Update:
"""
Describes an update sent by Tinder containing information about new matches and messages.
"""
__slots__ = ["new_matches", "new_messages", "update"]
def __init__(self, update: dict):
self.new_matches: List[str] = []
"""A list of all new matches"""
self.new_messages: List[NewMessage] = []
"""A list of all new messages"""
for match in update["matches"]:
seen = True
if "seen" in match:
seen = match["seen"]["match_seen"]
if seen:
for message in match["messages"]:
self.new_messages.append(NewMessage(message["_id"], message["match_id"]))
else:
self.new_matches.append(match["_id"])
self.update: dict = update
"""The raw update event response"""
 | en | 0.673338 | Container for a new message holding the message and match id. Describes an update sent by Tinder containing information about new matches and messages. A list of all new matches A list of all new messages The raw update event response | 3.289851 | 3 |
string/solution/1316.py | gpgun0/baekjoon_ | 0 | 6631332 | <gh_stars>0
class Solution:
def main(self, word: str) -> int:
char_check_list = [0] * 26
char_check_list[ord(word[0]) - 97] = 1
for i in range(1, len(word)):
if char_check_list[ord(word[i]) - 97] and word[i-1] != word[i]:
return 0
char_check_list[ord(word[i]) - 97] = 1
return 1
sol = Solution()
cnt = 0
n = int(input())
for _ in range(n):
word = input()
cnt += sol.main(word)
print(cnt) | none | 1 | 3.238958 | 3 |
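This solves the Baekjoon 1316 "group word" count, where a word qualifies only if every letter's occurrences are contiguous. A set-based sketch of the same check, with an inline self-test instead of stdin, is shown below; the expected totals follow the commonly cited samples for this problem.

def is_group_word(word: str) -> int:
    # Same semantics as Solution.main above: fail as soon as a letter
    # reappears after a different letter has interrupted its run.
    seen = set()
    for i, ch in enumerate(word):
        if i > 0 and ch == word[i - 1]:
            continue            # still inside the same run
        if ch in seen:
            return 0            # letter came back after being interrupted
        seen.add(ch)
    return 1

assert sum(map(is_group_word, ["happy", "new", "year"])) == 3
assert sum(map(is_group_word, ["aba", "abab", "abcabc", "a"])) == 1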
|
GeneratorInterface/Core/test/test_FailingGeneratorFilter_cfg.py | AndrissP/cmssw | 0 | 6631333 | <filename>GeneratorInterface/Core/test/test_FailingGeneratorFilter_cfg.py
import FWCore.ParameterSet.Config as cms
import sys
process = cms.Process("TEST")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(10))
from GeneratorInterface.Core.ExternalGeneratorFilter import *
process.generator = ExternalGeneratorFilter(
cms.EDFilter("FailingGeneratorFilter",
failAt=cms.int32(int(sys.argv[2])),
failureType = cms.int32(int(sys.argv[3]))),
_external_process_waitTime_ = cms.untracked.uint32(5),
_external_process_verbose_ = cms.untracked.bool(True),
_external_process_components_ =cms.vstring()
)
process.p = cms.Path(process.generator)
process.add_(cms.Service("RandomNumberGeneratorService",
generator = cms.PSet(
initialSeed = cms.untracked.uint32(123),
engineName = cms.untracked.string('HepJamesRandom')
)
))
 | none | 1 | 1.673215 | 2 |
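The configuration above reads its two failure parameters from sys.argv, matching an invocation where extra arguments follow the config name on the cmsRun command line. A hedged guard like the one below (not part of the original test) would let the file fall back to defaults when those arguments are absent.

import sys

# Hypothetical defaults; positions 2 and 3 mirror how the cfg above indexes sys.argv.
fail_at = int(sys.argv[2]) if len(sys.argv) > 2 else 1
failure_type = int(sys.argv[3]) if len(sys.argv) > 3 else 0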
|
tests/test_corpus.py | zhouyangnk/Montreal-Forced-Aligner | 1 | 6631334 | <filename>tests/test_corpus.py
import os
import sys
import pytest
import shutil
from aligner.corpus import Corpus
from aligner.dictionary import Dictionary
from aligner.features.config import FeatureConfig
def test_basic(basic_dict_path, basic_corpus_dir, generated_dir):
dictionary = Dictionary(basic_dict_path, os.path.join(generated_dir, 'basic'))
dictionary.write()
output_directory = os.path.join(generated_dir, 'basic')
c = Corpus(basic_corpus_dir, output_directory)
c.initialize_corpus(dictionary)
fc = FeatureConfig()
fc.generate_features(c)
assert c.get_feat_dim(fc) == 39
def test_basic_txt(basic_corpus_txt_dir, basic_dict_path, generated_dir):
dictionary = Dictionary(basic_dict_path, os.path.join(generated_dir, 'basic'))
dictionary.write()
output_directory = os.path.join(generated_dir, 'basic')
c = Corpus(basic_corpus_txt_dir, output_directory)
assert len(c.no_transcription_files) == 0
c.initialize_corpus(dictionary)
fc = FeatureConfig()
fc.generate_features(c)
assert c.get_feat_dim(fc) == 39
def test_extra(sick_dict, extra_corpus_dir, generated_dir):
output_directory = os.path.join(generated_dir, 'extra')
corpus = Corpus(extra_corpus_dir, output_directory, num_jobs=2)
corpus.initialize_corpus(sick_dict)
def test_stereo(basic_dict_path, stereo_corpus_dir, temp_dir):
temp = os.path.join(temp_dir, 'stereo')
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = Corpus(stereo_corpus_dir, temp)
d.initialize_corpus(dictionary)
fc = FeatureConfig()
fc.generate_features(d)
assert d.get_feat_dim(fc) == 39
def test_short_segments(basic_dict_path, shortsegments_corpus_dir, temp_dir):
temp = os.path.join(temp_dir, 'short_segments')
dictionary = Dictionary(basic_dict_path, temp)
dictionary.write()
corpus = Corpus(shortsegments_corpus_dir, temp)
corpus.initialize_corpus(dictionary)
fc = FeatureConfig()
fc.generate_features(corpus)
assert len(corpus.feat_mapping.keys()) == 2
assert len(corpus.utt_speak_mapping.keys()) == 3
assert len(corpus.speak_utt_mapping.keys()) == 1
assert len(corpus.text_mapping.keys()) == 3
assert len(corpus.utt_wav_mapping.keys()) == 1
assert len(corpus.segments.keys()) == 3
assert len(corpus.ignored_utterances) == 1
def test_speaker_groupings(large_prosodylab_format_directory, temp_dir, large_dataset_dictionary):
output_directory = os.path.join(temp_dir, 'large')
shutil.rmtree(output_directory, ignore_errors=True)
d = Dictionary(large_dataset_dictionary, output_directory)
d.write()
c = Corpus(large_prosodylab_format_directory, output_directory)
c.initialize_corpus(d)
fc = FeatureConfig()
fc.generate_features(c)
speakers = os.listdir(large_prosodylab_format_directory)
for s in speakers:
assert any(s in x for x in c.speaker_groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.feat_mapping)
shutil.rmtree(output_directory, ignore_errors=True)
d.write()
c = Corpus(large_prosodylab_format_directory, output_directory, num_jobs=2)
c.initialize_corpus(d)
fc.generate_features(c)
for s in speakers:
assert any(s in x for x in c.speaker_groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.feat_mapping)
def test_subset(large_prosodylab_format_directory, temp_dir, large_dataset_dictionary):
output_directory = os.path.join(temp_dir, 'large_subset')
shutil.rmtree(output_directory, ignore_errors=True)
d = Dictionary(large_dataset_dictionary, output_directory)
d.write()
c = Corpus(large_prosodylab_format_directory, output_directory)
c.initialize_corpus(d)
sd = c.split_directory()
fc = FeatureConfig()
fc.generate_features(c)
s = c.subset_directory(10, fc)
assert os.path.exists(sd)
assert os.path.exists(s)
 | none | 1 | 2.247202 | 2 |
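The tests above repeat one setup pattern: write a Dictionary, build a Corpus, initialize it, then generate features. A helper distilled from exactly those calls is sketched below (a convenience sketch, not part of the test suite).

from aligner.corpus import Corpus
from aligner.dictionary import Dictionary
from aligner.features.config import FeatureConfig

def build_corpus(dict_path, corpus_dir, output_directory, num_jobs=1):
    # Same call sequence the tests use before making assertions.
    dictionary = Dictionary(dict_path, output_directory)
    dictionary.write()
    corpus = Corpus(corpus_dir, output_directory, num_jobs=num_jobs)
    corpus.initialize_corpus(dictionary)
    feature_config = FeatureConfig()
    feature_config.generate_features(corpus)
    return corpus, dictionary, feature_config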
|
saleor/product/migrations/0059_generate_variant_name_from_attrs.py | TysonRV/saleor | 9 | 6631335 | <reponame>TysonRV/saleor
# Generated by Django 2.0.2 on 2018-03-11 18:54
from django.db import migrations
from saleor.product.utils.attributes import get_attributes_display_map
def get_name_from_attributes(variant):
attributes = variant.product.product_type.variant_attributes.all()
values = get_attributes_display_map(variant, attributes)
return ' / '.join(
attributechoice.name
for attribute_pk, attributechoice in sorted(
values.items(), key=lambda x: x[0]))
def create_variant_name_based_on_attributes(apps, schema_editor):
ProductVariant = apps.get_model('product', 'ProductVariant')
for variant in ProductVariant.objects.prefetch_related(
'product__product_type__variant_attributes__values'):
new_name = get_name_from_attributes(variant)
if variant.name != new_name:
variant.name = new_name
variant.save()
class Migration(migrations.Migration):
dependencies = [
('product', '0058_auto_20180329_0142'),
]
operations = [
migrations.RunPython(create_variant_name_based_on_attributes, migrations.RunPython.noop)
]
 | en | 0.845188 | # Generated by Django 2.0.2 on 2018-03-11 18:54 | 2.11269 | 2 |
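The migration above uses Django's standard data-migration shape: a forward function handed to RunPython with noop as the reverse. A generic template of that shape, with placeholder app and field names rather than saleor's, is sketched below.

from django.db import migrations

def forwards(apps, schema_editor):
    # Placeholder app/model/field names; fetch historical models via apps.get_model.
    Model = apps.get_model('myapp', 'MyModel')
    for obj in Model.objects.all():
        obj.name = obj.name.strip()
        obj.save()

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]
    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]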
classical/__init__.py | JosephGesnouin/Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories | 9 | 6631336 | from .socialforce import predict
from .orca import predict
from .kalman import predict
from .constant_velocity import predict
 | none | 1 | 0.964587 | 1 |
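Note that the four imports above all bind the same name, so only constant_velocity.predict remains visible after the module loads. If the intent is to expose every baseline predictor, aliased imports along these lines would keep them distinct (a sketch, not the repository's actual API).

from .socialforce import predict as socialforce_predict
from .orca import predict as orca_predict
from .kalman import predict as kalman_predict
from .constant_velocity import predict as constant_velocity_predict

__all__ = [
    "socialforce_predict",
    "orca_predict",
    "kalman_predict",
    "constant_velocity_predict",
]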
|
signac/contrib/migration/__init__.py | rohanbabbar04/signac | 0 | 6631337 | <gh_stars>0
# Copyright (c) 2019 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Handle migrations of signac schema versions."""
import os
import sys
from filelock import FileLock
from packaging import version
from ...version import SCHEMA_VERSION, __version__
from .v0_to_v1 import _load_config_v1, _migrate_v0_to_v1
FN_MIGRATION_LOCKFILE = ".SIGNAC_PROJECT_MIGRATION_LOCK"
# Config loaders must be functions with the signature
# def config_loader(root_directory: str) -> MutableMapping
# When a new schema version is introduced, a corresponding loader only needs to
# be added if the old loader will no longer function. This dictionary must
# contain all unique loaders for schema versions that are supported as starting
# points for migration. The resulting MutableMapping config objects must be
# writeable, i.e. it must be possible to persist in-memory changes from these
# objects to the underlying config files.
_CONFIG_LOADERS = {
"1": _load_config_v1,
}
_MIGRATIONS = {
("0", "1"): _migrate_v0_to_v1,
}
_PARSED_SCHEMA_VERSION = version.parse(SCHEMA_VERSION)
_VERSION_LIST = list(reversed(sorted(version.parse(v) for v in _CONFIG_LOADERS.keys())))
def _get_config_schema_version(root_directory, version_guess):
# Try loading the schema using the loader corresponding to the expected
# version if it has a configured loader.
versions = _VERSION_LIST
if version_guess in _CONFIG_LOADERS:
versions = [version_guess] + versions
for guess in versions:
try:
# Note: We could consider using a different component as the key
# for _CONFIG_LOADERS, but since this is an internal detail it's
# not terribly consequential.
config = _CONFIG_LOADERS[guess.public](root_directory)
break
except Exception:
# The load failed, go to the next
pass
else:
raise RuntimeError("Unable to load config file.")
try:
return version.parse(config["schema_version"])
except KeyError:
# The default schema version is version 0.
return version.parse("0")
def _collect_migrations(root_directory):
schema_version = _PARSED_SCHEMA_VERSION
current_schema_version = _get_config_schema_version(
root_directory, _PARSED_SCHEMA_VERSION
)
if current_schema_version > schema_version:
# Project config schema version is newer and therefore not supported.
raise RuntimeError(
"The signac schema version used by this project is "
f"{current_schema_version}, but signac {__version__} only "
f"supports up to schema version {SCHEMA_VERSION}. Try updating "
"signac."
)
guess = current_schema_version
while _get_config_schema_version(root_directory, guess) < schema_version:
for (origin, destination), migration in _MIGRATIONS.items():
if version.parse(origin) == _get_config_schema_version(
root_directory, guess
):
yield (origin, destination), migration
guess = version.parse(destination)
break
else:
raise RuntimeError(
"The signac schema version used by this project is "
f"{_get_config_schema_version(root_directory, guess)}, but "
f"signac {__version__} uses schema version {schema_version} "
"and does not know how to migrate."
)
def apply_migrations(root_directory):
"""Apply migrations to a project.
This function identifies and performs all the necessary schema migrations
to bring a project up to date with the current schema version of signac.
The calling code does not require prior knowledge of the schema version of
the project, and the function is idempotent when applied to projects that
already have an up-to-date schema.
Parameters
----------
root_directory : str
The path to the project to migrate.
"""
try:
lock = FileLock(os.path.join(root_directory, FN_MIGRATION_LOCKFILE))
with lock:
for (origin, destination), migrate in _collect_migrations(root_directory):
try:
print(
f"Applying migration for version {origin} to {destination}... ",
end="",
file=sys.stderr,
)
migrate(root_directory)
except Exception as e:
raise RuntimeError(
f"Failed to apply migration {destination}."
) from e
else:
config = _CONFIG_LOADERS[version.parse(destination).public](
root_directory
)
config["schema_version"] = destination
config.write()
print("OK", file=sys.stderr)
finally:
try:
os.unlink(lock.lock_file)
except FileNotFoundError:
pass
__all__ = [
"apply_migrations",
]
| # Copyright (c) 2019 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Handle migrations of signac schema versions."""
import os
import sys
from filelock import FileLock
from packaging import version
from ...version import SCHEMA_VERSION, __version__
from .v0_to_v1 import _load_config_v1, _migrate_v0_to_v1
FN_MIGRATION_LOCKFILE = ".SIGNAC_PROJECT_MIGRATION_LOCK"
# Config loaders must be functions with the signature
# def config_loader(root_directory: str) -> MutableMapping
# When a new schema version is introduced, a corresponding loader only needs to
# be added if the old loader will no longer function. This dictionary must
# contain all unique loaders for schema versions that are supported as starting
# points for migration. The resulting MutableMapping config objects must be
# writeable, i.e. it must be possible to persist in-memory changes from these
# objects to the underlying config files.
_CONFIG_LOADERS = {
"1": _load_config_v1,
}
_MIGRATIONS = {
("0", "1"): _migrate_v0_to_v1,
}
_PARSED_SCHEMA_VERSION = version.parse(SCHEMA_VERSION)
_VERSION_LIST = list(reversed(sorted(version.parse(v) for v in _CONFIG_LOADERS.keys())))
def _get_config_schema_version(root_directory, version_guess):
# Try loading the schema using the loader corresponding to the expected
# version if it has a configured loader.
versions = _VERSION_LIST
if version_guess in _CONFIG_LOADERS:
versions = [version_guess] + versions
for guess in versions:
try:
# Note: We could consider using a different component as the key
# for _CONFIG_LOADERS, but since this is an internal detail it's
# not terribly consequential.
config = _CONFIG_LOADERS[guess.public](root_directory)
break
except Exception:
# The load failed, go to the next
pass
else:
raise RuntimeError("Unable to load config file.")
try:
return version.parse(config["schema_version"])
except KeyError:
# The default schema version is version 0.
return version.parse("0")
def _collect_migrations(root_directory):
schema_version = _PARSED_SCHEMA_VERSION
current_schema_version = _get_config_schema_version(
root_directory, _PARSED_SCHEMA_VERSION
)
if current_schema_version > schema_version:
# Project config schema version is newer and therefore not supported.
raise RuntimeError(
"The signac schema version used by this project is "
f"{current_schema_version}, but signac {__version__} only "
f"supports up to schema version {SCHEMA_VERSION}. Try updating "
"signac."
)
guess = current_schema_version
while _get_config_schema_version(root_directory, guess) < schema_version:
for (origin, destination), migration in _MIGRATIONS.items():
if version.parse(origin) == _get_config_schema_version(
root_directory, guess
):
yield (origin, destination), migration
guess = version.parse(destination)
break
else:
raise RuntimeError(
"The signac schema version used by this project is "
f"{_get_config_schema_version(root_directory, guess)}, but "
f"signac {__version__} uses schema version {schema_version} "
"and does not know how to migrate."
)
def apply_migrations(root_directory):
"""Apply migrations to a project.
This function identifies and performs all the necessary schema migrations
to bring a project up to date with the current schema version of signac.
The calling code does not require prior knowledge of the schema version of
the project, and the function is idempotent when applied to projects that
already have an up-to-date schema.
Parameters
----------
root_directory : str
The path to the project to migrate.
"""
try:
lock = FileLock(os.path.join(root_directory, FN_MIGRATION_LOCKFILE))
with lock:
for (origin, destination), migrate in _collect_migrations(root_directory):
try:
print(
f"Applying migration for version {origin} to {destination}... ",
end="",
file=sys.stderr,
)
migrate(root_directory)
except Exception as e:
raise RuntimeError(
f"Failed to apply migration {destination}."
) from e
else:
config = _CONFIG_LOADERS[version.parse(destination).public](
root_directory
)
config["schema_version"] = destination
config.write()
print("OK", file=sys.stderr)
finally:
try:
os.unlink(lock.lock_file)
except FileNotFoundError:
pass
__all__ = [
"apply_migrations",
] | en | 0.845021 | # Copyright (c) 2019 The Regents of the University of Michigan # All rights reserved. # This software is licensed under the BSD 3-Clause License. Handle migrations of signac schema versions. # Config loaders must be functions with the signature # def config_loader(root_directory: str) -> MutableMapping # When a new schema version is introduced, a corresponding loader only needs to # be added if the old loader will no longer function. This dictionary must # contain all unique loaders for schema versions that are supported as starting # points for migration. The resulting MutableMapping config objects must be # writeable, i.e. it must be possible to persist in-memory changes from these # objects to the underlying config files. # Try loading the schema using the loader corresponding to the expected # version if it has a configured loader. # Note: We could consider using a different component as the key # for _CONFIG_LOADERS, but since this is an internal detail it's # not terribly consequential. # The load failed, go to the next # The default schema version is version 0. # Project config schema version is newer and therefore not supported. Apply migrations to a project. This function identifies and performs all the necessary schema migrations to bring a project up to date with the current schema version of signac. The calling code does not require prior knowledge of the schema version of the project, and the function is idempotent when applied to projects that already have an up-to-date schema. Parameters ---------- root_directory : str The path to the project to migrate. | 1.917509 | 2 |
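The signac migration module above is driven by two tables: _CONFIG_LOADERS maps schema versions to config loaders, and _MIGRATIONS maps (origin, destination) pairs to migration functions, which _collect_migrations() chains one step at a time under a file lock. A hedged sketch of what wiring in a hypothetical "1" to "2" step would look like — _load_config_v2 and _migrate_v1_to_v2 do not exist in the module and are placeholders:

def _migrate_v1_to_v2(root_directory):
    """Placeholder: rewrite the on-disk project config from schema 1 to schema 2."""
    ...

def _load_config_v2(root_directory):
    """Placeholder: return a writeable MutableMapping for a schema-2 project."""
    ...

_MIGRATIONS[("1", "2")] = _migrate_v1_to_v2     # picked up by _collect_migrations()
_CONFIG_LOADERS["2"] = _load_config_v2          # only needed if the v1 loader cannot read v2 configs

With those entries in place, apply_migrations(root_directory) would run "0" to "1" and then "1" to "2" in order for an old project, persisting schema_version after each step.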
imcsdk/mometa/firmware/FirmwareRunning.py | kgrozis/UCS-CIMC-Scripts | 0 | 6631338 | """This module contains the general information for FirmwareRunning ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class FirmwareRunningConsts():
DEPLOYMENT_BOOT_LOADER = "boot-loader"
DEPLOYMENT_KERNEL = "kernel"
DEPLOYMENT_SYSTEM = "system"
DEPLOYMENT_UNSPECIFIED = "unspecified"
TYPE_ADAPTOR = "adaptor"
TYPE_BLADE_BIOS = "blade-bios"
TYPE_BLADE_CONTROLLER = "blade-controller"
TYPE_SIOC = "sioc"
TYPE_STORAGE_CONTROLLER = "storage-controller"
TYPE_SYSTEM = "system"
TYPE_UNSPECIFIED = "unspecified"
class FirmwareRunning(ManagedObject):
"""This is FirmwareRunning class."""
consts = FirmwareRunningConsts()
naming_props = set([u'deployment'])
mo_meta = MoMeta("FirmwareRunning", "firmwareRunning", "fw-[deployment]", VersionMeta.Version151f, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosUnit', u'mgmtController', u'storageController', u'systemIOController'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"deployment": MoPropertyMeta("deployment", "deployment", "string", VersionMeta.Version151f, MoPropertyMeta.NAMING, None, None, None, None, ["boot-loader", "kernel", "system", "unspecified"], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "sioc", "storage-controller", "system", "unspecified"], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"deployment": "deployment",
"description": "description",
"dn": "dn",
"rn": "rn",
"status": "status",
"type": "type",
"version": "version",
}
def __init__(self, parent_mo_or_dn, deployment, **kwargs):
self._dirty_mask = 0
self.deployment = deployment
self.child_action = None
self.description = None
self.status = None
self.type = None
self.version = None
ManagedObject.__init__(self, "FirmwareRunning", parent_mo_or_dn, **kwargs)
| """This module contains the general information for FirmwareRunning ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class FirmwareRunningConsts():
DEPLOYMENT_BOOT_LOADER = "boot-loader"
DEPLOYMENT_KERNEL = "kernel"
DEPLOYMENT_SYSTEM = "system"
DEPLOYMENT_UNSPECIFIED = "unspecified"
TYPE_ADAPTOR = "adaptor"
TYPE_BLADE_BIOS = "blade-bios"
TYPE_BLADE_CONTROLLER = "blade-controller"
TYPE_SIOC = "sioc"
TYPE_STORAGE_CONTROLLER = "storage-controller"
TYPE_SYSTEM = "system"
TYPE_UNSPECIFIED = "unspecified"
class FirmwareRunning(ManagedObject):
"""This is FirmwareRunning class."""
consts = FirmwareRunningConsts()
naming_props = set([u'deployment'])
mo_meta = MoMeta("FirmwareRunning", "firmwareRunning", "fw-[deployment]", VersionMeta.Version151f, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosUnit', u'mgmtController', u'storageController', u'systemIOController'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"deployment": MoPropertyMeta("deployment", "deployment", "string", VersionMeta.Version151f, MoPropertyMeta.NAMING, None, None, None, None, ["boot-loader", "kernel", "system", "unspecified"], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "sioc", "storage-controller", "system", "unspecified"], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"deployment": "deployment",
"description": "description",
"dn": "dn",
"rn": "rn",
"status": "status",
"type": "type",
"version": "version",
}
def __init__(self, parent_mo_or_dn, deployment, **kwargs):
self._dirty_mask = 0
self.deployment = deployment
self.child_action = None
self.description = None
self.status = None
self.type = None
self.version = None
ManagedObject.__init__(self, "FirmwareRunning", parent_mo_or_dn, **kwargs)
| en | 0.731699 | This module contains the general information for FirmwareRunning ManagedObject. This is FirmwareRunning class. ((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1} | 1.932418 | 2 |
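FirmwareRunning above is declared output-only (access "OutputOnly", verbs ["Get"]), so in practice it is only ever queried. A sketch of reading it through the imcsdk handle; the endpoint and credentials are placeholders, and ImcHandle/query_classid are assumed to be the SDK's standard login and class-ID query interface:

from imcsdk.imchandle import ImcHandle   # assumed standard imcsdk handle class

handle = ImcHandle("192.0.2.10", "admin", "password")    # placeholder endpoint and credentials
handle.login()
try:
    for mo in handle.query_classid("firmwareRunning"):   # class id taken from mo_meta above
        print(mo.dn, mo.type, mo.version)
finally:
    handle.logout()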
ReinforcedPy/__init__.py | ZibraMax/NSR10-design | 0 | 6631339 | <reponame>ZibraMax/NSR10-design<gh_stars>0
from .Elemento import *
from .Material import *
from .Concreto import *
from .AceroRefuerzo import *
from .Varilla import *
from .Seccion import *
| from .Elemento import *
from .Material import *
from .Concreto import *
from .AceroRefuerzo import *
from .Varilla import *
from .Seccion import * | none | 1 | 1.115751 | 1 |
|
tests/testdata/deploy_scripts/builtin/invalid_metadata.py | horus-view-and-explore/horus-deploy | 3 | 6631340 | <filename>tests/testdata/deploy_scripts/builtin/invalid_metadata.py<gh_stars>1-10
name = "invalid"
METADATA = {"name": name}
| <filename>tests/testdata/deploy_scripts/builtin/invalid_metadata.py<gh_stars>1-10
name = "invalid"
METADATA = {"name": name}
| none | 1 | 1.20522 | 1 |
|
scripts/imgviewer_conversions.py | akashdhamasia/Facial-keypoints-recognition-using-CNN | 184 | 6631341 | # -*- coding: utf-8 -*-
'''
Conversion functions for image viewer extension
'''
import cv2
import six
import numpy as np
import drawing
# logging
from logging import getLogger, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def face_img_func(key, entry, viewer):
# Image conversion
img = entry['img'][0] # Use only a first data in the batch
assert(img.ndim == 3 and (img.shape[0] == 1 or img.shape[0] == 3))
img = np.transpose(img, (1, 2, 0))
img = img.copy() # for safety
img += 0.5 # [-0.5:0.5] -> [0:1]
# Draw
try:
detection_raw = entry['detection'][0]
detection = (detection_raw > 0.5)
if 0.0 <= detection_raw <= 1.0:
drawing.draw_detection(img, detection)
landmark = entry['landmark'][0]
visibility = entry['visibility'][0]
landmark_color = (0, 1, 0) if detection == 1 else (0, 0, 1)
drawing.draw_landmark(img, landmark, visibility, landmark_color, 0.5)
pose = entry['pose'][0]
drawing.draw_pose(img, pose)
gender = entry['gender'][0]
if 0.0 <= gender <= 1.0:
gender = (gender > 0.5)
drawing.draw_gender(img, gender)
except KeyError:
pass
img = (img * 255).astype(np.uint8)
caption = '{:02d}'.format(viewer.img_cnts[key])
return {'img': img, 'cap': caption}
def weights_img_func(key, entry, viewer):
data = entry['weights']
assert(data.ndim == 4)
img_cnt_max = viewer.img_cnt_max[key]
res_data = list()
# accumulate to 3 channels image
for i in six.moves.range(min(data.shape[0], img_cnt_max)):
img_shape = (3,) + data.shape[2:4]
accum = np.zeros(img_shape, dtype=data.dtype)
for ch in six.moves.range(data.shape[1]):
accum[ch % 3] += data[i][ch]
# normalize
img = np.transpose(accum, (1, 2, 0))
img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
width = img.shape[0] * 15
res_data.append({'img': img, 'width': width})
return res_data
# ========================= Loss Graph (In a tab page) ========================
def lossgraph_entry_func(key, viewer, trainer):
# Get a log
log_report = trainer.get_extension('LogReport')
log = log_report.log
# Convert log to lists
def extract_log(log, key, epoch_key):
loss, epoch = list(), list()
# TODO Consider duplication of epoch numbers
for i, row in enumerate(log):
if key in row and epoch_key in row:
loss.append(row[key])
epoch.append(row[epoch_key])
return loss, epoch
# Create a graph image from log
def create_graph_img(log, kind):
train_key = 'main/{}'.format(kind)
test_key = 'validation/main/{}'.format(kind)
train_loss, train_epoch = extract_log(log, train_key, 'epoch')
test_loss, test_epoch = extract_log(log, test_key, 'epoch')
if len(train_loss) == 0 and len(test_loss) == 0:
return None
else:
return drawing.draw_loss_graph(train_loss, test_loss,
train_epoch, test_epoch, title=kind)
# Create loss graphs
res = dict()
loss_kinds = ['loss', 'loss_detection', 'loss_landmark', 'loss_visibility',
'loss_pose', 'loss_gender']
for k in loss_kinds:
img = create_graph_img(log, k)
if img is not None: # Use only valid ones
res[k] = img
return res
def lossgraph_img_func(key, entry, viewer):
# Convert to viewer format
return [{'img': entry[k]} for k in entry.keys()]
| # -*- coding: utf-8 -*-
'''
Conversion functions for image viewer extension
'''
import cv2
import six
import numpy as np
import drawing
# logging
from logging import getLogger, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def face_img_func(key, entry, viewer):
# Image conversion
img = entry['img'][0] # Use only a first data in the batch
assert(img.ndim == 3 and (img.shape[0] == 1 or img.shape[0] == 3))
img = np.transpose(img, (1, 2, 0))
img = img.copy() # for safety
img += 0.5 # [-0.5:0.5] -> [0:1]
# Draw
try:
detection_raw = entry['detection'][0]
detection = (detection_raw > 0.5)
if 0.0 <= detection_raw <= 1.0:
drawing.draw_detection(img, detection)
landmark = entry['landmark'][0]
visibility = entry['visibility'][0]
landmark_color = (0, 1, 0) if detection == 1 else (0, 0, 1)
drawing.draw_landmark(img, landmark, visibility, landmark_color, 0.5)
pose = entry['pose'][0]
drawing.draw_pose(img, pose)
gender = entry['gender'][0]
if 0.0 <= gender <= 1.0:
gender = (gender > 0.5)
drawing.draw_gender(img, gender)
except KeyError:
pass
img = (img * 255).astype(np.uint8)
caption = '{:02d}'.format(viewer.img_cnts[key])
return {'img': img, 'cap': caption}
def weights_img_func(key, entry, viewer):
data = entry['weights']
assert(data.ndim == 4)
img_cnt_max = viewer.img_cnt_max[key]
res_data = list()
# accumulate to 3 channels image
for i in six.moves.range(min(data.shape[0], img_cnt_max)):
img_shape = (3,) + data.shape[2:4]
accum = np.zeros(img_shape, dtype=data.dtype)
for ch in six.moves.range(data.shape[1]):
accum[ch % 3] += data[i][ch]
# normalize
img = np.transpose(accum, (1, 2, 0))
img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
width = img.shape[0] * 15
res_data.append({'img': img, 'width': width})
return res_data
# ========================= Loss Graph (In a tab page) ========================
def lossgraph_entry_func(key, viewer, trainer):
# Get a log
log_report = trainer.get_extension('LogReport')
log = log_report.log
# Convert log to lists
def extract_log(log, key, epoch_key):
loss, epoch = list(), list()
# TODO Consider duplication of epoch numbers
for i, row in enumerate(log):
if key in row and epoch_key in row:
loss.append(row[key])
epoch.append(row[epoch_key])
return loss, epoch
# Create a graph image from log
def create_graph_img(log, kind):
train_key = 'main/{}'.format(kind)
test_key = 'validation/main/{}'.format(kind)
train_loss, train_epoch = extract_log(log, train_key, 'epoch')
test_loss, test_epoch = extract_log(log, test_key, 'epoch')
if len(train_loss) == 0 and len(test_loss) == 0:
return None
else:
return drawing.draw_loss_graph(train_loss, test_loss,
train_epoch, test_epoch, title=kind)
# Create loss graphs
res = dict()
loss_kinds = ['loss', 'loss_detection', 'loss_landmark', 'loss_visibility',
'loss_pose', 'loss_gender']
for k in loss_kinds:
img = create_graph_img(log, k)
if img is not None: # Use only valid ones
res[k] = img
return res
def lossgraph_img_func(key, entry, viewer):
# Convert to viewer format
return [{'img': entry[k]} for k in entry.keys()]
| en | 0.718661 | # -*- coding: utf-8 -*- Conversion functions for image viewer extension # logging # Image conversion # Use only a first data in the batch # for safety # [-0.5:0.5] -> [0:1] # Draw # accumulate to 3 channels image # normalize # ========================= Loss Graph (In a tab page) ======================== # Get a log # Convert log to lists # TODO Consider duplication of epoch numbers # Create a graph image from log # Create loss graphs # Use only valid ones # Convert to viewer format | 2.809927 | 3 |
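The most reusable piece of imgviewer_conversions.py is the channel fold in weights_img_func: an arbitrary number of filter channels is accumulated into three planes (channel i into plane i % 3) and then min-max normalized for display. A standalone sketch of that fold for a single (channels, H, W) filter bank, without the viewer plumbing:

import cv2
import numpy as np

def fold_to_rgb(weights):
    """Collapse a (channels, H, W) filter bank into one H x W x 3 uint8 preview image."""
    accum = np.zeros((3,) + weights.shape[1:], dtype=weights.dtype)
    for ch in range(weights.shape[0]):
        accum[ch % 3] += weights[ch]                      # channel i lands in colour plane i % 3
    img = np.transpose(accum, (1, 2, 0))                  # CHW -> HWC
    return cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

preview = fold_to_rgb(np.random.rand(16, 7, 7).astype(np.float32))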
stolen_sugar/populate.py | daslater/knausj_talon | 0 | 6631342 | import boto3
import code.keys
from code.keys import ctx as ctx_keys
items_to_write = []
filename = code.keys.__file__.removeprefix('/Users/austin/Code/talon/')
for (category, mappings) in ctx_keys.lists.items():
category = category.removeprefix('self.')
context = ctx_keys.matches if hasattr(ctx_keys, 'matches') else 'default'
for (invocation, target) in mappings.items():
mapping = {'target': target,
'invocation': invocation,
'category': category,
'file': filename,
'context': context}
items_to_write.append(mapping)
def load_mappings(mappings, dynamodb=None):
if not dynamodb:
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('mappings')
for mapping in mappings:
target = mapping['target']
context = mapping['context']
print("Adding mapping:", target, context)
table.put_item(Item=mapping)
if __name__ == '__main__':
load_mappings(items_to_write) | import boto3
import code.keys
from code.keys import ctx as ctx_keys
items_to_write = []
filename = code.keys.__file__.removeprefix('/Users/austin/Code/talon/')
for (category, mappings) in ctx_keys.lists.items():
category = category.removeprefix('self.')
context = ctx_keys.matches if hasattr(ctx_keys, 'matches') else 'default'
for (invocation, target) in mappings.items():
mapping = {'target': target,
'invocation': invocation,
'category': category,
'file': filename,
'context': context}
items_to_write.append(mapping)
def load_mappings(mappings, dynamodb=None):
if not dynamodb:
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('mappings')
for mapping in mappings:
target = mapping['target']
context = mapping['context']
print("Adding mapping:", target, context)
table.put_item(Item=mapping)
if __name__ == '__main__':
load_mappings(items_to_write) | none | 1 | 2.207919 | 2 |
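The populate script above writes one item per put_item call, which is fine for a key list of this size. For larger uploads, the same Table resource offers batch_writer(), which buffers puts into batched write requests automatically — a sketch of the same load with batching, keeping the 'mappings' table name from the script:

import boto3

def load_mappings_batched(mappings, dynamodb=None):
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('mappings')
    with table.batch_writer() as batch:      # buffers items and flushes them in batches
        for mapping in mappings:
            batch.put_item(Item=mapping)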
|
bin/enREST.py | ubercomrade/enrest | 0 | 6631343 | import os
import sys
import argparse
from enrest.functions import *
from enrest.set import set_case
from enrest.deg import deg_case
from enrest.fasta import fasta_case
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser_name', help='Available commands:')
deg_parser = subparsers.add_parser('deg', help='Run test on DEGs')
set_parser = subparsers.add_parser('set', help='Run test on SET of genes')
fasta_parser = subparsers.add_parser('fasta', help='Run test on user FASTA files')
deg_parser.add_argument('deg', action='store', help='TSV file with DEG with ..., The NAME column must contain ensemble gene IDS')
deg_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
deg_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
deg_parser.add_argument('output', action='store', help='Name of directory for output files')
deg_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
deg_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
deg_parser.add_argument('-P', '--pvalue', action='store', type=float, default=0.05,
help='The pvalue is used as threshold to choose DEGs, default= 0.05')
deg_parser.add_argument('-l', '--log2fc_deg', action='store', type=float, default=1.,
help='The absolute value of log2FoldChange used as threshold to choose DEGs promoters (DEGs >= thr OR DEGs <= -thr), default= 1')
deg_parser.add_argument('-L', '--log2fc_back', action='store', type=float, default=0.32192809488736235,
help='The absolute value of log2FoldChange used as threshold to choose background promoters (-thr <= BACK <= thr), default= log2(5/4)')
set_parser.add_argument('set', action='store', help='File with list of genes. Genes must be in Ensemble format (ensemble gene IDS)')
set_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
set_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
set_parser.add_argument('output', action='store', help='Name of directory for output files')
set_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
set_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
fasta_parser.add_argument('foreground', action='store', help='Fasta file with sequences are used as foreground')
fasta_parser.add_argument('background', action='store', help='Fasta file with sequences are used as background')
fasta_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
fasta_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
fasta_parser.add_argument('output', action='store', help='Name of directory for output files')
fasta_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
fasta_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return(parser.parse_args())
def main():
args = parse_args()
if args.subparser_name == 'deg':
path_to_deg = args.deg
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
padj_thr= args.pvalue
log2fc_thr_deg = args.log2fc_deg
log2fc_thr_background = args.log2fc_back
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
deg_case(path_to_deg,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter,
padj_thr=padj_thr,
log2fc_thr_deg=log2fc_thr_deg,
log2fc_thr_background=log2fc_thr_background)
elif args.subparser_name == 'set':
path_to_set = args.set
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
set_case(path_to_set,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter)
elif args.subparser_name == 'fasta':
path_to_foreground = args.foreground
path_to_background = args.background
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
fasta_case(path_to_foreground,
path_to_background,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter)
pass
if __name__ == '__main__':
main()
| import os
import sys
import argparse
from enrest.functions import *
from enrest.set import set_case
from enrest.deg import deg_case
from enrest.fasta import fasta_case
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser_name', help='Available commands:')
deg_parser = subparsers.add_parser('deg', help='Run test on DEGs')
set_parser = subparsers.add_parser('set', help='Run test on SET of genes')
fasta_parser = subparsers.add_parser('fasta', help='Run test on user FASTA files')
deg_parser.add_argument('deg', action='store', help='TSV file with DEG with ..., The NAME column must contain ensemble gene IDS')
deg_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
deg_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
deg_parser.add_argument('output', action='store', help='Name of directory for output files')
deg_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
deg_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
deg_parser.add_argument('-P', '--pvalue', action='store', type=float, default=0.05,
help='The pvalue is used as threshold to choose DEGs, default= 0.05')
deg_parser.add_argument('-l', '--log2fc_deg', action='store', type=float, default=1.,
help='The absolute value of log2FoldChange used as threshold to choose DEGs promoters (DEGs >= thr OR DEGs <= -thr), default= 1')
deg_parser.add_argument('-L', '--log2fc_back', action='store', type=float, default=0.32192809488736235,
help='The absolute value of log2FoldChange used as threshold to choose background promoters (-thr <= BACK <= thr), default= log2(5/4)')
set_parser.add_argument('set', action='store', help='File with list of genes. Genes must be in Ensemble format (ensemble gene IDS)')
set_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
set_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
set_parser.add_argument('output', action='store', help='Name of directory for output files')
set_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
set_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
fasta_parser.add_argument('foreground', action='store', help='Fasta file with sequences are used as foreground')
fasta_parser.add_argument('background', action='store', help='Fasta file with sequences are used as background')
fasta_parser.add_argument('matrices', action='store', help='Path to matrices in HOCOMOCO (PCM) or in MEME (PFM) format')
fasta_parser.add_argument('promoters', action='store', choices=['mm10', 'hg38', 'tair10', 'rnor6'], metavar='N',
help='promoters of organism (hg38, mm10, tair10)')
fasta_parser.add_argument('output', action='store', help='Name of directory for output files')
fasta_parser.add_argument('-p', '--parameter', action='store', choices=['enrichment', 'fraction'],
metavar='PARAMETER', type=str, default='enrichment',
help='Parameter estimated in test (enrichment or fraction), default= enrichment')
fasta_parser.add_argument('-f', '--format', action='store', choices=['meme', 'hocomoco'],
metavar='FORMAT', type=str, default='meme',
help='Format of file with matrices (meme or hocomoco), default= meme')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return(parser.parse_args())
def main():
args = parse_args()
if args.subparser_name == 'deg':
path_to_deg = args.deg
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
padj_thr= args.pvalue
log2fc_thr_deg = args.log2fc_deg
log2fc_thr_background = args.log2fc_back
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
deg_case(path_to_deg,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter,
padj_thr=padj_thr,
log2fc_thr_deg=log2fc_thr_deg,
log2fc_thr_background=log2fc_thr_background)
elif args.subparser_name == 'set':
path_to_set = args.set
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
set_case(path_to_set,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter)
elif args.subparser_name == 'fasta':
path_to_foreground = args.foreground
path_to_background = args.background
path_to_db = args.matrices
output_dir = args.output
promoters = args.promoters
parameter = args.parameter
file_format = args.format
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
this_dir, this_filename = os.path.split(__file__)
if promoters == 'mm10':
path_to_promoters = os.path.join(this_dir, "../data", "mm10.ensembl.promoters.fa.xz")
elif promoters == 'hg38':
path_to_promoters = os.path.join(this_dir, "../data", "hg38.ensembl.promoters.fa.xz")
elif promoters == 'tair10':
path_to_promoters = os.path.join(this_dir, "../data", "tair10.ensembl.promoters.fa.xz")
elif promoters == 'rnor6':
path_to_promoters = os.path.join(this_dir, "../data", "rnor6.ensembl.promoters.fa.xz")
fasta_case(path_to_foreground,
path_to_background,
path_to_db,
output_dir,
path_to_promoters,
file_format=file_format,
parameter=parameter)
pass
if __name__ == '__main__':
main()
| none | 1 | 2.631103 | 3 |
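In enREST.py, main() dispatches on args.subparser_name, and each of the three branches repeats the same promoters-to-FASTA lookup. The choice could be expressed once with a small lookup table, reusing the exact file names and path join from the script — a sketch of that refactor:

import os

_PROMOTER_FILES = {
    'mm10': 'mm10.ensembl.promoters.fa.xz',
    'hg38': 'hg38.ensembl.promoters.fa.xz',
    'tair10': 'tair10.ensembl.promoters.fa.xz',
    'rnor6': 'rnor6.ensembl.promoters.fa.xz',
}

def promoters_path(this_dir, promoters):
    """Resolve the bundled promoter FASTA for the selected assembly."""
    return os.path.join(this_dir, '../data', _PROMOTER_FILES[promoters])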
|
aoc2020/day_17/part_1.py | en0/aoc2020 | 0 | 6631344 | <gh_stars>0
from aoc2020 import *
from functools import reduce
from .space import Space
class Solution(SolutionABC):
expected = 112
def solve(self) -> any:
space = Space(reduce(lambda a, b: a + b, [
[((x, y, 0), s == '#') for x, s in enumerate(r)]
for y, r in enumerate(self.resource_lines("input"))
]))
return reduce(lambda a, b: space.simulate(), range(6), None)
| from aoc2020 import *
from functools import reduce
from .space import Space
class Solution(SolutionABC):
expected = 112
def solve(self) -> any:
space = Space(reduce(lambda a, b: a + b, [
[((x, y, 0), s == '#') for x, s in enumerate(r)]
for y, r in enumerate(self.resource_lines("input"))
]))
return reduce(lambda a, b: space.simulate(), range(6), None) | none | 1 | 2.977612 | 3 |
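In the AoC solution above, the final reduce ignores its accumulator entirely — range(6) is only a counter — so it amounts to "call space.simulate() six times and return the last result". A plain loop states that directly, assuming the same Space interface:

def run_cycles(space, cycles=6):
    """Advance the space the given number of cycles and return the last simulate() result."""
    result = None
    for _ in range(cycles):
        result = space.simulate()
    return result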
|
mcw/logging.py | IonAgorria/meson-cmake-wrapper | 63 | 6631345 | import logging
class ServerLogHandler(logging.Handler):
def __init__(self, server):
super().__init__()
self.server = server
def emit(self, record):
log_entry = self.format(record)
if self.server.connected:
self.server.send_message(log_entry, log=False)
| import logging
class ServerLogHandler(logging.Handler):
def __init__(self, server):
super().__init__()
self.server = server
def emit(self, record):
log_entry = self.format(record)
if self.server.connected:
self.server.send_message(log_entry, log=False)
| none | 1 | 2.805761 | 3 |
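ServerLogHandler above forwards each formatted record over the server connection, passing log=False so the send itself is not logged again. A sketch of attaching it to a logger; `server` stands for any object exposing .connected and .send_message(msg, log=False), as the handler assumes:

import logging
from mcw.logging import ServerLogHandler

handler = ServerLogHandler(server)   # `server` is a placeholder connection object
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))

logger = logging.getLogger('mcw')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('this record is formatted and forwarded to the connected client')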
|
alembic/versions/d88d63c07199_.py | andreasots/lrrbot | 24 | 6631346 | revision = 'd88d63c07199'
down_revision = ('<KEY>', '77dc71b483ed')
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
pass
def downgrade():
pass
| revision = 'd88d63c07199'
down_revision = ('<KEY>', '77dc71b483ed')
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
pass
def downgrade():
pass
| none | 1 | 1.014799 | 1 |
|
examples/__init__.py | dankilman/pages | 97 | 6631347 | exported_examples = {
'awe_examples': {},
'hello_world': {
'screenshot': [195, 0, 195 + 300, 50],
'extension': 'png'
},
'button_and_input': {
'extension': 'gif'
},
'chart_simple': {
'terminate_after': 35,
'extension': 'gif'
},
'chart_complex': {
'terminate_after': 70,
'extension': 'gif'
},
'custom_element': {
'screenshot': [100, 0, 100 + 1000, 150],
'extension': 'png'
},
'raw_html': {
'screenshot': [195, 0, 195 + 1210, 740],
'extension': 'png'
},
'simple_report': {
'screenshot': [195, 0, 195 + 1210, 385],
'extension': 'png'
},
'showcase': {
'screenshot': [195, 0, 195 + 1210, 510],
'extension': 'png'
},
'dsl': {
'terminate_after': 60,
'extension': 'gif'
},
'page_properties': {
'screenshot': [190, 0, 195 + 1210, 85],
'extension': 'png'
},
'standard_output': {
'extension': 'gif'
},
'collapse': {
'screenshot': [195, 0, 195 + 1210, 210],
'extension': 'png'
},
'chart_flat': {
'terminate_after': 60,
'extension': 'gif'
},
'markdown': {
'screenshot': [195, 0, 195 + 1210, 210],
'extension': 'png'
},
'updater': {
'terminate_after': 3,
'extension': 'gif'
}
}
examples_order = [
'hello_world',
'chart_simple',
'chart_complex',
'chart_flat',
'page_properties',
'button_and_input',
'standard_output',
'collapse',
'custom_element',
'raw_html',
'simple_report',
'markdown',
'showcase',
'updater',
'dsl',
]
| exported_examples = {
'awe_examples': {},
'hello_world': {
'screenshot': [195, 0, 195 + 300, 50],
'extension': 'png'
},
'button_and_input': {
'extension': 'gif'
},
'chart_simple': {
'terminate_after': 35,
'extension': 'gif'
},
'chart_complex': {
'terminate_after': 70,
'extension': 'gif'
},
'custom_element': {
'screenshot': [100, 0, 100 + 1000, 150],
'extension': 'png'
},
'raw_html': {
'screenshot': [195, 0, 195 + 1210, 740],
'extension': 'png'
},
'simple_report': {
'screenshot': [195, 0, 195 + 1210, 385],
'extension': 'png'
},
'showcase': {
'screenshot': [195, 0, 195 + 1210, 510],
'extension': 'png'
},
'dsl': {
'terminate_after': 60,
'extension': 'gif'
},
'page_properties': {
'screenshot': [190, 0, 195 + 1210, 85],
'extension': 'png'
},
'standard_output': {
'extension': 'gif'
},
'collapse': {
'screenshot': [195, 0, 195 + 1210, 210],
'extension': 'png'
},
'chart_flat': {
'terminate_after': 60,
'extension': 'gif'
},
'markdown': {
'screenshot': [195, 0, 195 + 1210, 210],
'extension': 'png'
},
'updater': {
'terminate_after': 3,
'extension': 'gif'
}
}
examples_order = [
'hello_world',
'chart_simple',
'chart_complex',
'chart_flat',
'page_properties',
'button_and_input',
'standard_output',
'collapse',
'custom_element',
'raw_html',
'simple_report',
'markdown',
'showcase',
'updater',
'dsl',
]
| none | 1 | 1.531592 | 2 |
|
py_kit/check.py | SystemLight/py-kit | 0 | 6631348 | from functools import partial
from typing import Dict, Any, Callable, List, Union
"""
python dict对象校验器
"""
SOME_TYPE = 'some'
EVERY_TYPE = 'every'
class Undefined:
pass
undefined = Undefined()
class BaseMessage:
"""
自定义规则函数返回的对象,可以继承该类,自定义返回Message对象
该类是包含校验信息的消息类,当继承该类后,可以为该类添加方法
用于得到该类时直接调用处理
:param key: 系统调用,触发message的key值
:param status: 状态
:param msg: 自定义rule函数返回的自定义内容
"""
def __init__(self, key=None, status=True, msg=None):
self.key = key
self.status = status
self.msg = msg
# 包含子元素从底部传导到顶层的key值
self.paths = []
def some(content: Dict, config: Dict) -> BaseMessage:
"""
检验dict对象,当一个key值触发错误就返回message对象,
注意检查函数只会检查content中真实存在的内容,不会检查不存在内容,
如果预期值并不能放到content中,请用dict().update补充对应的默认值
校验字典::
print(some({
'name': 'lisys',
'age': None
}, {
'name': not_null(msg='自定义传参'),
'age': [not_null]
}).__dict__)
:param content: 检验dict对象
:param config: 配置dict对象
:return: dict
"""
for key in content:
m = verify(content[key], config.get(key, []))
if not m.status:
m.key = key
m.paths.append(key)
return m
return BaseMessage(None, msg=content)
def every(content: Dict, config: Dict) -> BaseMessage:
"""
检验dict对象,当全部key值校验完所有规则函数返回message对象,
注意检查函数只会检查content中真实存在的内容,不会检查不存在内容,
如果预期值并不能放到content中,请用dict().update补充对应的默认值
校验字典::
print(every({
'name': 'lisys',
'age': None
}, {
'name': not_null,
'age': [not_null()]
}).__dict__)
:param content: 检验dict对象
:param config: 配置dict对象
:return: Message
"""
every_message = BaseMessage(None, status=True, msg=[])
for key in content:
m = verify(content[key], config.get(key, []), True)
if not m.status:
m.key = key
every_message.status = False
every_message.msg.append(m)
return every_message
def verify(param: Any, preset: Union[Callable, List], strict: bool = False):
"""
校验传入内容
strict为true,并且preset是一个rule列表时,verify会校验所有rule
并且返回一个主Message对象,该Message的msg是一个列表,包含所有的规则错误校验信息
如果有一个规则校验失败,那么主Message对象的status值将为False
使用预设即rule的列表进行校验::
# 检验value的值是否符合规则,not_null为非假的规则函数,verify函数返回BaseMessage对象
value = 'hello SystemLight'
print(verify(value, [not_null]).__dict__)
value = 'hello SystemLight'
print(verify(value, [not_null(msg='自定义传参')]).__dict__)
直接传入rule函数::
value = None
print(verify(value, not_null(msg='自定义传参')).__dict__)
value = None
print(verify(value, not_null).__dict__)
:param param: 检验内容
:param preset: 预设preset,rule函数列表,也可以直接传递rule函数
:param strict: 是否为严格模式,即需要校验全部的rule函数才做错误返回
:return: Message
"""
if hasattr(preset, '__call__'):
base_m = preset(param, caller=True)
elif strict:
base_m = BaseMessage(param)
base_m.msg = []
for rule_call in preset:
m = rule_call(param, caller=True)
if not m.status:
base_m.status = False
base_m.msg.append(m)
else:
for rule_call in preset:
base_m = rule_call(param, caller=True)
if not base_m.status:
return base_m
base_m = BaseMessage(param)
return base_m
def build_check(check_type: str, config: Dict):
"""
构建检查器
:param check_type: 检查器类型,every或者some
:param config: 检查规则函数
:return: 检查器函数
"""
if check_type == SOME_TYPE:
return partial(some, config=config)
if check_type == EVERY_TYPE:
return partial(every, config=config)
raise TypeError('check_type is not support')
def rule(fn):
"""
装饰器,用于装饰自定义规则rule函数
:param fn: 被装饰的普通函数
:return: None
"""
def wrap(*args, **kwargs):
# 判断是被系统调用,还是用户调用
if kwargs.get('caller', False):
# 系统调用会传入caller,如果被装饰的函数接收caller会造成错误
return fn(args[0])
# 用户调用了规则函数,用于自定义传参,同时让返回值仍然是一个可以被系统调用函数
return lambda param, caller: fn(param, *args, **kwargs)
return wrap
def __items(param, config, check_type=SOME_TYPE):
if check_type == SOME_TYPE:
return some(param, config)
if check_type == EVERY_TYPE:
return every(param, config)
raise TypeError('check_type is not support')
items = rule(__items)
@rule
def s_items(param, config):
"""
子元素校验规则,通过传入子元素校验配置实现子元素内容校验,
使用some校验
:param param:
:param config:
:return: Message 所有继承BaseMessage的对象
"""
return __items(param, config, SOME_TYPE)
@rule
def e_items(param, config):
"""
子元素校验规则,通过传入子元素校验配置实现子元素内容校验,
使用every校验
:param param:
:param config:
:return: Message 所有继承BaseMessage的对象
"""
return __items(param, config, EVERY_TYPE)
@rule
def not_null(param, error_msg='none'):
"""
该rule函数是一个最简单的校验规则函数,用于校验值是否为真
内置规则函数,校验内容是否为真值,在使用时,使用者需要按照功能自行制定规则函数
切记规则函数需要使用@rule装饰器进行装饰,并且一定返回Message对象,使用者可以根据需求自行定制Message对象
也可以使用默认的BaseMessage对象
:param param: param 规则函数必须接收的值即验证的内容,类似self,用户只需接收,无须管理传入
:param error_msg: msg 用户自定义传参,在调用验证函数时被使用者传入规则函数的参数
:return: Message 所有继承BaseMessage的对象
"""
if param:
return BaseMessage()
else:
return BaseMessage(msg=error_msg, status=False)
| from functools import partial
from typing import Dict, Any, Callable, List, Union
"""
python dict对象校验器
"""
SOME_TYPE = 'some'
EVERY_TYPE = 'every'
class Undefined:
pass
undefined = Undefined()
class BaseMessage:
"""
自定义规则函数返回的对象,可以继承该类,自定义返回Message对象
该类是包含校验信息的消息类,当继承该类后,可以为该类添加方法
用于得到该类时直接调用处理
:param key: 系统调用,触发message的key值
:param status: 状态
:param msg: 自定义rule函数返回的自定义内容
"""
def __init__(self, key=None, status=True, msg=None):
self.key = key
self.status = status
self.msg = msg
# 包含子元素从底部传导到顶层的key值
self.paths = []
def some(content: Dict, config: Dict) -> BaseMessage:
"""
检验dict对象,当一个key值触发错误就返回message对象,
注意检查函数只会检查content中真实存在的内容,不会检查不存在内容,
如果预期值并不能放到content中,请用dict().update补充对应的默认值
校验字典::
print(some({
'name': 'lisys',
'age': None
}, {
'name': not_null(msg='自定义传参'),
'age': [not_null]
}).__dict__)
:param content: 检验dict对象
:param config: 配置dict对象
:return: dict
"""
for key in content:
m = verify(content[key], config.get(key, []))
if not m.status:
m.key = key
m.paths.append(key)
return m
return BaseMessage(None, msg=content)
def every(content: Dict, config: Dict) -> BaseMessage:
"""
检验dict对象,当全部key值校验完所有规则函数返回message对象,
注意检查函数只会检查content中真实存在的内容,不会检查不存在内容,
如果预期值并不能放到content中,请用dict().update补充对应的默认值
校验字典::
print(every({
'name': 'lisys',
'age': None
}, {
'name': not_null,
'age': [not_null()]
}).__dict__)
:param content: 检验dict对象
:param config: 配置dict对象
:return: Message
"""
every_message = BaseMessage(None, status=True, msg=[])
for key in content:
m = verify(content[key], config.get(key, []), True)
if not m.status:
m.key = key
every_message.status = False
every_message.msg.append(m)
return every_message
def verify(param: Any, preset: Union[Callable, List], strict: bool = False):
"""
校验传入内容
strict为true,并且preset是一个rule列表时,verify会校验所有rule
并且返回一个主Message对象,该Message的msg是一个列表,包含所有的规则错误校验信息
如果有一个规则校验失败,那么主Message对象的status值将为False
使用预设即rule的列表进行校验::
# 检验value的值是否符合规则,not_null为非假的规则函数,verify函数返回BaseMessage对象
value = 'hello SystemLight'
print(verify(value, [not_null]).__dict__)
value = 'hello SystemLight'
print(verify(value, [not_null(msg='自定义传参')]).__dict__)
直接传入rule函数::
value = None
print(verify(value, not_null(msg='自定义传参')).__dict__)
value = None
print(verify(value, not_null).__dict__)
:param param: 检验内容
:param preset: 预设preset,rule函数列表,也可以直接传递rule函数
:param strict: 是否为严格模式,即需要校验全部的rule函数才做错误返回
:return: Message
"""
if hasattr(preset, '__call__'):
base_m = preset(param, caller=True)
elif strict:
base_m = BaseMessage(param)
base_m.msg = []
for rule_call in preset:
m = rule_call(param, caller=True)
if not m.status:
base_m.status = False
base_m.msg.append(m)
else:
for rule_call in preset:
base_m = rule_call(param, caller=True)
if not base_m.status:
return base_m
base_m = BaseMessage(param)
return base_m
def build_check(check_type: str, config: Dict):
"""
构建检查器
:param check_type: 检查器类型,every或者some
:param config: 检查规则函数
:return: 检查器函数
"""
if check_type == SOME_TYPE:
return partial(some, config=config)
if check_type == EVERY_TYPE:
return partial(every, config=config)
raise TypeError('check_type is not support')
def rule(fn):
"""
装饰器,用于装饰自定义规则rule函数
:param fn: 被装饰的普通函数
:return: None
"""
def wrap(*args, **kwargs):
# 判断是被系统调用,还是用户调用
if kwargs.get('caller', False):
# 系统调用会传入caller,如果被装饰的函数接收caller会造成错误
return fn(args[0])
# 用户调用了规则函数,用于自定义传参,同时让返回值仍然是一个可以被系统调用函数
return lambda param, caller: fn(param, *args, **kwargs)
return wrap
def __items(param, config, check_type=SOME_TYPE):
if check_type == SOME_TYPE:
return some(param, config)
if check_type == EVERY_TYPE:
return every(param, config)
raise TypeError('check_type is not support')
items = rule(__items)
@rule
def s_items(param, config):
"""
子元素校验规则,通过传入子元素校验配置实现子元素内容校验,
使用some校验
:param param:
:param config:
:return: Message 所有继承BaseMessage的对象
"""
return __items(param, config, SOME_TYPE)
@rule
def e_items(param, config):
"""
子元素校验规则,通过传入子元素校验配置实现子元素内容校验,
使用every校验
:param param:
:param config:
:return: Message 所有继承BaseMessage的对象
"""
return __items(param, config, EVERY_TYPE)
@rule
def not_null(param, error_msg='none'):
"""
该rule函数是一个最简单的校验规则函数,用于校验值是否为真
内置规则函数,校验内容是否为真值,在使用时,使用者需要按照功能自行制定规则函数
切记规则函数需要使用@rule装饰器进行装饰,并且一定返回Message对象,使用者可以根据需求自行定制Message对象
也可以使用默认的BaseMessage对象
:param param: param 规则函数必须接收的值即验证的内容,类似self,用户只需接收,无须管理传入
:param error_msg: msg 用户自定义传参,在调用验证函数时被使用者传入规则函数的参数
:return: Message 所有继承BaseMessage的对象
"""
if param:
return BaseMessage()
else:
return BaseMessage(msg=error_msg, status=False)
| zh | 0.867143 | python dict对象校验器 自定义规则函数返回的对象,可以继承该类,自定义返回Message对象 该类是包含校验信息的消息类,当继承该类后,可以为该类添加方法 用于得到该类时直接调用处理 :param key: 系统调用,触发message的key值 :param status: 状态 :param msg: 自定义rule函数返回的自定义内容 # 包含子元素从底部传导到顶层的key值 检验dict对象,当一个key值触发错误就返回message对象, 注意检查函数只会检查content中真实存在的内容,不会检查不存在内容, 如果预期值并不能放到content中,请用dict().update补充对应的默认值 校验字典:: print(some({ 'name': 'lisys', 'age': None }, { 'name': not_null(msg='自定义传参'), 'age': [not_null] }).__dict__) :param content: 检验dict对象 :param config: 配置dict对象 :return: dict 检验dict对象,当全部key值校验完所有规则函数返回message对象, 注意检查函数只会检查content中真实存在的内容,不会检查不存在内容, 如果预期值并不能放到content中,请用dict().update补充对应的默认值 校验字典:: print(every({ 'name': 'lisys', 'age': None }, { 'name': not_null, 'age': [not_null()] }).__dict__) :param content: 检验dict对象 :param config: 配置dict对象 :return: Message 校验传入内容 strict为true,并且preset是一个rule列表时,verify会校验所有rule 并且返回一个主Message对象,该Message的msg是一个列表,包含所有的规则错误校验信息 如果有一个规则校验失败,那么主Message对象的status值将为False 使用预设即rule的列表进行校验:: # 检验value的值是否符合规则,not_null为非假的规则函数,verify函数返回BaseMessage对象 value = 'hello SystemLight' print(verify(value, [not_null]).__dict__) value = 'hello SystemLight' print(verify(value, [not_null(msg='自定义传参')]).__dict__) 直接传入rule函数:: value = None print(verify(value, not_null(msg='自定义传参')).__dict__) value = None print(verify(value, not_null).__dict__) :param param: 检验内容 :param preset: 预设preset,rule函数列表,也可以直接传递rule函数 :param strict: 是否为严格模式,即需要校验全部的rule函数才做错误返回 :return: Message 构建检查器 :param check_type: 检查器类型,every或者some :param config: 检查规则函数 :return: 检查器函数 装饰器,用于装饰自定义规则rule函数 :param fn: 被装饰的普通函数 :return: None # 判断是被系统调用,还是用户调用 # 系统调用会传入caller,如果被装饰的函数接收caller会造成错误 # 用户调用了规则函数,用于自定义传参,同时让返回值仍然是一个可以被系统调用函数 子元素校验规则,通过传入子元素校验配置实现子元素内容校验, 使用some校验 :param param: :param config: :return: Message 所有继承BaseMessage的对象 子元素校验规则,通过传入子元素校验配置实现子元素内容校验, 使用every校验 :param param: :param config: :return: Message 所有继承BaseMessage的对象 该rule函数是一个最简单的校验规则函数,用于校验值是否为真 内置规则函数,校验内容是否为真值,在使用时,使用者需要按照功能自行制定规则函数 切记规则函数需要使用@rule装饰器进行装饰,并且一定返回Message对象,使用者可以根据需求自行定制Message对象 也可以使用默认的BaseMessage对象 :param param: param 规则函数必须接收的值即验证的内容,类似self,用户只需接收,无须管理传入 :param error_msg: msg 用户自定义传参,在调用验证函数时被使用者传入规则函数的参数 :return: Message 所有继承BaseMessage的对象 | 2.932594 | 3 |
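To summarize the py_kit/check.py record above (its docstrings are in Chinese): functions decorated with @rule return BaseMessage objects, verify() runs one value through a rule or a list of rules, and some()/every() apply a per-key config dict to a whole dict — some stops at the first failing key, every collects every failure. A usage sketch with one custom rule, using only names defined in the module:

from py_kit.check import rule, BaseMessage, every, not_null

@rule
def min_len(param, n=3, error_msg='too short'):
    """Custom rule: the value must be a string of at least n characters."""
    if isinstance(param, str) and len(param) >= n:
        return BaseMessage()
    return BaseMessage(status=False, msg=error_msg)

result = every(
    {'name': 'SystemLight', 'bio': ''},
    {'name': [not_null, min_len(5)], 'bio': [not_null(error_msg='bio is required')]},
)
print(result.status)                    # False: the empty 'bio' fails not_null
print([m.key for m in result.msg])      # ['bio']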
nindo/client.py | pascalkienast/stats-about-socialmedia | 4 | 6631349 | import asyncio
import urllib.parse
from .http import HTTPClient
from .artist import Artist, RankedArtist, DetailedArtist
from .util import AsyncIterator
from .coupon import Coupon
from .milestone import Milestone
from .viral import Viral
__all__ = (
"NindoClient",
)
class NindoClient:
def __init__(self, **kwargs):
self.loop = kwargs.get("loop", asyncio.get_event_loop())
self._http = HTTPClient(**kwargs)
async def get_artist(self, artist_id):
data = await self._http.request(f"/artist/{artist_id}")
return DetailedArtist(data, http=self._http)
def search(self, term):
async def _to_wrap():
data = await self._http.request(f"/search/smart/{urllib.parse.quote(term)}")
for artists in data:
yield Artist(artists, http=self._http)
return AsyncIterator(_to_wrap())
def _ranked_artists(self, path):
async def _to_wrap():
data = await self._http.request(path)
for artist in data:
yield RankedArtist(artist, http=self._http)
return AsyncIterator(_to_wrap())
def youtube_views_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankViews/big")
def youtube_likes_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankLikes/big")
def youtube_followers_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankSubGain/big")
def youtube_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rank/big")
def instagram_likes_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rankLikes/big")
def instagram_followers_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rankSubGain/big")
def instagram_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rank/big")
def tiktok_likes_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankLikes/big")
def tiktok_views_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankViews/big")
def tiktok_followers_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankSubGain/big")
def tiktok_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rank/big")
def twitter_likes_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankLikes/big")
def twitter_retweets_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankRetweets/big")
def twitter_followers_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankSubGain/big")
def twitter_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rank/big")
def twitch_viewers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankViewer/big")
def twitch_peak_viewers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankPeakViewer/big")
def twitch_followers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankSubGain/big")
def twitch_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rank/big")
def coupons(self):
# Coupons are the only resource that is paginated
async def _to_wrap():
buffer = []
offset = 0
while True:
if len(buffer) == 0:
data = await self._http.request(f"/coupons?offset={offset}")
buffer = [Coupon(c, http=self._http) for c in data["coupons"]]
if len(buffer) == 0:
break
offset += len(buffer)
yield buffer.pop(0)
return AsyncIterator(_to_wrap())
def milestones(self):
async def _to_wrap():
data = await self._http.request("/ranks/milestones")
for milestone in data:
yield Milestone(milestone, http=self._http)
return AsyncIterator(_to_wrap())
def past_milestones(self):
async def _to_wrap():
data = await self._http.request("/ranks/pastMilestones")
for milestone in data:
yield Milestone(milestone, http=self._http)
return AsyncIterator(_to_wrap())
def viral(self):
async def _to_wrap():
data = await self._http.request("/viral")
for viral in data:
yield Viral(viral, http=self._http)
return AsyncIterator(_to_wrap())
| import asyncio
import urllib.parse
from .http import HTTPClient
from .artist import Artist, RankedArtist, DetailedArtist
from .util import AsyncIterator
from .coupon import Coupon
from .milestone import Milestone
from .viral import Viral
__all__ = (
"NindoClient",
)
class NindoClient:
def __init__(self, **kwargs):
self.loop = kwargs.get("loop", asyncio.get_event_loop())
self._http = HTTPClient(**kwargs)
async def get_artist(self, artist_id):
data = await self._http.request(f"/artist/{artist_id}")
return DetailedArtist(data, http=self._http)
def search(self, term):
async def _to_wrap():
data = await self._http.request(f"/search/smart/{urllib.parse.quote(term)}")
for artists in data:
yield Artist(artists, http=self._http)
return AsyncIterator(_to_wrap())
def _ranked_artists(self, path):
async def _to_wrap():
data = await self._http.request(path)
for artist in data:
yield RankedArtist(artist, http=self._http)
return AsyncIterator(_to_wrap())
def youtube_views_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankViews/big")
def youtube_likes_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankLikes/big")
def youtube_followers_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rankSubGain/big")
def youtube_charts(self):
return self._ranked_artists("/ranks/charts/youtube/rank/big")
def instagram_likes_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rankLikes/big")
def instagram_followers_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rankSubGain/big")
def instagram_charts(self):
return self._ranked_artists("/ranks/charts/instagram/rank/big")
def tiktok_likes_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankLikes/big")
def tiktok_views_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankViews/big")
def tiktok_followers_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rankSubGain/big")
def tiktok_charts(self):
return self._ranked_artists("/ranks/charts/tiktok/rank/big")
def twitter_likes_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankLikes/big")
def twitter_retweets_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankRetweets/big")
def twitter_followers_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rankSubGain/big")
def twitter_charts(self):
return self._ranked_artists("/ranks/charts/twitter/rank/big")
def twitch_viewers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankViewer/big")
def twitch_peak_viewers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankPeakViewer/big")
def twitch_followers_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rankSubGain/big")
def twitch_charts(self):
return self._ranked_artists("/ranks/charts/twitch/rank/big")
def coupons(self):
# Coupons are the only resource that is paginated
async def _to_wrap():
buffer = []
offset = 0
while True:
if len(buffer) == 0:
data = await self._http.request(f"/coupons?offset={offset}")
buffer = [Coupon(c, http=self._http) for c in data["coupons"]]
if len(buffer) == 0:
break
offset += len(buffer)
yield buffer.pop(0)
return AsyncIterator(_to_wrap())
def milestones(self):
async def _to_wrap():
data = await self._http.request("/ranks/milestones")
for milestone in data:
yield Milestone(milestone, http=self._http)
return AsyncIterator(_to_wrap())
def past_milestones(self):
async def _to_wrap():
data = await self._http.request("/ranks/pastMilestones")
for milestone in data:
yield Milestone(milestone, http=self._http)
return AsyncIterator(_to_wrap())
def viral(self):
async def _to_wrap():
data = await self._http.request("/viral")
for viral in data:
yield Viral(viral, http=self._http)
return AsyncIterator(_to_wrap())
| en | 0.97723 | # Coupons are the only resource that is paginated | 2.595503 | 3 |
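The nindo/client.py row above wraps every endpoint in an AsyncIterator built from an async generator, so results are fetched lazily and the offset-paginated /coupons endpoint is walked transparently. The usage sketch below is hypothetical: it assumes AsyncIterator supports async for (it wraps an async generator) and that HTTPClient, which is not shown in the row, can be constructed without required arguments such as an API key.

import asyncio
from nindo.client import NindoClient   # module defined in the row above

async def main():
    # assumption: HTTPClient takes its configuration via kwargs/defaults,
    # so no arguments are strictly required here
    client = NindoClient()

    # chart helpers yield RankedArtist objects lazily, one HTTP request per call
    async for artist in client.youtube_charts():
        print(artist)
        break

    # coupons() follows the offset-based pagination shown in the row transparently
    async for coupon in client.coupons():
        print(coupon)
        break

asyncio.run(main())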
app.py | RTIInternational/ncrp-codes-app | 0 | 6631350 | import os
from functools import partial
from pathlib import Path
from typing import Any, Dict, List
import pandas as pd
import streamlit as st
from more_itertools import ichunked
from stqdm import stqdm
from download import download_link
from model_utils import max_pred_bulk, predict, predict_bulk
PRED_BATCH_SIZE = 16
st.set_page_config(
page_title="NCRP Offense Code Classifier", initial_sidebar_state="collapsed"
)
st.markdown(Path("readme.md").read_text())
st.markdown("---")
st.markdown("## ✏️ Single Coder Demo")
input_text = st.text_input(
"Input Offense",
value="FRAUDULENT USE OF A CREDIT CARD OR DEBT CARD >= $25,000",
)
predictions = predict(input_text)
st.markdown("Predictions")
labels = ["Charge Category"]
st.dataframe(pd.DataFrame(predictions[0]))
st.markdown("---")
st.markdown("## 📑 Bulk Coder")
st.warning(
"⚠️ *Note:* Your input data will be deduplicated"
" on the selected column to reduce computation requirements."
)
st.markdown("1️⃣ **Upload File**")
uploaded_file = st.file_uploader("Bulk Upload", type=["xlsx", "csv"])
file_readers = {"csv": pd.read_csv, "xlsx": partial(pd.read_excel, engine="openpyxl")}
if uploaded_file is not None:
for filetype, reader in file_readers.items():
if uploaded_file.name.endswith(filetype):
df = reader(uploaded_file)
st.write("2️⃣ **Select Column of Offense Descriptions**")
string_columns = list(df.select_dtypes("object").columns)
longest_column = max(
[(df[c].str.len().mean(), c) for c in string_columns], key=lambda x: x[0]
)[1]
selected_column = st.selectbox(
"Select Column",
options=list(string_columns),
index=string_columns.index(longest_column),
)
df = df.drop_duplicates(subset=[selected_column])
st.markdown(f"Uploaded Data Sample `(Deduplicated. N Rows = {len(df)})`")
st.dataframe(df.head(20))
st.write(f"3️⃣ **Predict Using Column: `{selected_column}`**")
if st.button(f"Compute Predictions"):
input_texts = (value for _, value in df[selected_column].items())
n_batches = (len(df) // PRED_BATCH_SIZE) + 1
bulk_preds = []
for batch in stqdm(
ichunked(input_texts, PRED_BATCH_SIZE),
total=n_batches,
desc="Bulk Predict Progress",
):
batch_preds = predict_bulk(batch)
bulk_preds.extend(batch_preds)
df["charge_category_pred"] = max_pred_bulk(bulk_preds)
# TODO: Add all scores
# TODO: Add "confidence"
st.write("**Sample Output**")
st.dataframe(df.head(100))
tmp_download_link = download_link(
df,
f"{uploaded_file.name}-ncrp-predictions.csv",
"⬇️ Download as CSV",
)
st.markdown(tmp_download_link, unsafe_allow_html=True)
| import os
from functools import partial
from pathlib import Path
from typing import Any, Dict, List
import pandas as pd
import streamlit as st
from more_itertools import ichunked
from stqdm import stqdm
from download import download_link
from model_utils import max_pred_bulk, predict, predict_bulk
PRED_BATCH_SIZE = 16
st.set_page_config(
page_title="NCRP Offense Code Classifier", initial_sidebar_state="collapsed"
)
st.markdown(Path("readme.md").read_text())
st.markdown("---")
st.markdown("## ✏️ Single Coder Demo")
input_text = st.text_input(
"Input Offense",
value="FRAUDULENT USE OF A CREDIT CARD OR DEBT CARD >= $25,000",
)
predictions = predict(input_text)
st.markdown("Predictions")
labels = ["Charge Category"]
st.dataframe(pd.DataFrame(predictions[0]))
st.markdown("---")
st.markdown("## 📑 Bulk Coder")
st.warning(
"⚠️ *Note:* Your input data will be deduplicated"
" on the selected column to reduce computation requirements."
)
st.markdown("1️⃣ **Upload File**")
uploaded_file = st.file_uploader("Bulk Upload", type=["xlsx", "csv"])
file_readers = {"csv": pd.read_csv, "xlsx": partial(pd.read_excel, engine="openpyxl")}
if uploaded_file is not None:
for filetype, reader in file_readers.items():
if uploaded_file.name.endswith(filetype):
df = reader(uploaded_file)
st.write("2️⃣ **Select Column of Offense Descriptions**")
string_columns = list(df.select_dtypes("object").columns)
longest_column = max(
[(df[c].str.len().mean(), c) for c in string_columns], key=lambda x: x[0]
)[1]
selected_column = st.selectbox(
"Select Column",
options=list(string_columns),
index=string_columns.index(longest_column),
)
df = df.drop_duplicates(subset=[selected_column])
st.markdown(f"Uploaded Data Sample `(Deduplicated. N Rows = {len(df)})`")
st.dataframe(df.head(20))
st.write(f"3️⃣ **Predict Using Column: `{selected_column}`**")
if st.button(f"Compute Predictions"):
input_texts = (value for _, value in df[selected_column].items())
n_batches = (len(df) // PRED_BATCH_SIZE) + 1
bulk_preds = []
for batch in stqdm(
ichunked(input_texts, PRED_BATCH_SIZE),
total=n_batches,
desc="Bulk Predict Progress",
):
batch_preds = predict_bulk(batch)
bulk_preds.extend(batch_preds)
df["charge_category_pred"] = max_pred_bulk(bulk_preds)
# TODO: Add all scores
# TODO: Add "confidence"
st.write("**Sample Output**")
st.dataframe(df.head(100))
tmp_download_link = download_link(
df,
f"{uploaded_file.name}-ncrp-predictions.csv",
"⬇️ Download as CSV",
)
st.markdown(tmp_download_link, unsafe_allow_html=True)
| en | 0.175921 | # ✏️ Single Coder Demo") # 📑 Bulk Coder") # TODO: Add all scores # TODO: Add "confidence" | 2.588876 | 3 |
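The app.py row above deduplicates the uploaded column, then streams the texts through predict_bulk in fixed-size batches using more_itertools.ichunked before taking the per-row maximum label. The self-contained sketch below reproduces just that batching pattern; predict_stub and its label names are placeholders standing in for the real model call.

from more_itertools import ichunked

PRED_BATCH_SIZE = 16

def predict_stub(batch):
    # placeholder for predict_bulk: one dict of label scores per input text
    return [{"Violent": 0.7, "Property": 0.2, "Drug": 0.1} for _ in batch]

texts = (f"OFFENSE DESCRIPTION {i}" for i in range(40))   # stands in for the selected column
preds = []
for batch in ichunked(texts, PRED_BATCH_SIZE):            # lazy batches of at most 16 texts
    preds.extend(predict_stub(list(batch)))

max_labels = [max(p, key=p.get) for p in preds]           # stand-in for max_pred_bulk (argmax per row)
print(len(max_labels), max_labels[0])                     # 40 Violent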