Dataset schema (auto-generated column summary; ⌀ marks columns containing nulls):

| Column | Dtype | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
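Each record below pairs this metadata with a complete source file in `content`. As a quick orientation, here is a minimal sketch of streaming rows and recomputing the three derived statistics that close each record. It assumes the corpus is hosted on the Hugging Face Hub; the dataset id and `data_dir` are placeholders, not confirmed by this card, and `file_stats` is a hypothetical helper written for illustration.

```python
# Minimal sketch, assuming a Hugging Face Hub copy of this corpus.
# "bigcode/the-stack" and data_dir are placeholder values; substitute the real ones.
from datasets import load_dataset

ds = load_dataset(
    "bigcode/the-stack",     # placeholder dataset id
    data_dir="data/python",  # placeholder subset matching lang == "Python"
    split="train",
    streaming=True,          # iterate without downloading the whole corpus
)

for row in ds.take(2):
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])

# The three trailing per-row statistics can be recomputed from `content`:
def file_stats(text):
    lines = text.splitlines() or [""]
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / max(len(text), 1)
    return avg_line_length, max_line_length, alphanum_fraction
```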
hexsha: 794101a4fb5697e2fe58ffc343a4c8a5111ada44 | size: 5,329 | ext: py | lang: Python | licenses: ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"]
max_stars: tutorials/language/extern_op.py | eleflea/tvm @ d199243d8907b2d8062dd9c20b69dcb9765a970f | count: 40 | events: 2021-06-14T23:14:46.000Z to 2022-03-21T14:32:23.000Z
max_issues: tutorials/language/extern_op.py | eleflea/tvm @ d199243d8907b2d8062dd9c20b69dcb9765a970f | count: 14 | events: 2021-06-08T03:15:54.000Z to 2022-02-01T23:50:24.000Z
max_forks: tutorials/language/extern_op.py | eleflea/tvm @ d199243d8907b2d8062dd9c20b69dcb9765a970f | count: 11 | events: 2021-06-14T05:56:18.000Z to 2022-02-27T06:52:07.000Z
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
External Tensor Functions
=========================
**Author**: `Tianqi Chen <https://tqchen.github.io>`_
While TVM supports transparent code generation, sometimes
it is also helpful to incorporate manually written code into
the pipeline. For example, we might want to use cuDNN for
some of the convolution kernels and define the rest of the stages.
TVM supports these black box function calls natively.
Specifically, TVM supports all tensor functions that are DLPack compatible,
which means we can call any function with POD types (pointer, int, float)
or a pointer to DLTensor as argument.
"""
from __future__ import absolute_import, print_function
import tvm
from tvm import te
import numpy as np
from tvm.contrib import cblas
import tvm.testing
if not tvm.get_global_func("tvm.contrib.cblas.matmul", allow_missing=True):
raise Exception("Not compiled with cblas support; can't build this tutorial")
######################################################################
# Use Extern Tensor Function
# --------------------------
# In the example below, we use :any:`te.extern` to add an extern
# array function call. In the extern call, we declare the shape
# of output tensors. In the second argument we provide the list of inputs.
#
# The user needs to provide a function describing how to compute the result.
# The compute function takes a list of symbolic placeholders for the inputs and
# a list of symbolic placeholders for the outputs, and returns the executing statement.
#
# In this case we simply call a registered TVM function, which invokes a CBLAS call.
# TVM does not control the internals of the extern array function and treats it as a black box.
# We can further mix schedulable TVM calls that add a bias term to the result.
#
n = 1024
l = 128
m = 235
bias = te.var("bias", dtype="float32")
A = te.placeholder((n, l), name="A")
B = te.placeholder((l, m), name="B")
C = te.extern(
(n, m),
[A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], False, False
),
name="C",
)
D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
s = te.create_schedule(D.op)
######################################################################
# Verify the Result
# -----------------
# We can verify that the result matches what we expected.
#
ctx = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], "llvm")
a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), ctx)
bb = 10.0
f(a, b, d, bb)
tvm.testing.assert_allclose(d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 10, rtol=1e-5)
######################################################################
# Extern Contrib Wrappers
# -----------------------
# TVM also provides contrib wrappers for useful extern calls;
# the following line is equivalent to the previous example.
#
from tvm.contrib import cblas
C = cblas.matmul(A, B)
D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
s = te.create_schedule(D.op)
######################################################################
# Hook Python Function as Extern
# ------------------------------
# Since we can call into any PackedFunc in TVM, we can use the extern
# function to call back into Python.
#
# The following example registers a python function into the TVM runtime system
# and uses it to complete one stage of the computation.
# This makes TVM much more flexible. For example, we can insert front-end
# callbacks to inspect the intermediate results or mix customized code
# with TVM.
#
@tvm.register_func("tvm.contrib.my_tvm_addone")
def my_tvm_addone(x, y):
print("my_tvm_addone signatures: %s, %s" % (type(x), type(y)))
tvm.nd.array(x.asnumpy() + 1).copyto(y)
A = te.placeholder((n,), name="A")
B = te.extern(
A.shape,
[A],
lambda ins, outs: tvm.tir.call_packed("tvm.contrib.my_tvm_addone", ins[0], outs[0]),
name="C",
)
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1, rtol=1e-5)
######################################################################
# Summary
# -------
# - TVM calls extern tensor functions via :any:`te.extern`
# - Use contrib wrappers as short sugar for extern tensor calls.
# - We can hook front-end functions in as extern tensor callbacks.
#
avg_line_length: 38.064286 | max_line_length: 90 | alphanum_fraction: 0.65791

hexsha: 79410283245fa516fe7801d7937f9da6e28f77d1 | size: 808 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: turbosnake/test/test_utils.py | AlexeyBond/turbosnake @ 832c924c2cf29a741234848792bf750aa72fece2 | count: 2 | events: 2021-09-23T01:11:22.000Z to 2022-02-04T21:08:24.000Z
max_issues: turbosnake/test/test_utils.py | AlexeyBond/turbosnake @ 832c924c2cf29a741234848792bf750aa72fece2 | count: ⌀
max_forks: turbosnake/test/test_utils.py | AlexeyBond/turbosnake @ 832c924c2cf29a741234848792bf750aa72fece2 | count: ⌀
content:
from unittest import TestCase
from unittest.mock import Mock
from turbosnake import event_prop_invoker
class EventPropInvokerTest(TestCase):
def setUp(self) -> None:
class ComponentStub:
...
self.target = ComponentStub()
self.target.props = {}
def test_prop_missing(self):
fn = event_prop_invoker(self.target, 'on_event')
self.assertTrue(callable(fn))
with self.assertRaises(KeyError):
fn()
def test_invoke_prop(self):
fn = event_prop_invoker(self.target, 'on_event')
cb = Mock(return_value='mock return value')
self.target.props = {'on_event': cb}
ret = fn('arg1', 2, kwa='kwa')
self.assertEqual(ret, 'mock return value')
cb.assert_called_with('arg1', 2, kwa='kwa')
avg_line_length: 26.064516 | max_line_length: 56 | alphanum_fraction: 0.631188

hexsha: 7941028c41441115c5951efdd014ed949ca5c0a9 | size: 14,534 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py | henumohe/OmniMarkupPreviewer @ a15382a8309fe04f2c515151c00c074ab9c0d1ab | count: 652 | events: 2015-07-26T00:00:17.000Z to 2022-02-24T18:30:04.000Z
max_issues: OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py | henumohe/OmniMarkupPreviewer @ a15382a8309fe04f2c515151c00c074ab9c0d1ab | count: 82 | events: 2015-01-15T12:30:43.000Z to 2022-01-06T02:56:53.000Z
max_forks: OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py | henumohe/OmniMarkupPreviewer @ a15382a8309fe04f2c515151c00c074ab9c0d1ab | count: 99 | events: 2015-01-14T19:53:45.000Z to 2021-08-11T15:17:26.000Z
content:
# -*- coding: utf-8 -*-
"""
pygments.lexers.idl
~~~~~~~~~~~~~~~~~~~
Lexers for IDL.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, Number
__all__ = ['IDLLexer']
class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
.. versionadded:: 1.6
"""
name = 'IDL'
aliases = ['idl']
filenames = ['*.pro']
mimetypes = ['text/idl']
flags = re.IGNORECASE | re.MULTILINE
_RESERVED = (
'and', 'begin', 'break', 'case', 'common', 'compile_opt',
'continue', 'do', 'else', 'end', 'endcase', 'elseelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
'repeat', 'switch', 'then', 'until', 'while', 'xor')
"""Reserved words from: http://www.exelisvis.com/docs/reswords.html"""
_BUILTIN_LIB = (
'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
'arg_present', 'array_equal', 'array_indices', 'arrow',
'ascii_template', 'asin', 'assoc', 'atan', 'axis',
'a_correlate', 'bandpass_filter', 'bandreject_filter',
'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
'binomial', 'bin_date', 'bit_ffs', 'bit_population',
'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
'cmyk_convert', 'colorbar', 'colorize_sample',
'colormap_applicable', 'colormap_gradient',
'colormap_rotation', 'colortable', 'color_convert',
'color_exchange', 'color_quan', 'color_range_map', 'comfit',
'command_line_args', 'complex', 'complexarr', 'complexround',
'compute_mesh_normals', 'cond', 'congrid', 'conj',
'constrained_min', 'contour', 'convert_coord', 'convol',
'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
'create_view', 'crossp', 'crvlength', 'cti_test',
'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
'cw_animate', 'cw_animate_getp', 'cw_animate_load',
'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
'cw_form', 'cw_fslider', 'cw_light_editor',
'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
'cw_palette_editor', 'cw_palette_editor_get',
'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
'define_msgblk', 'define_msgblk_from_file', 'defroi',
'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
'dialog_printersetup', 'dialog_printjob',
'dialog_read_image', 'dialog_write_image', 'digital_filter',
'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
'file_basename', 'file_chmod', 'file_copy', 'file_delete',
'file_dirname', 'file_expand_path', 'file_info',
'file_lines', 'file_link', 'file_mkdir', 'file_move',
'file_poll_input', 'file_readlink', 'file_same',
'file_search', 'file_test', 'file_which', 'findgen',
'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
'fltarr', 'flush', 'format_axis_values', 'free_lun',
'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
'greg2jul', 'grib_\w*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
'hanning', 'hash', 'hdf_\w*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
'image_cont', 'image_statistics', 'imaginary', 'imap',
'indgen', 'intarr', 'interpol', 'interpolate',
'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
'json_serialize', 'jul2greg', 'julday', 'keyword_set',
'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
'label_region', 'ladfit', 'laguerre', 'laplacian',
'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
'la_gm_linear_model', 'la_hqr', 'la_invert',
'la_least_squares', 'la_least_square_equality',
'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
'lngamma', 'lnp_test', 'loadct', 'locale_get',
'logical_and', 'logical_or', 'logical_true', 'lon64arr',
'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
'map_continents', 'map_grid', 'map_image', 'map_patch',
'map_proj_forward', 'map_proj_image', 'map_proj_info',
'map_proj_init', 'map_proj_inverse', 'map_set',
'matrix_multiply', 'matrix_power', 'max', 'md_test',
'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
'message', 'min', 'min_curve_surf', 'mk_html_help',
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
'online_help', 'on_error', 'open', 'oplot', 'oploterr',
'parse_url', 'particle_trace', 'path_cache', 'path_sep',
'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
'plot_field', 'pnt_line', 'point_lun', 'polarplot',
'polar_contour', 'polar_surface', 'poly', 'polyfill',
'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
'print', 'printd', 'product', 'profile', 'profiler',
'profiles', 'project_vol', 'psafm', 'pseudo',
'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
'query_csv', 'query_dicom', 'query_gif', 'query_image',
'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
'query_png', 'query_ppm', 'query_srf', 'query_tiff',
'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
'rdpix', 'read', 'reads', 'readu', 'read_ascii',
'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
'read_xwd', 'real_part', 'rebin', 'recall_commands',
'recon3', 'reduce_colors', 'reform', 'region_grow',
'register_cursor', 'regress', 'replicate',
'replicate_inplace', 'resolve_all', 'resolve_routine',
'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
'rot', 'rotate', 'round', 'routine_filepath',
'routine_info', 'rs_test', 'r_correlate', 'r_test',
'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
'scope_traceback', 'scope_varfetch', 'scope_varname',
'search2d', 'search3d', 'sem_create', 'sem_delete',
'sem_lock', 'sem_release', 'setenv', 'set_plot',
'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
'streamline', 'stregex', 'stretch', 'string', 'strjoin',
'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
'tanh', 'tek_color', 'temporary', 'tetra_clip',
'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
'timegen', 'time_test2', 'tm_test', 'total', 'trace',
'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
'value_locate', 'variance', 'vector', 'vector_field', 'vel',
'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
'where', 'widget_base', 'widget_button', 'widget_combobox',
'widget_control', 'widget_displaycontextmen', 'widget_draw',
'widget_droplist', 'widget_event', 'widget_info',
'widget_label', 'widget_list', 'widget_propertysheet',
'widget_slider', 'widget_tab', 'widget_table',
'widget_text', 'widget_tree', 'widget_tree_move',
'widget_window', 'wiener_filter', 'window', 'writeu',
'write_bmp', 'write_csv', 'write_gif', 'write_image',
'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
'write_png', 'write_ppm', 'write_spr', 'write_srf',
'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
'xmtool', 'xobjview', 'xobjview_rotate',
'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
'xvolume', 'xvolume_rotate', 'xvolume_write_image',
'xyouts', 'zoom', 'zoom_24')
"""Functions from: http://www.exelisvis.com/docs/routines-1.html"""
tokens = {
'root': [
(r'^\s*;.*?\n', Comment.Singleline),
(words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword),
(words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
(r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator),
(r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
(r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number),
(r'.', Text),
]
}
avg_line_length: 55.262357 | max_line_length: 79 | alphanum_fraction: 0.58862

hexsha: 794102ccf643951ff91587a43256c3795a8211ba | size: 3,145 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: app.py | yxxhero/opskit-api @ 01b2ba64e1bde1c1f4755e8a87624f04b037629f | count: ⌀
max_issues: app.py | yxxhero/opskit-api @ 01b2ba64e1bde1c1f4755e8a87624f04b037629f | count: ⌀
max_forks: app.py | yxxhero/opskit-api @ 01b2ba64e1bde1c1f4755e8a87624f04b037629f | count: ⌀
content:
import logging
from flask import Blueprint
from flask_cors import CORS
from flask_restful import Api
from opskit_api.models import app
from opskit_api.resources.admin.comments import AdminComment
from opskit_api.resources.admin.essays import AdminNote
from opskit_api.resources.admin.users import AdminUser
from opskit_api.resources.api.commnets import Comments
from opskit_api.resources.api.essay import Essay
# import the view functions
from opskit_api.resources.api.essays import Essays
from opskit_api.resources.message.message import UserMessage
from opskit_api.resources.api.noteuserinfo import EssayUserInfo
from opskit_api.resources.api.notices import Notices
from opskit_api.resources.api.upload import Upload
from opskit_api.resources.api.useressays import UserEssays
from opskit_api.resources.api.userinfo import UserInfo
from opskit_api.resources.auth.login import Login
from opskit_api.resources.auth.logout import Logout
from opskit_api.resources.auth.register import Register
from opskit_api.resources.search.essays import SearchNote
from opskit_api.resources.statistics.recommend import Recommend
CORS(app, resources={r"/api/*": {"origins": "*"}})
# resources blueprint
api_bp = Blueprint("api", __name__)
api_resource = Api(api_bp, catch_all_404s=True)
api_resource.add_resource(Essays, "/notes")
api_resource.add_resource(Notices, "/notices")
api_resource.add_resource(UserEssays, "/usernotes")
api_resource.add_resource(Essay, "/note")
api_resource.add_resource(Upload, "/upload")
api_resource.add_resource(UserInfo, "/userinfo")
api_resource.add_resource(EssayUserInfo, "/noteuserinfo")
api_resource.add_resource(Comments, "/comments")
api_resource.add_resource(UserMessage, "/messages")
# admin blueprint
admin_bp = Blueprint("admin", __name__)
admin_resource = Api(admin_bp, catch_all_404s=True)
admin_resource.add_resource(AdminUser, "/users")
admin_resource.add_resource(AdminNote, "/notes")
admin_resource.add_resource(AdminComment, "/comments")
# auth blueprint
auth_bp = Blueprint("auth", __name__)
auth_resource = Api(auth_bp, catch_all_404s=True)
auth_resource.add_resource(Login, "/login")
auth_resource.add_resource(Logout, "/logout")
auth_resource.add_resource(Register, "/register")
# statistics blueprint
statistics_bp = Blueprint("statistics", __name__)
statistics_resource = Api(statistics_bp, catch_all_404s=True)
statistics_resource.add_resource(Recommend, "/recommend")
# search blueprint
search_bp = Blueprint("search", __name__)
search_resource = Api(search_bp, catch_all_404s=True)
search_resource.add_resource(SearchNote, "/notes")
# intercept requests
@app.before_request
def handle_token():
pass
# register the blueprints
app.register_blueprint(auth_bp, url_prefix="/api/v1/auth")
app.register_blueprint(search_bp, url_prefix="/api/v1/search")
app.register_blueprint(admin_bp, url_prefix="/api/v1/admin")
app.register_blueprint(api_bp, url_prefix="/api/v1/resource")
app.register_blueprint(statistics_bp, url_prefix="/api/v1/statistics")
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
avg_line_length: 32.091837 | max_line_length: 70 | alphanum_fraction: 0.814626

hexsha: 794102e863916b180ae3fe80c836f5ca09b15cc8 | size: 7,955 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: MILNet.py | dafeigediaozhatian/MILNet @ d60c894952f9b153051ccc8397608a06b6661d2b | count: ⌀
max_issues: MILNet.py | dafeigediaozhatian/MILNet @ d60c894952f9b153051ccc8397608a06b6661d2b | count: ⌀
max_forks: MILNet.py | dafeigediaozhatian/MILNet @ d60c894952f9b153051ccc8397608a06b6661d2b | count: ⌀
content:
# -*- coding: utf-8 -*-
import os
import numpy as np
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from scipy import stats
from tqdm import tqdm
from config_aesthetic import get_args
from utils.filter_nan import filter_nan
from data.gcn_dataloader_6144 import AVADataset
from model.single_rsgcn_loss_emd import RsgcnModel
from model.adaptive_emd_loss import ada_emd_loss
from model.emd_loss_metric import compute_mse, emd_dis
def main():
# cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# config
config = get_args()
# model
model = RsgcnModel(6144, 512, 512, 5, 10)
model = model.cuda()
# warm start
if config.warm_start:
model.load_state_dict(torch.load(os.path.join(config.ckpt_path,
'ILG-semantic-GCN-obj-color-loss-ada-EMD-visual-model-epoch-%d.pkl' % config.warm_start_epoch)))
print('Successfully loaded pretrain model')
# setting lr
conv_base_lr = config.conv_base_lr
optimizer = optim.Adam(model.parameters(), conv_base_lr)
# loss function
criterion = ada_emd_loss
# record training log
result_dir = config.result_path + 'ILG_semantic_GCN_obj_color_ada_EMD_visual'
if not os.path.exists(result_dir):
os.mkdir(result_dir)
writer = SummaryWriter(log_dir=result_dir)
# model size
param_num = 0
for param in model.parameters():
param_num += int(np.prod(param.shape))
print('Trainable params: %.2f million' % (param_num / 1e6))
# training
if config.train:
# read dataset
trainset = AVADataset(config.train_csv_file, config.refer_img_path, config.train_img_path, config.anno_file, config.train_refer_file)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
# for early stopping
train_losses = []
init_C = 0
init_throshold = 0.2
alpha = 0.99
# start training
print('its learning time: ')
for epoch in range(config.warm_start_epoch, config.epochs):
batch_losses = []
for i, data in tqdm(enumerate(train_loader)):
refer_feature = data['refer_feature'].to(device).float()
refer_feature = torch.transpose(refer_feature, 1, 2)
anno = data['anno'].to(device).float()
anno = anno.view(-1, 10, 1)
# 输出分数分布
gcn_outputs = model(refer_feature)
gcn_outputs = gcn_outputs.view(-1, 10, 1)
optimizer.zero_grad()
# loss function
loss_gcn = criterion(anno, gcn_outputs, init_C, init_throshold)
init_C = alpha * loss_gcn.detach() + (1-alpha) * init_C
batch_losses.append(loss_gcn.item())
# backward
loss_gcn.backward()
optimizer.step()
if i % 50 == 49:
print('Epoch: %d/%d | Step: %d/%d | Training Rank loss: %.4f' % (
epoch + 1, config.epochs, i + 1, len(trainset) // config.train_batch_size + 1, loss_gcn.data.item()))
# update throshold
init_throshold = torch.mean(torch.Tensor(batch_losses))
# compute mean loss
avg_loss = sum(batch_losses) / (len(trainset) // config.train_batch_size + 1)
train_losses.append(avg_loss)
print('Epoch %d averaged training Rank loss: %.4f' % (epoch + 1, avg_loss))
writer.add_scalars('Loss_group', {'train_loss': avg_loss}, epoch)
print('Epoch %d gcn loss: %.4f' % (epoch + 1, loss_gcn))
writer.add_scalars('Loss_group', {'gcn_loss': loss_gcn}, epoch)
# exponetial learning rate decay
if (epoch + 1) % 3 == 0:
conv_base_lr = conv_base_lr / 10
optimizer = optim.Adam(model.parameters(), conv_base_lr)
writer.add_scalars('LR', {'learn_rate': conv_base_lr}, epoch)
# Use early stopping to monitor training
# print('Saving model...')
torch.save(model.state_dict(), os.path.join(config.ckpt_path,
'ILG-semantic-GCN-obj-color-loss-ada-EMD-visual-model-epoch-%d.pkl' % (epoch + 1)))
print('Done.\n')
# testing
if config.test:
model.eval()
print('its test time: ')
testset = AVADataset(config.test_csv_file, config.refer_img_path, config.train_img_path, config.anno_file, config.test_refer_file)
test_loader = torch.utils.data.DataLoader(testset, batch_size=config.test_batch_size, shuffle=False,
num_workers=config.num_workers)
for test_epoch in range(1, config.epochs):
pred_score = []
pred_dis_score = []
gt_score = []
gt_dis_score = []
model.load_state_dict(torch.load(os.path.join(config.ckpt_path, 'best_model.pkl')))
for data in tqdm(test_loader):
# forward
refer_feature = data['refer_feature'].to(device).float()
refer_feature = torch.transpose(refer_feature, 1, 2)
score = data['score']
gt_dis = data['anno']
with torch.no_grad():
gcn_outputs = model(refer_feature)
gcn_outputs = gcn_outputs.view(-1, 10, 1)
pred_dis_score += list(gcn_outputs.cpu().numpy())
gt_dis_score += list(gt_dis.cpu().numpy())
for elem_output in gcn_outputs:
predicted_mean = 0.0
for i, elem in enumerate(elem_output, 1):
predicted_mean += i * elem
pred_score.append(predicted_mean.cpu().numpy()[0])
gt_score += list(score)
new_pred_score, new_gt_score, new_pred_dist, new_gt_dist = filter_nan(pred_score, gt_score, pred_dis_score, gt_dis_score)
# plcc
pred = np.squeeze(np.array(new_pred_score).astype('float64'))
gt = np.squeeze(np.array(new_gt_score).astype('float64'))
plcc, _ = stats.pearsonr(pred, gt)
print('% PLCC of mean: {} | epoch: {}'.format(plcc, test_epoch))
# ACC
correct_nums = 0
for i in range(len(new_pred_score)):
if (new_pred_score[i] >= 5 and new_gt_score[i] >= 5) or (new_pred_score[i] < 5 and new_gt_score[i] < 5):
correct_nums += 1
acc = correct_nums / len(new_pred_score)
print('acc is %f | epoch: %d' % (acc, test_epoch))
# srocc
srocc_gcn = stats.spearmanr(new_pred_score, new_gt_score)[0]
print('% gcn SRCC of mean: {} | epoch: {}'.format(srocc_gcn, test_epoch))
writer.add_scalars('SROCC', {'GCN SROCC': srocc_gcn}, test_epoch)
# MSE
pred_label = torch.Tensor(np.array(new_pred_score))
gt_label = torch.Tensor(np.array(new_gt_score))
mse_value = compute_mse(pred_label, gt_label)
print('% MSE value: {} | epoch: {}'.format(mse_value, test_epoch))
# emd1
pred_dis = torch.Tensor(np.array(new_pred_dist))
pred_dis = torch.squeeze(pred_dis, dim=-1)
gt_dis = torch.Tensor(np.array(new_gt_dist))
emd1_value = emd_dis(pred_dis, gt_dis)
print('% emd1 value: {} | epoch: {}'.format(emd1_value, test_epoch))
# emd2
emd2_value = emd_dis(pred_dis, gt_dis, dist_r=2)
print('% emd2 value: {} | epoch: {}'.format(emd2_value, test_epoch))
writer.close()
if __name__=='__main__':
main()
avg_line_length: 38.616505 | max_line_length: 150 | alphanum_fraction: 0.581772

hexsha: 7941032c95c73d92ed8d763da6bc0a0439a4e989 | size: 2,136 | ext: py | lang: Python | licenses: ["BSD-2-Clause"]
max_stars: opensfm/synthetic_data/synthetic_examples.py | chengchunhsu/OpenSfM @ 9abc20707c55a2e3cdfe9212bc2155a98b0e6979 | count: 1 | events: 2019-05-31T13:50:41.000Z to 2019-05-31T13:50:41.000Z
max_issues: opensfm/synthetic_data/synthetic_examples.py | Pandinosaurus/OpenSfM @ b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | count: ⌀
max_forks: opensfm/synthetic_data/synthetic_examples.py | Pandinosaurus/OpenSfM @ b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | count: 2 | events: 2017-03-31T16:54:34.000Z to 2018-07-10T11:32:22.000Z
content:
import opensfm.synthetic_data.synthetic_scene as ss
def synthetic_circle_scene():
scene_length = 60
points_count = 5000
generator = ss.get_scene_generator("circle", scene_length)
scene = ss.SyntheticStreetScene(generator)
scene.add_street(points_count, 7, 7).perturb_floor([0, 0, 0.1]).perturb_walls(
[0.2, 0.2, 0.01]
)
camera_height = 1.5
camera_interval = 3
position_perturbation = [0.2, 0.2, 0.01]
rotation_perturbation = 0.2
camera = ss.get_camera("perspective", "1", 0.7, -0.1, 0.01)
scene.add_camera_sequence(
camera,
0,
scene_length,
camera_height,
camera_interval,
position_perturbation,
rotation_perturbation,
)
return scene
def synthetic_cube_scene():
return ss.SyntheticCubeScene(10, 1000, 0.001)
def synthetic_rig_scene():
scene_length = 20
points_count = 5000
generator = ss.get_scene_generator("line", scene_length)
scene = ss.SyntheticStreetScene(generator)
scene.add_street(points_count, 15, 12).perturb_floor([0, 0, 0.1]).perturb_walls(
[0.2, 0.2, 0.01]
)
camera_height = 2
camera_interval = 3
position_perturbation = [0.2, 0.2, 0.01]
rotation_perturbation = 0.3
relative_positions = [[0, 0, 0.2], [0, 0, -0.2], [-0.2, 0, 0], [0.2, 0, 0]]
relative_rotations = [
[0, 0, 0],
[0, 3.1415927, 0],
[0, 1.5707963, 0],
[0, -1.5707963, 0],
]
camera_front = ss.get_camera("perspective", "1", 0.7, -0.1, 0.01)
camera_back = ss.get_camera("perspective", "1", 0.7, -0.1, 0.01)
camera_left = ss.get_camera("perspective", "1", 0.9, -0.1, 0.01)
camera_right = ss.get_camera("perspective", "1", 0.9, -0.1, 0.01)
cameras = [
camera_front,
camera_back,
camera_right,
camera_left,
]
scene.add_rig_camera_sequence(
cameras,
relative_positions,
relative_rotations,
0,
scene_length,
camera_height,
camera_interval,
position_perturbation,
rotation_perturbation,
)
return scene
avg_line_length: 27.384615 | max_line_length: 84 | alphanum_fraction: 0.612828

hexsha: 7941033c88397f8adf0c96551952397941d27dd0 | size: 495 | ext: py | lang: Python | licenses: ["BSD-2-Clause"]
max_stars: registration/forms.py | timgates42/timestrap @ 744ebcb0cd5fc536245c18058236169f4f36cb8b | count: 1,758 | events: 2017-04-21T08:42:59.000Z to 2022-03-09T22:58:53.000Z
max_issues: registration/forms.py | timgates42/timestrap @ 744ebcb0cd5fc536245c18058236169f4f36cb8b | count: 172 | events: 2017-04-23T21:30:03.000Z to 2022-02-10T20:10:06.000Z
max_forks: registration/forms.py | timgates42/timestrap @ 744ebcb0cd5fc536245c18058236169f4f36cb8b | count: 138 | events: 2017-04-23T23:02:16.000Z to 2022-03-25T04:44:19.000Z
content:
from django.contrib.auth.forms import PasswordResetForm
from conf.models import Site
from conf.utils import current_site_id
class TimestrapPasswordResetForm(PasswordResetForm):
"""
Override the 'domain' and 'site_name' email context variables to use the
current site.
"""
def save(self, **kwargs):
site = Site.objects.get(id=current_site_id())
kwargs["extra_email_context"] = {"domain": site.domain, "site_name": site.name}
super().save(**kwargs)
avg_line_length: 29.117647 | max_line_length: 87 | alphanum_fraction: 0.705051

hexsha: 7941036d06df89f660ec226878684a48428decd4 | size: 7,216 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars: tools/fix_power_pin.py | tgingold/opentdc @ d74fe70a1295ae3808f7abd98d84bf488ac16bb5 | count: 10 | events: 2020-12-03T02:41:23.000Z to 2022-02-21T06:12:42.000Z
max_issues: tools/fix_power_pin.py | tgingold/opentdc @ d74fe70a1295ae3808f7abd98d84bf488ac16bb5 | count: 1 | events: 2020-11-30T01:35:09.000Z to 2020-11-30T19:15:56.000Z
max_forks: tools/fix_power_pin.py | tgingold/OpenTDC @ d74fe70a1295ae3808f7abd98d84bf488ac16bb5 | count: ⌀
content:
#!/usr/bin/env python3
#
# SPDX-FileCopyrightText: (c) 2020 Tristan Gingold <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read power strips from DEF and apply them to a LEF as PIN shape
# Workaround an OpenRoad issue
import sys
import argparse
from lef_def_parser.def_parser import DefParser
LEF_GRAMMAR = {
'VERSION': None,
'NOWIREEXTENSIONATPIN': None,
'DIVIDERCHAR': None,
'BUSBITCHARS': None,
'MACRO': {
'_end': True,
'CLASS': None,
'FOREIGN': None,
'ORIGIN': None,
'SIZE': None,
'PIN': {
'_end': True,
'DIRECTION': None,
'USE': None,
'PORT': {
'LAYER': None,
'RECT': None
}
},
'OBS': {
'LAYER': None,
'RECT': None
}
}
}
class LefStatement:
def __init__(self, name, value, nest, end_label):
self.name = name
self.value = value
self.nest = nest
self.end_label = end_label
class LefParser:
def __init__(self, filename):
self.filename = filename
self.statement = None
self.pins = {}
def parse(self):
f = open(self.filename, 'r')
lib = LefStatement('LIBRARY', [], [], False)
stack = [(LEF_GRAMMAR, lib)]
for line in f:
toks = line.split()
if not toks:
continue
key = toks[0]
#print(toks)
state = stack[-1]
if key == 'END':
if len(stack) == 1:
# Last END
self.statement = stack[0][1]
break
else:
stack.pop()
continue
if key not in state[0]:
raise Exception('unknown statement {}'.format(key))
next = state[0][key]
if next is None:
# A simple attribute
assert toks[-1] == ';'
s = LefStatement(key, toks[1:-1], None, False)
stack[-1][1].nest.append(s)
else:
# A nested statement
s = LefStatement(key, toks[1:], [], '_end' in next)
stack[-1][1].nest.append(s)
if key == 'PIN':
self.pins[toks[1]] = s
stack.append((next, s))
f.close()
def write1(self, file, s, level):
file.write(' ' * level + s.name + ' ' + ' '.join(s.value))
if s.nest:
file.write('\n')
for s1 in s.nest:
self.write1(file, s1, level + 1)
file.write(' ' * level + 'END')
if s.end_label:
file.write(' ' + s.value[0])
file.write('\n')
else:
file.write(' ;\n')
def write(self, file):
for s1 in self.statement.nest:
self.write1(file, s1, 0)
file.write('END LIBRARY\n')
def rect_filter(shapes, rect):
res = []
r = [[float(rect[0]), float(rect[1])],
[float(rect[2]), float(rect[3])]]
for s in shapes:
# That's very rough, but we are talking about power strips.
if (abs(s[0][0] - r[0][0]) < 1
and abs(s[0][1] - r[0][1]) < 1
and abs(s[1][0] - r[1][0]) < 1
and abs(s[1][1] - r[1][1]) < 1):
pass
else:
res.append(s)
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description='Read power strips from a DEF and apply them to a LEF as PIN shapes.')
parser.add_argument('--def_file', '-d', required=True,
help='Input DEF')
parser.add_argument('--lef_file', '-l', required=True,
help='Input LEF')
parser.add_argument('--pin', '-p', nargs='+',
help='Pin name')
parser.add_argument('--layer', '-L', required=True,
help='Layer name')
parser.add_argument('--output', '-o', required=True,
help='Output LEF file')
args = parser.parse_args()
my_def = DefParser(args.def_file)
my_def.parse()
my_lef = LefParser(args.lef_file)
my_lef.parse()
unit = 1000.0
for pin in args.pin:
print("Pins: {}".format(pin))
n = my_def.specialnets.net_dict.get(pin)
if n is None:
print("Cannot find SPECIALNET {}".format(pin))
continue
rects = []
for r in n.routed:
if r.layer == args.layer and r.shape == 'STRIPE':
print('Shape: wd: {}, pts: {}'.format(r.shape_width, r.points))
hw = float(r.shape_width) / 2
if r.points[0][0] == r.points[1][0]:
# Vertical shape - extend horizontally
if r.points[0][1] < r.points[1][1]:
d = hw
else:
d = -hw
rect = [[r.points[0][0] - d, r.points[0][1]],
[r.points[1][0] + d, r.points[1][1]]]
elif r.points[0][1] == r.points[1][1]:
# Horizontal shape - extend vertically
if r.points[0][0] < r.points[1][0]:
d = hw
else:
d = -hw
rect = [[r.points[0][0], r.points[0][1] - d],
[r.points[1][0], r.points[1][1] + d]]
else:
raise Exception
rects.append([[rect[0][0] / unit, rect[0][1] / unit],
[rect[1][0] / unit, rect[1][1] / unit]])
# Now, insert in LEF
p = my_lef.pins[pin]
port = [s for s in p.nest if s.name == 'PORT']
assert len(port) == 1
port = port[0]
# First remove existing rect.
layer = None
for s in port.nest:
if s.name == 'LAYER':
layer = s.value[0]
elif s.name == 'RECT':
if layer == args.layer:
rects = rect_filter(rects, s.value)
else:
raise Exception
for i, s in enumerate(port.nest):
if s.name == 'LAYER' and s.value[0] == args.layer:
for r in rects:
stmt = LefStatement('RECT', [str(r[0][0]), str(r[0][1]),
str(r[1][0]), str(r[1][1])],
None, False)
port.nest.insert(i + 1, stmt)
break
with open(args.output, 'w') as f:
my_lef.write(f)
avg_line_length: 33.719626 | max_line_length: 79 | alphanum_fraction: 0.467988

hexsha: 7941051ad31bf58f109a23e6821908ddba692cfb | size: 1,259 | ext: py | lang: Python | licenses: ["BSD-3-Clause"]
max_stars: networkx/tests/test.py | rakschahsa/networkx @ 6cac55b1064c3c346665f9281680fa3b66442ad0 | count: 445 | events: 2019-01-26T13:50:26.000Z to 2022-03-18T05:17:38.000Z
max_issues: networkx/tests/test.py | rakschahsa/networkx @ 6cac55b1064c3c346665f9281680fa3b66442ad0 | count: 242 | events: 2019-01-29T15:48:27.000Z to 2022-03-31T22:09:21.000Z
max_forks: networkx/tests/test.py | rakschahsa/networkx @ 6cac55b1064c3c346665f9281680fa3b66442ad0 | count: 136 | events: 2018-01-09T22:52:06.000Z to 2022-02-24T13:26:18.000Z
content:
#!/usr/bin/env python
import sys
from os import path, getcwd
def run(verbosity=1, doctest=False, numpy=True):
"""Run NetworkX tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
numpy: bool, optional
True to test modules dependent on numpy
"""
try:
import nose
except ImportError:
raise ImportError(
"The nose package is needed to run the NetworkX tests.")
sys.stderr.write("Running NetworkX tests:")
nx_install_dir = path.join(path.dirname(__file__), path.pardir)
# stop if running from source directory
if getcwd() == path.abspath(path.join(nx_install_dir, path.pardir)):
raise RuntimeError("Can't run tests from source directory.\n"
"Run 'nosetests' from the command line.")
argv = [' ', '--verbosity=%d' % verbosity,
'-w', nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest', '--doctest-extension=txt'])
if not numpy:
argv.extend(['-A not numpy'])
nose.run(argv=argv)
if __name__ == "__main__":
run()
avg_line_length: 27.369565 | max_line_length: 75 | alphanum_fraction: 0.618745

hexsha: 794105540e33b7369b987008ead7504c528a23d8 | size: 2,301 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: ml_service/pipelines/sales_forecast_verify_train_pipeline.py | owaiskhalid21/mlops-sales-forecasting @ ae092f125d0b970f892b361ad4ced3a8dd9b3b3b | count: ⌀
max_issues: ml_service/pipelines/sales_forecast_verify_train_pipeline.py | owaiskhalid21/mlops-sales-forecasting @ ae092f125d0b970f892b361ad4ced3a8dd9b3b3b | count: ⌀
max_forks: ml_service/pipelines/sales_forecast_verify_train_pipeline.py | owaiskhalid21/mlops-sales-forecasting @ ae092f125d0b970f892b361ad4ced3a8dd9b3b3b | count: ⌀
content:
import argparse
import sys
import os
from azureml.core import Run, Experiment, Workspace
from ml_service.util.env_variables import Env
from sales_forecast.util.model_helper import get_latest_model
def main():
run = Run.get_context()
if (run.id.startswith('OfflineRun')):
from dotenv import load_dotenv
load_dotenv()
sources_dir = os.environ.get("SOURCES_DIR_TRAIN")
if (sources_dir is None):
sources_dir = 'sales_forecast'
workspace_name = os.environ.get("WORKSPACE_NAME")
experiment_name = os.environ.get("EXPERIMENT_NAME")
resource_group = os.environ.get("RESOURCE_GROUP")
subscription_id = os.environ.get("SUBSCRIPTION_ID")
build_id = os.environ.get('BUILD_BUILDID')
aml_workspace = Workspace.get(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group
)
ws = aml_workspace
exp = Experiment(ws, experiment_name)
else:
exp = run.experiment
e = Env()
parser = argparse.ArgumentParser("register")
parser.add_argument(
"--build_id",
type=str,
help="The Build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--output_model_version_file",
type=str,
default="model_version.txt",
help="Name of a file to write model version to"
)
args = parser.parse_args()
if (args.build_id is not None):
build_id = args.build_id
model_name = e.model_name
try:
tag_name = 'BuildId'
model = get_latest_model(
model_name, tag_name, build_id, exp.workspace)
if (model is not None):
print("Model was registered for this build.")
if (model is None):
print("Model was not registered for this run.")
sys.exit(1)
except Exception as e:
print(e)
print("Model was not registered for this run.")
sys.exit(1)
# Save the Model Version for other AzDO jobs after script is complete
if args.output_model_version_file is not None:
with open(args.output_model_version_file, "w") as out_file:
out_file.write(str(model.version))
if __name__ == '__main__':
main()
avg_line_length: 30.276316 | max_line_length: 73 | alphanum_fraction: 0.637983

hexsha: 794105540e33b7369b987008ead7504c528a23d8 | size: 145 | ext: py | lang: Python | licenses: ["Unlicense"]
max_stars: Task1/chapter50.py | shkhaider2015/AI_Lab_Task @ 642a0d5e30515dac6972da194741b829cdc63f30 | count: ⌀
max_issues: Task1/chapter50.py | shkhaider2015/AI_Lab_Task @ 642a0d5e30515dac6972da194741b829cdc63f30 | count: ⌀
max_forks: Task1/chapter50.py | shkhaider2015/AI_Lab_Task @ 642a0d5e30515dac6972da194741b829cdc63f30 | count: ⌀
content:
def fun():
def extra():
print("Hello ")
def extra1():
print("Shakeel")
fun1 = extra()
fun2 = extra1()
fun()
avg_line_length: 14.5 | max_line_length: 24 | alphanum_fraction: 0.462069

hexsha: 794105ed0f307a1fd1cfacbb9cb65cac1264d3f0 | size: 799 | ext: py | lang: Python | licenses: ["Apache-2.0"]
max_stars: Lib/corpuscrawler/crawl_lcm.py | cash/corpuscrawler @ 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | count: 95 | events: 2019-06-13T23:34:21.000Z to 2022-03-12T05:22:49.000Z
max_issues: Lib/corpuscrawler/crawl_lcm.py | sahwar/corpuscrawler @ 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | count: 31 | events: 2019-06-02T18:56:53.000Z to 2021-08-10T20:16:02.000Z
max_forks: Lib/corpuscrawler/crawl_lcm.py | sahwar/corpuscrawler @ 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | count: 35 | events: 2019-06-18T08:26:24.000Z to 2022-01-11T13:59:40.000Z
content:
# coding: utf-8
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
def crawl(crawler):
out = crawler.get_output(language='lcm')
crawler.crawl_pngscriptures_org(out, language='lcm')
avg_line_length: 34.73913 | max_line_length: 74 | alphanum_fraction: 0.764706

hexsha: 794107259dd9c8aafcbbfa1b66ccc60c753120ff | size: 476 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: tests/plan/factories.py | hmpf/easydmp @ 650b241fedd839635335c1044f2d695f9c076e4c | count: 5 | events: 2018-09-25T15:48:39.000Z to 2020-06-04T11:56:36.000Z
max_issues: tests/plan/factories.py | hmpf/easydmp @ 650b241fedd839635335c1044f2d695f9c076e4c | count: 233 | events: 2018-01-10T17:28:41.000Z to 2022-03-30T09:38:03.000Z
max_forks: tests/plan/factories.py | hmpf/easydmp @ 650b241fedd839635335c1044f2d695f9c076e4c | count: 2 | events: 2019-02-07T10:34:09.000Z to 2019-03-12T07:25:09.000Z
content:
import factory
from easydmp.plan.models import Plan
from tests.auth.factories import UserFactory
from tests.dmpt.factories import TemplateFactory
__all__ = [
'PlanFactory',
]
class PlanFactory(factory.django.DjangoModelFactory):
class Meta:
model = Plan
template = factory.SubFactory(TemplateFactory)
title = factory.Faker('sentence', nb_words=6)
added_by = factory.SubFactory(UserFactory)
modified_by = factory.SubFactory(UserFactory)
avg_line_length: 20.695652 | max_line_length: 53 | alphanum_fraction: 0.752101

hexsha: 794108acb4c0758ccd6f9972c9a950b8a44d1134 | size: 5,285 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: pykeigan/utils.py | keigan-motor/pykeigan_motor @ 884c742e38d07a057be62a5f58c2ea1a6f4d30a9 | count: 10 | events: 2018-01-24T00:28:22.000Z to 2022-03-30T04:20:57.000Z
max_issues: pykeigan/utils.py | keigan-motor/pykeigan_motor @ 884c742e38d07a057be62a5f58c2ea1a6f4d30a9 | count: 8 | events: 2018-03-12T06:09:56.000Z to 2022-03-01T03:58:30.000Z
max_forks: pykeigan/utils.py | keigan-motor/pykeigan_motor @ 884c742e38d07a057be62a5f58c2ea1a6f4d30a9 | count: 10 | events: 2018-03-06T04:54:34.000Z to 2021-10-04T04:53:40.000Z
content:
# -*- coding: utf-8 -*-
import struct
def float2bytes(float_value):
float_value=float(float_value)
return struct.pack("!f", float_value)
def bytes2float(byte_array):
return struct.unpack('!f',byte_array)[0]
def uint8_t2bytes(uint8_value):
uint8_value=int(uint8_value)
if uint8_value<0:
raise TypeError("Argument should be positive or equal to zero")
if uint8_value>256-1:
raise TypeError("Argument should be less than 256")
return struct.pack("B",uint8_value)
def uint16_t2bytes(uint16_value):
uint16_value=int(uint16_value)
if uint16_value<0:
raise TypeError("Argument should be positive or equal to zero")
if uint16_value>256**2-1:
raise TypeError("Argument should be less than 256**2")
val1=int(uint16_value/256)
val2=uint16_value-val1*256
return struct.pack("BB",val1,val2)
def uint16_t2bytes_little(uint16_value):
uint16_value=int(uint16_value)
if uint16_value<0:
raise TypeError("Argument should be positive or equal to zero")
if uint16_value>256**2-1:
raise TypeError("Argument should be less than 256**2")
val1=int(uint16_value/256)
val2=uint16_value-val1*256
return struct.pack("<BB",val2,val1)
def uint32_t2bytes(uint32_value):
uint32_value=int(uint32_value)
if uint32_value<0:
raise TypeError("Argument should be positive or equal to zero")
if uint32_value>256**4-1:
raise TypeError("Argument should be less than 256**4")
val1=int(uint32_value/256**3)
val2=int((uint32_value-val1*256**3)/256**2)
val3=int((uint32_value-val1*256**3-val2*256**2)/256)
val4=uint32_value-val1*256**3-val2*256**2-val3*256
return struct.pack("BBBB",val1,val2,val3,val4)
def bytes2uint32_t(ba):
return struct.unpack(">I",ba)[0]
def bytes2uint16_t(ba):
return struct.unpack(">H", ba)[0]
def bytes2uint8_t(ba):
return struct.unpack("B",ba)[0]
def bytes2int16_t(ba):
return struct.unpack(">h",ba)[0]
def bytes2int16_t_little(ba):
return struct.unpack("<h",ba)[0]
def deg2rad(degree):
return degree*0.017453292519943295
def rad2deg(radian):
return radian/ 0.017453292519943295
def rpm2rad_per_sec(rpm):
return rpm *0.10471975511965977
def rad_per_sec2rpm(radian_per_sec):
return radian_per_sec/0.10471975511965977
def calc_crc16(buf):
    # Lookup table for the reflected CRC-16/CCITT (X-25 / Kermit) polynomial,
    # stored as an attribute of calc_crc16_bytes and used by the loop below.
    calc_crc16_bytes.CRC_TABLE = [ \
0 , 0x1189 , 0x2312 , 0x329b , 0x4624 , 0x57ad , 0x6536 , 0x74bf , \
0x8c48 , 0x9dc1 , 0xaf5a , 0xbed3 , 0xca6c , 0xdbe5 , 0xe97e , 0xf8f7 , \
0x1081 , 0x0108 , 0x3393 , 0x221a , 0x56a5 , 0x472c , 0x75b7 , 0x643e , \
0x9cc9 , 0x8d40 , 0xbfdb , 0xae52 , 0xdaed , 0xcb64 , 0xf9ff , 0xe876 , \
0x2102 , 0x308b , 0x0210 , 0x1399 , 0x6726 , 0x76af , 0x4434 , 0x55bd , \
0xad4a , 0xbcc3 , 0x8e58 , 0x9fd1 , 0xeb6e , 0xfae7 , 0xc87c , 0xd9f5 , \
0x3183 , 0x200a , 0x1291 , 0x0318 , 0x77a7 , 0x662e , 0x54b5 , 0x453c , \
0xbdcb , 0xac42 , 0x9ed9 , 0x8f50 , 0xfbef , 0xea66 , 0xd8fd , 0xc974 , \
\
0x4204 , 0x538d , 0x6116 , 0x709f , 0x0420 , 0x15a9 , 0x2732 , 0x36bb , \
0xce4c , 0xdfc5 , 0xed5e , 0xfcd7 , 0x8868 , 0x99e1 , 0xab7a , 0xbaf3 , \
0x5285 , 0x430c , 0x7197 , 0x601e , 0x14a1 , 0x0528 , 0x37b3 , 0x263a , \
0xdecd , 0xcf44 , 0xfddf , 0xec56 , 0x98e9 , 0x8960 , 0xbbfb , 0xaa72 , \
0x6306 , 0x728f , 0x4014 , 0x519d , 0x2522 , 0x34ab , 0x0630 , 0x17b9 , \
0xef4e , 0xfec7 , 0xcc5c , 0xddd5 , 0xa96a , 0xb8e3 , 0x8a78 , 0x9bf1 , \
0x7387 , 0x620e , 0x5095 , 0x411c , 0x35a3 , 0x242a , 0x16b1 , 0x0738 , \
0xffcf , 0xee46 , 0xdcdd , 0xcd54 , 0xb9eb , 0xa862 , 0x9af9 , 0x8b70 , \
\
0x8408 , 0x9581 , 0xa71a , 0xb693 , 0xc22c , 0xd3a5 , 0xe13e , 0xf0b7 , \
0x0840 , 0x19c9 , 0x2b52 , 0x3adb , 0x4e64 , 0x5fed , 0x6d76 , 0x7cff , \
0x9489 , 0x8500 , 0xb79b , 0xa612 , 0xd2ad , 0xc324 , 0xf1bf , 0xe036 , \
0x18c1 , 0x0948 , 0x3bd3 , 0x2a5a , 0x5ee5 , 0x4f6c , 0x7df7 , 0x6c7e , \
0xa50a , 0xb483 , 0x8618 , 0x9791 , 0xe32e , 0xf2a7 , 0xc03c , 0xd1b5 , \
0x2942 , 0x38cb , 0x0a50 , 0x1bd9 , 0x6f66 , 0x7eef , 0x4c74 , 0x5dfd , \
0xb58b , 0xa402 , 0x9699 , 0x8710 , 0xf3af , 0xe226 , 0xd0bd , 0xc134 , \
0x39c3 , 0x284a , 0x1ad1 , 0x0b58 , 0x7fe7 , 0x6e6e , 0x5cf5 , 0x4d7c , \
\
0xc60c , 0xd785 , 0xe51e , 0xf497 , 0x8028 , 0x91a1 , 0xa33a , 0xb2b3 , \
0x4a44 , 0x5bcd , 0x6956 , 0x78df , 0x0c60 , 0x1de9 , 0x2f72 , 0x3efb , \
0xd68d , 0xc704 , 0xf59f , 0xe416 , 0x90a9 , 0x8120 , 0xb3bb , 0xa232 , \
0x5ac5 , 0x4b4c , 0x79d7 , 0x685e , 0x1ce1 , 0x0d68 , 0x3ff3 , 0x2e7a , \
0xe70e , 0xf687 , 0xc41c , 0xd595 , 0xa12a , 0xb0a3 , 0x8238 , 0x93b1 , \
0x6b46 , 0x7acf , 0x4854 , 0x59dd , 0x2d62 , 0x3ceb , 0x0e70 , 0x1ff9 , \
0xf78f , 0xe606 , 0xd49d , 0xc514 , 0xb1ab , 0xa022 , 0x92b9 , 0x8330 , \
0x7bc7 , 0x6a4e , 0x58d5 , 0x495c , 0x3de3 , 0x2c6a , 0x1ef1 , 0x0f78 \
]
c = 0
for val in buf:
num = (c ^ val) & 0xFF
c = calc_crc16_bytes.CRC_TABLE[num] ^ (c >> 8)
return c
def calc_crc16_bytes(buf):
crc16 = calc_crc16(buf)
#print(crc16)
return uint16_t2bytes_little(crc16)
avg_line_length: 40.037879 | max_line_length: 81 | alphanum_fraction: 0.635951

hexsha: 794108eab634b2e9281158fd4c913c43dec2a8cc | size: 9,109 | ext: py | lang: Python | licenses: ["MIT"]
max_stars: melodic/lib/python2.7/dist-packages/geometry_msgs/msg/_PoseWithCovarianceStamped.py | Dieptranivsr/Ros_Diep @ d790e75e6f5da916701b11a2fdf3e03b6a47086b | count: 1 | events: 2021-11-08T12:24:24.000Z to 2021-11-08T12:24:24.000Z
max_issues: devel/lib/python3/dist-packages/geometry_msgs/msg/_PoseWithCovarianceStamped.py | hyu-nani/ydlidar_ws @ 56316db999c057c4315a20ba8277826d6a043120 | count: 1 | events: 2021-07-08T10:26:06.000Z to 2021-07-08T10:31:11.000Z
max_forks: devel/lib/python3/dist-packages/geometry_msgs/msg/_PoseWithCovarianceStamped.py | hyu-nani/ydlidar_ws @ 56316db999c057c4315a20ba8277826d6a043120 | count: ⌀
content:
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from geometry_msgs/PoseWithCovarianceStamped.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class PoseWithCovarianceStamped(genpy.Message):
_md5sum = "953b798c0f514ff060a53a3498ce6246"
_type = "geometry_msgs/PoseWithCovarianceStamped"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# This expresses an estimated pose with a reference coordinate frame and timestamp
Header header
PoseWithCovariance pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geometry_msgs/PoseWithCovariance
# This represents a pose in free space with uncertainty.
Pose pose
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['header','pose']
_slot_types = ['std_msgs/Header','geometry_msgs/PoseWithCovariance']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,pose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PoseWithCovarianceStamped, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geometry_msgs.msg.PoseWithCovariance()
else:
self.header = std_msgs.msg.Header()
self.pose = geometry_msgs.msg.PoseWithCovariance()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w))
buff.write(_get_struct_36d().pack(*self.pose.covariance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geometry_msgs.msg.PoseWithCovariance()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
start = end
end += 288
self.pose.covariance = _get_struct_36d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w))
buff.write(self.pose.covariance.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geometry_msgs.msg.PoseWithCovariance()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
start = end
end += 288
self.pose.covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=36)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_36d = None
def _get_struct_36d():
global _struct_36d
if _struct_36d is None:
_struct_36d = struct.Struct("<36d")
return _struct_36d
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
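# Usage sketch (an illustrative assumption, not part of the generated file;
# requires a ROS Python environment where genpy and std_msgs are importable):
#
#     from io import BytesIO
#     msg = PoseWithCovarianceStamped()
#     msg.header.frame_id = 'map'
#     msg.pose.pose.orientation.w = 1.0
#     buf = BytesIO()
#     msg.serialize(buf)                                   # pack to bytes
#     clone = PoseWithCovarianceStamped().deserialize(buf.getvalue())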
| 37.640496 | 236 | 0.654518 |
794109c5303f9b6fa5b315f054ffd4cbeba7df17 | 1,477 | py | Python | requests/config.py | jasenguyen/requests | 6d25fff3f93eb89a87742bf3be24fa87724fe550 | [
"0BSD"
] | 1 | 2016-08-29T01:49:37.000Z | 2016-08-29T01:49:37.000Z | requests/config.py | jasenguyen/requests | 6d25fff3f93eb89a87742bf3be24fa87724fe550 | [
"0BSD"
] | null | null | null | requests/config.py | jasenguyen/requests | 6d25fff3f93eb89a87742bf3be24fa87724fe550 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
requests.config
~~~~~~~~~~~~~~~
This module provides the Requests settings feature set.
"""
class Settings(object):
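    # Borg-style shared state: every instance rebinds self.__dict__ to this
    # class-level dict, so all Settings objects read and write the same values.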
_singleton = {}
# attributes with defaults
__attrs__ = []
def __init__(self, **kwargs):
super(Settings, self).__init__()
self.__dict__ = self._singleton
def __call__(self, *args, **kwargs):
# new instance of class to call
r = self.__class__()
# cache previous settings for __exit__
r.__cache = self.__dict__.copy()
map(self.__cache.setdefault, self.__attrs__)
# set new settings
self.__dict__.update(*args, **kwargs)
return r
def __enter__(self):
pass
def __exit__(self, *args):
# restore cached copy
self.__dict__.update(self.__cache.copy())
del self.__cache
def __getattribute__(self, key):
if key in object.__getattribute__(self, '__attrs__'):
try:
return object.__getattribute__(self, key)
except AttributeError:
return None
return object.__getattribute__(self, key)
settings = Settings()
settings.base_headers = {'User-Agent': 'python-requests.org'}
settings.accept_gzip = True
settings.proxies = None
settings.verbose = None
settings.timeout = None
settings.max_redirects = 30
settings.decode_unicode = True
#: Use socket.setdefaulttimeout() as fallback?
settings.timeout_fallback = True
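# Usage sketch (an assumption based on the context-manager methods above):
# calling settings(...) applies overrides to the shared state and returns an
# object whose __exit__ restores the previous values, so overrides can be
# scoped to a with-block:
#
#     with settings(timeout=30, verbose=True):
#         pass  # code here sees the temporary values; they revert afterwards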
| 21.405797 | 61 | 0.634394 |
79410a4d390e09895e329b13158cafc4c2798228 | 799 | py | Python | mcelfish/plot_beta.py | pgdr/mcelfish | 55c736f8425d0ab425dab999094ada4bafb9ca54 | [
"MIT"
] | null | null | null | mcelfish/plot_beta.py | pgdr/mcelfish | 55c736f8425d0ab425dab999094ada4bafb9ca54 | [
"MIT"
] | null | null | null | mcelfish/plot_beta.py | pgdr/mcelfish | 55c736f8425d0ab425dab999094ada4bafb9ca54 | [
"MIT"
] | null | null | null | from scipy.stats import beta
import matplotlib.pyplot as plt
import numpy as np
import sys
def _gen(alpha_, beta_, loc_=0, scale_=1):
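    # 100 evenly spaced points spanning the 1st-99th percentile of
    # Beta(alpha_, beta_), shifted by loc_ and scaled by scale_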
return np.linspace(
beta.ppf(0.01, alpha_, beta_, loc_, scale_),
beta.ppf(0.99, alpha_, beta_, loc_, scale_),
100,
)
def _plot(x, alpha_, beta_, loc_=0, scale_=1):
plt.plot(
x,
beta.pdf(x, alpha_, beta_, loc=loc_, scale=scale_),
label=f"Beta({alpha_}, {beta_})",
)
    plt.legend()
    plt.show()
def main():
abls = [1.0, 1.0, 0.0, 1.0]
if len(sys.argv) == 1:
exit("Usage: plot_beta alpha [beta [loc [scale]]]\n e.g. 2 8 10 50")
for i in range(1, min(5, len(sys.argv))):
abls[i - 1] = float(sys.argv[i])
x = _gen(*abls)
_plot(x, *abls)
if __name__ == "__main__":
main()
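# CLI example: `python plot_beta.py 2 8` plots Beta(2, 8) over its central 98%
# interval; `python plot_beta.py 2 8 10 50` shifts/scales the support to [10, 60].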
| 22.194444 | 76 | 0.566959 |
79410a717098bceac4de42241287d8cecde8a1e1 | 567 | py | Python | delft3dfmpy/__init__.py | SiebeBosch/delft3dfmpy | 562c00dd05c0137b588fdd0f281018beaa0b9775 | [
"MIT"
] | null | null | null | delft3dfmpy/__init__.py | SiebeBosch/delft3dfmpy | 562c00dd05c0137b588fdd0f281018beaa0b9775 | [
"MIT"
] | null | null | null | delft3dfmpy/__init__.py | SiebeBosch/delft3dfmpy | 562c00dd05c0137b588fdd0f281018beaa0b9775 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for delft3dfmpy."""
__author__ = """Guus Rongen"""
__email__ = '[email protected]'
__version__ = '1.1.3'
from delft3dfmpy.core.dfm import DFlowFMModel
from delft3dfmpy.core.drr import DFlowRRModel
from delft3dfmpy.core.mesh2d import Rectangular
from delft3dfmpy.datamodels.hydamo import HyDAMO
from delft3dfmpy.datamodels.osm import OSM
from delft3dfmpy.io.dflowfmwriter import DFlowFMWriter
from delft3dfmpy.io.dflowrrwriter import DFlowRRWriter
from delft3dfmpy.core.logging import initialize_logger
initialize_logger()
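# Importing the package therefore exposes the FM/RR models, the rectangular
# mesh generator, the HyDAMO/OSM data models and the model writers at the top
# level, and configures logging as a side effect.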
| 29.842105 | 54 | 0.809524 |
79410c2081b8709cff2133d2c5555bf51d4a4f96 | 39,169 | py | Python | scripts/install-unity.py | schneems/OpenMined | f8d6433c51e9838e2eef4d7f4935508b8f0bf789 | [
"Apache-2.0"
] | null | null | null | scripts/install-unity.py | schneems/OpenMined | f8d6433c51e9838e2eef4d7f4935508b8f0bf789 | [
"Apache-2.0"
] | null | null | null | scripts/install-unity.py | schneems/OpenMined | f8d6433c51e9838e2eef4d7f4935508b8f0bf789 | [
"Apache-2.0"
] | 1 | 2020-05-27T10:09:17.000Z | 2020-05-27T10:09:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Adrian Stutz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import collections
import ConfigParser
import datetime
import getpass
import hashlib
import io
import json
import math
import os
import pipes
import re
import shutil
import subprocess
import sys
import time
import traceback
import urllib
import urllib2
import ssl
# ---- CONFIGURATION ----
VERSION = '0.1.0'
# URL to look for main Unity releases
UNITY_DOWNLOADS = 'https://unity3d.com/get-unity/download/archive'
# URL to look for Unity patch releases
UNITY_PATCHES = 'https://unity3d.com/unity/qa/patch-releases'
# URL to look for beta releases
UNITY_BETAS = 'https://unity3d.com/unity/beta/archive'
# URL for Unity patch release notes
UNITY_PATCH_RELEASE_NOTES = 'https://unity3d.com/unity/qa/patch-releases/%s'
# URL for Unity beta release notes
UNITY_BETA_RELEASE_NOTES = 'https://unity3d.com/unity/beta/unity%s'
# Regex to find relative beta page URI from HTML
UNITY_BETAVERSION_RE = '"/unity/beta/unity(\d+\.\d+\.\d+\w\d+)"'
# parametrized beta version URL, given its version
UNITY_BETAVERSION_URL = "https://unity3d.com/unity/beta/unity%s"
# Regex to parse package URLs from HTML
UNITY_DOWNLOADS_RE = '"(https?:\/\/[\w\/.-]+\/[0-9a-f]{12}\/)MacEditorInstaller\/[\w\/.-]+-(\d+\.\d+\.\d+\w\d+)[\w\/.-]+"'
# Name of the ini file at the package URL that contains package information (%s = version)
UNITY_INI_NAME = 'unity-%s-osx.ini'
# Regex to parse URLs given to --discover
UNITY_INI_RE = '(https?:\/\/[\w\/.-]+\/[0-9a-f]{12}\/)[\w\/.-]+-(\d+\.\d+\.\d+\w\d+)[\w\/.-]+'
# Regex to parse Unity versions in the format of e.g. '5.3.2p3'
VERSION_RE = '^(\d+)?(?:\.(\d+)(?:\.(\d+))?)?(?:(\w)(?:(\d+))?)?$'
# Unity release types and corresponding letters in version string
RELEASE_LETTERS = { 'all': None, 'release': 'f', 'patch': 'p', 'beta': 'b', 'alpha': 'a' }
# How release types are included in the search, higher values include all lower release types
RELEASE_LETTER_STRENGTH = { None: 1, 'f': 1, 'p': 2, 'b': 3, 'a': 4 }
# How release types are sorted, bigger value means the release is newer
RELEASE_LETTER_SORT = { 'p': 4, 'f': 3, 'b': 2, 'a': 1 }
# Default release stage when not explicitly specified with --list or in the given version string
DEFAULT_STAGE = 'f'
# Default location where script data is being stored
# (install packages, unity ini files, cache and settings)
DATA_DIR = '~/Library/Application Support/install-unity/'
# Name of the Unity versions cache in DATA_DIR (created from above URLs)
CACHE_FILE = 'cache.json'
# Lifetime of the cache, use --update to force an update
CACHE_LIFETIME = 60*60*24
# File where script settings are stored in DATA_DIR
SETTINGS_FILE = 'settings.json'
# Timeout of download requests in seconds
DOWNLOAD_TIMEOUT = 60
# How often downloads are retried when errors occur before giving up
DOWNLOAD_MAX_RETRIES = 3
# Time in seconds to wait between retrying the download
DOWNLOAD_RETRY_WAIT = 10
# Size of blocks of data processed while downloading
DOWNLOAD_BLOCKSIZE = 8192
# ---- ARGUMENTS ----
parser = argparse.ArgumentParser(description='Install Unity Script ' + VERSION)
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
parser.add_argument('versions',
metavar='VERSION', type=str, nargs='*',
help='unity version to install packages from (only >= 5.0.0)')
parser.add_argument('--packages',
action='store_const', const='list', dest='operation',
help='list available packages for the versions(s)')
parser.add_argument('--download',
action='store_const', const='download', dest='operation',
help='only download the version(s), don\'t install them')
parser.add_argument('--install',
action='store_const', const='install', dest='operation',
help='only install the version(s), they must have been downloaded previously')
parser.add_argument('--volume',
default='/',
help='set the target volume (must be a volume mountpoint)')
parser.add_argument('-p', '--package',
action='append',
help='add package to download or install, absent = install default packages')
parser.add_argument('--all-packages',
action='store_true',
help='install all packages instead of only the default ones when no packages are selected')
parser.add_argument('-k', '--keep',
action='store_true',
help='don\'t remove downloaded packages after installation (implied when using --install)')
parser.add_argument('--data-dir',
action='store',
default=DATA_DIR,
help='directory to store packages, unity ini files, cache and settings (default is in Application Support)')
parser.add_argument('-u', '--update',
action='store_true',
help='force updating of cached version information')
parser.add_argument('-l', '--list',
choices=['release', 'patch', 'beta', 'alpha', 'all'],
help='list the cached unity versions')
parser.add_argument('--discover',
action='append',
help='manually discover a Unity packages url (link to unity-VERSION-osx.ini or MacEditorInstaller url)')
parser.add_argument('--forget',
action='append',
help='remove a manually discovered version')
parser.add_argument('--save',
action='store_true',
help='save the current set of packages as defaults, used when no packages are given (use with no packages to reset)')
parser.add_argument('--unity-defaults',
action='store_true',
help='use the unity default packages instead of the custom defaults that might have been saved')
parser.add_argument('-v', '--verbose',
action='store_true',
help='show stacktrace when an error occurs')
args = parser.parse_args()
# ---- GENERAL ----
def error(message):
print 'ERROR: ' + message
if args.verbose:
traceback.print_stack()
sys.exit(1)
# ---- VERSIONS CACHE ----
class version_cache:
def __init__(self, cache_file):
self.cache_file = cache_file
self.cache = {}
self.sorted_versions = None
self.load()
def load(self):
if not os.path.isfile(self.cache_file):
return
with open(self.cache_file, 'r') as file:
data = file.read()
self.cache = json.loads(data)
self.sorted_versions = None
def is_outdated(self, cache):
if not cache or not '_lastupdate' in cache:
return True
lastupdate = datetime.datetime.strptime(cache['_lastupdate'], '%Y-%m-%dT%H:%M:%S.%f')
return (datetime.datetime.utcnow() - lastupdate).total_seconds() > CACHE_LIFETIME
def update(self, stage, force = False):
strength = RELEASE_LETTER_STRENGTH[stage]
if force:
print 'Forced updating of unity versions cache'
else:
print 'Updating outdated unity versions caches'
if strength >= 1 and (force or self.is_outdated(self.cache.get('release', None))):
print 'Loading Unity releases...'
self.cache['release'] = {}
self.cache['release']['_lastupdate'] = datetime.datetime.utcnow().isoformat()
count = self._load_and_parse(UNITY_DOWNLOADS, UNITY_DOWNLOADS_RE, self.cache['release'])
if count > 0: print 'Found %i Unity releases.' % count
if strength >= 2 and (force or self.is_outdated(self.cache.get('patch', None))):
print 'Loading Unity patch releases...'
self.cache['patch'] = {}
self.cache['patch']['_lastupdate'] = datetime.datetime.utcnow().isoformat()
count = self._load_and_parse(UNITY_PATCHES, UNITY_DOWNLOADS_RE, self.cache['patch'])
if count > 0: print 'Found %i Unity patch releases.' % count
if strength >= 3 and (force or self.is_outdated(self.cache.get('beta', None))):
print 'Loading Unity beta releases...'
self.cache['beta'] = {}
self.cache['beta']['_lastupdate'] = datetime.datetime.utcnow().isoformat()
count = self._load_and_parse_betas(UNITY_BETAS, UNITY_DOWNLOADS_RE, self.cache['beta'])
            if count > 0: print 'Found %i Unity beta releases.' % count
print ''
self.save()
self.sorted_versions = None
def _load_and_parse_betas(self, url, pattern, unity_versions):
try:
response = urllib2.urlopen(url)
except Exception as e:
error('Could not load URL "%s": %s' % (url, e.reason))
result = sorted(set(re.findall(UNITY_BETAVERSION_RE, response.read())))
for betaversion in result:
versionurl = UNITY_BETAVERSION_URL % betaversion
self._load_and_parse(versionurl, pattern, unity_versions)
return len(result)
def _load_and_parse(self, url, pattern, unity_versions, fail = True):
try:
response = urllib2.urlopen(url)
except Exception as e:
if fail:
error('Could not load URL "%s": %s' % (url, e.reason))
else:
return 0
result = re.findall(pattern, response.read())
for match in result:
unity_versions[match[1]] = match[0]
return len(result)
def save(self):
with open(self.cache_file, 'w') as file:
data = json.dumps(self.cache)
file.write(data)
def autoadd(self, version):
parsed = parse_version(version)
# We need a full version to look up the release notes
if None in parsed:
return False
url = None
cache = None
if parsed[3] == 'p':
url = UNITY_PATCH_RELEASE_NOTES % version_string(parsed)
cache = 'patch'
elif parsed[3] == 'b':
url = UNITY_BETA_RELEASE_NOTES % version_string(parsed)
cache = 'beta'
else:
return False
print 'Unity version %s not known, trying to guess release notes URL to find it...' % version
count = self._load_and_parse(url, UNITY_DOWNLOADS_RE, self.cache[cache], False)
if count > 0:
self.save()
self.sorted_versions = None
return count > 0
def add(self, url):
result = re.search(UNITY_INI_RE, url)
if result is None:
print 'WARNING: Could not parse Unity packages url: %s' % url
return None
baseurl = result.group(1)
version = result.group(2)
ini_name = UNITY_INI_NAME % version
ini_url = baseurl + ini_name
success = False
try:
urllib2.urlopen(ini_url)
except urllib2.HTTPError, e:
print 'ERROR: Failed to load url "%s", returned error code %d' % (ini_url, e.code)
except urllib2.URLError, e:
print 'ERROR: Failed to load url "%s", error: %s' % (ini_url, e.reason)
else:
success = True
if not success: return None
if not 'discovered' in self.cache:
self.cache['discovered'] = {}
self.cache['discovered'][version] = baseurl
self.sorted_versions = None
return version
def remove(self, version):
if not 'discovered' in self.cache or not version in self.cache['discovered']:
print "WARNING: Version %s not found in manually discovered versions" % version
return False
del self.cache['discovered'][version]
self.sorted_versions = None
return True
def get_baseurl(self, version):
if 'discovered' in self.cache and version in self.cache['discovered']:
return self.cache['discovered'][version]
elif version in self.cache['release']:
return self.cache['release'][version]
elif version in self.cache['patch']:
return self.cache['patch'][version]
elif version in self.cache['beta']:
return self.cache['beta'][version]
else:
return None
def get_sorted_versions(self, stage = DEFAULT_STAGE):
if self.sorted_versions == None:
all_versions = []
strength = RELEASE_LETTER_STRENGTH[stage]
# Release versions are always considered
if 'release' in self.cache and strength >= 1:
all_versions += self.cache['release'].keys()
# Patch versions are only considered when explicitly selected patch, beta or alpha
if 'patch' in self.cache and strength >= 2:
all_versions += self.cache['patch'].keys()
# Beta releases are only considered when explicitly selecting beta or alpha
if 'beta' in self.cache and strength >= 3:
all_versions += self.cache['beta'].keys()
# Same rules as above are applied to manually discovered versions
if 'discovered' in self.cache:
for version in self.cache['discovered'].keys():
parsed = parse_version(version)
if ((parsed[3] == 'f' and strength >= 1)
or (parsed[3] == 'p' and strength >= 2)
or (parsed[3] == 'b' and strength >= 3)
or (parsed[3] == 'a' and strength >= 4)):
all_versions.append(version)
all_versions = [x for x in all_versions if not x.startswith('_')]
self.sorted_versions = sorted(all_versions, compare_versions)
return self.sorted_versions
def list(self, stage = DEFAULT_STAGE):
print 'Known available Unity versions:'
print '(Use "--discover URL" to add versions not automatically discovered)'
strength = RELEASE_LETTER_STRENGTH[stage]
last_major_minor = None
for version in reversed(self.get_sorted_versions(stage)):
parts = parse_version(version)
if strength < RELEASE_LETTER_STRENGTH[parts[3]]:
continue
major_minor = '%s.%s' % (parts[0], parts[1])
if (major_minor != last_major_minor):
print '\n== %s ==' % major_minor
last_major_minor = major_minor
print '- %s' % version
def migrate_default_packages(self):
if "default_packages" in self.cache:
packages = self.cache["default_packages"]
del self.cache["default_packages"]
return packages
else:
return None
# ---- SETTINGS ----
class script_settings:
def __init__(self, settings_file):
self.settings_file = settings_file
self.settings = {}
self.load()
def load(self):
if not os.path.isfile(self.settings_file):
return
with open(self.settings_file, 'r') as file:
data = file.read()
self.settings = json.loads(data)
def save(self):
with open(self.settings_file, 'w') as file:
data = json.dumps(self.settings)
file.write(data)
def set_default_packages(self, list):
if list and len(list) > 0:
self.settings["default_packages"] = list
else:
del self.settings["default_packages"]
self.save()
def get_default_packages(self):
if "default_packages" in self.settings:
return self.settings["default_packages"]
else:
return []
# ---- VERSION HANDLING ----
def parse_version(version):
match = re.match(VERSION_RE, version)
if not match:
error('Version %s does not conform to Unity version format 0.0.0x0' % version)
parts = list(match.groups())
# Convert to int, except fourth element wich is release type letter
for i in range(len(parts)):
if not parts[i] or i == 3: continue
parts[i] = int(parts[i])
return parts
def version_string(parts):
parts = list(parts)
for i in range(len(parts)):
if parts[i] == None:
parts[i] = 'x'
elif i != 3:
parts[i] = str(parts[i])
return '%s.%s.%s%s%s' % tuple(parts)
def compare_versions(one, two):
first = parse_version(one)
first[3] = RELEASE_LETTER_SORT[first[3]]
second = parse_version(two)
second[3] = RELEASE_LETTER_SORT[second[3]]
return cmp(first, second)
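# Illustrative examples of the helpers above:
#   parse_version('5.3.2p3')                 -> [5, 3, 2, 'p', 3]
#   version_string([5, 3, None, 'f', None])  -> '5.3.xfx'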
def input_matches_version(input_version, match_with):
for i in range(5):
if input_version[i] == None or match_with[i] == None:
continue
# If a specific build number is given (e.g. 5.6p3), we match the release type as well.
# Otherwise we ignore it, as release type selection is done when the list
# of versions is being compiled.
if i == 3 and input_version[4] is None:
continue
elif input_version[i] != match_with[i]:
return False
return True
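# e.g. an input of '5.3' (parsed as [5, 3, None, None, None]) matches any
# 5.3.x version; '5.6p3' also pins the release letter because a build number
# is given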
def select_version(version, sorted_versions):
input_version = parse_version(version)
for i in reversed(range(len(sorted_versions))):
match_with = parse_version(sorted_versions[i])
if input_matches_version(input_version, match_with):
if version != sorted_versions[i]:
print 'Selected version %s for input version %s' % (sorted_versions[i], version)
else:
print 'Selected version %s exactly matches input version' % (sorted_versions[i])
return sorted_versions[i]
return None
# ---- DOWNLOAD ----
def convertSize(size):
if size == 0:
return '0 B'
elif size < 1024:
return '%s B' % size
size_name = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
size = size / 1024.0
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return '%s %s' % (s,size_name[i])
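# e.g. convertSize(500) -> '500 B', convertSize(1536) -> '1.5 KB'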
def download_url(url, output, expected_size):
retries = 0
while True:
try:
existing_size = 0
if os.path.isfile(output):
existing_size = os.path.getsize(output)
if existing_size == expected_size:
print 'File already exists and matches expected size, skipping'
return
elif existing_size > expected_size:
print 'Existing file "%s" is bigger than expected (%s > %s)' % (output, convertSize(existing_size), convertSize(expected_size))
return
else:
print 'Resuming download of existing file "%s" (%s already downloaded)' % (output, convertSize(existing_size))
req = urllib2.Request(url)
if existing_size > 0:
req.add_header('Range', 'bytes=%s-' % existing_size)
res = urllib2.urlopen(req, None, DOWNLOAD_TIMEOUT)
file = open(output, 'ab')
print ''
init_progress()
blocknr = math.floor(existing_size / DOWNLOAD_BLOCKSIZE)
while True:
chunk = res.read(DOWNLOAD_BLOCKSIZE)
progress(blocknr, DOWNLOAD_BLOCKSIZE, expected_size)
blocknr += 1
if len(chunk) == 0:
break
file.write(chunk)
progress_cleanup()
file.close()
res.close()
if os.path.getsize(output) < expected_size:
raise Exception('Connection dropped')
break
except Exception, e:
print 'Error downloading file: %s' % str(e)
retries += 1
if retries > DOWNLOAD_MAX_RETRIES:
error('Failed to download file, max number of retries exceeded')
else:
print 'Will try to resume download in a few seconds...'
time.sleep(DOWNLOAD_RETRY_WAIT)
block_times = None
last_update = None
def init_progress():
global block_times, last_update
block_times = collections.deque()
block_times.append(time.time())
last_update = 0
def progress(blocknr, blocksize, size):
global block_times, last_update
if time.time() - last_update > 0.5:
last_update = time.time()
window_duration = time.time() - block_times[0]
window_size = len(block_times) * blocksize
speed = window_size / window_duration
size_done = blocknr * blocksize
current = min(1.0, size_done / float(size))
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write('[')
sys.stdout.write('=' * int(math.floor(current * 60)))
sys.stdout.write('>')
sys.stdout.write('·' * int(math.ceil((1 - current) * 60) - 1))
sys.stdout.write('] ')
sys.stdout.write('{0:.2f}% | '.format(100.0 * current))
sys.stdout.write('{0}/s '.format(convertSize(speed)))
sys.stdout.write('\n')
block_times.append(time.time())
if (len(block_times) > 100):
block_times.popleft()
def progress_cleanup():
global block_times, last_update
if not block_times is None:
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
block_times = None
def hashfile(path, blocksize=65536):
with open(path, 'rb') as file:
hasher = hashlib.md5()
buf = file.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = file.read(blocksize)
return hasher.hexdigest()
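# hashfile reads the file in 64 KiB chunks to keep memory bounded and returns
# the hex MD5 digest, used below to verify downloaded packages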
def load_ini(version):
baseurl = cache.get_baseurl(version)
if not baseurl:
        error('Version %s is not a known Unity version' % version)
ini_dir = os.path.join(data_path, version)
ini_name = UNITY_INI_NAME % version
ini_path = os.path.join(ini_dir, ini_name)
if args.update or not os.path.isfile(ini_path):
url = baseurl + ini_name
try:
response = urllib2.urlopen(url)
except Exception as e:
error('Could not load URL "%s": %s' % (url, e.reason))
if not os.path.isdir(ini_dir):
os.makedirs(ini_dir)
with open(ini_path, 'w') as file:
file.write(response.read())
config = ConfigParser.ConfigParser()
config.read(ini_path)
return config
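# The fetched ini has one section per package (e.g. 'Unity'; other section
# names vary per release) with keys such as url, size, installedsize, md5 and
# install, which the functions below rely on.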
def select_packages(version, config, packages):
available = config.sections()
if len(packages) == 0:
if args.all_packages:
selected = available
else:
print 'Using the default packages as defined by Unity'
selected = [x for x in available if config.getboolean(x, 'install')]
else:
# ConfigParser sections are case-sensitive, make sure
# we use the proper case regardless what the user entered
lower_to_upper = {}
for pkg in available:
lower_to_upper[pkg.lower()] = pkg
selected = []
for select in packages:
if select.lower() in lower_to_upper:
selected.append(lower_to_upper[select.lower()])
else:
print 'WARNING: Unity version %s has no package "%s"' % (version, select)
# If the main Unity editor package was selected,
# make sure it's installed first
if 'Unity' in selected:
selected.remove('Unity')
selected.insert(0, 'Unity')
return selected
def download(version, path, config, selected):
if not os.path.isdir(path):
os.makedirs(path)
for pkg in selected:
        baseurl = cache.get_baseurl(version)
        fileurl = baseurl + config.get(pkg, 'url')
        filename = os.path.basename(fileurl)
        output = os.path.join(path, filename)
        if not config.has_option(pkg, 'md5'):
            # filename must be computed before this warning is printed;
            # the original ordering raised a NameError here
            print 'WARNING: Cannot verify file "%s": No md5 hash found.' % filename
            md5hash = None
        else:
            md5hash = config.get(pkg, 'md5')
if os.path.isfile(output) and md5hash and hashfile(output) == md5hash:
print 'File %s already downloaded' % filename
else:
print 'Downloading %s (%s)...' % (filename, convertSize(config.getint(pkg, 'size')))
download_url(fileurl, output, config.getint(pkg, 'size'))
if md5hash and hashfile(output) != md5hash:
error('Downloaded file "%s" is corrupt, hash does not match.' % filename)
print 'Download complete!'
print ''
# ---- INSTALL ----
def find_unity_installs():
installs = {}
app_dir = os.path.join(args.volume, 'Applications')
if not os.path.isdir(app_dir):
error('Applications directory on target volume "%s" not found' % args.volume)
install_paths = [x for x in os.listdir(app_dir) if x.startswith('Unity')]
for install_name in install_paths:
plist_path = os.path.join(app_dir, install_name, 'Unity.app', 'Contents', 'Info.plist')
if not os.path.isfile(plist_path):
print "WARNING: No Info.plist found at '%s'" % plist_path
continue
installed_version = subprocess.check_output(['defaults', 'read', plist_path, 'CFBundleVersion']).strip()
installs[installed_version] = os.path.join(app_dir, install_name)
if len(installs) == 0:
print "No existing Unity installations found."
else:
print 'Found %d existing Unity installations:' % len(installs)
for install in installs:
print '- %s (%s)' % (install, installs[install])
print ''
return installs
def check_root():
global pwd
if not is_root and (not args.operation or args.operation == 'install'):
# Get the root password early so we don't need to ask for it
# after the downloads potentially took a long time to finish.
# Also, just calling sudo might expire when the install takes
# long and the user would have to enter his password again
# and again.
print 'Your admin password is required to install the packages'
pwd = getpass.getpass('User password:')
# Check the root password, so that the user won't only find out
# much later if the password is wrong
command = 'sudo -k && echo "%s" | /usr/bin/sudo -S /usr/bin/whoami' % pwd
result = subprocess.call(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result != 0:
error('User password invalid or user not an admin')
print ''
def install(version, path, config, selected, installs):
missing = False
for pkg in selected:
filename = os.path.basename(config.get(pkg, 'url'))
if not os.path.isfile(os.path.join(path, filename)):
print 'Package "%s" has not been downloaded' % filename
missing = True
if missing:
error('Some packages to be installed have not been downloaded')
if not version in installs and not 'Unity' in selected:
error('Installing only components but no matching Unity %s installation found' % version)
install_path = os.path.join(args.volume, 'Applications', 'Unity')
moved_unity_to = None
if version in installs and os.path.basename(installs[version]) == 'Unity':
# The 'Unity' folder already contains the target version
pass
elif os.path.isdir(install_path):
# There's another version in the 'Unity' folder, move it to 'Unity VERSION'
lookup = [vers for vers,name in installs.iteritems() if os.path.basename(name) == 'Unity']
if len(lookup) != 1:
error('Directory "%s" not recognized as Unity installation.' % install_path)
moved_unity_to = os.path.join(args.volume, 'Applications', 'Unity %s' % lookup[0])
if os.path.isdir(moved_unity_to):
error('Duplicate Unity installs in "%s" and "%s"' % (install_path, moved_unity_to))
os.rename(install_path, moved_unity_to)
# If a matching version exists elsewhere, move it to 'Unity'
moved_unity_from = None
if version in installs and os.path.basename(installs[version]) != 'Unity':
moved_unity_from = installs[version]
os.rename(moved_unity_from, install_path)
install_error = None
for pkg in selected:
filename = os.path.basename(config.get(pkg, 'url'))
package = os.path.join(path, filename)
print 'Installing %s...' % filename
command = ['/usr/sbin/installer', '-pkg', package, '-target', args.volume, '-verbose'];
if not is_root:
command = ['/usr/bin/sudo', '-S'] + command;
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if not is_root:
result = p.communicate(pwd + "\n")
else:
result = p.communicate(None)
if p.returncode != 0:
install_error = (filename, result[0])
break
# Revert moving around Unity installations
if moved_unity_from:
os.rename(install_path, moved_unity_from)
if moved_unity_to:
if os.path.isdir(install_path):
# If there previously was a 'Unity' folder, move the newly
# installed Unity to 'Unity VERSION'
new_install_path = os.path.join(args.volume, 'Applications', 'Unity %s' % version)
os.rename(install_path, new_install_path)
os.rename(moved_unity_to, install_path)
if install_error:
error('Installation of package "%s" failed: %s' % install_error)
print 'Installation complete!'
print ''
def clean_up(path):
# Prevent cleanup if there are unexpected files in the download directory
for file in os.listdir(path):
if not os.path.splitext(file)[1].lower() in ['.pkg', '.ini'] and not file == '.DS_Store':
            print 'WARNING: Cleanup aborted because of unknown file "%s" in "%s"' % (file, path)
return
shutil.rmtree(path)
downloads = os.path.dirname(path)
for file in os.listdir(downloads):
if not file == '.DS_Store':
return
shutil.rmtree(downloads)
# ---- MAIN ----
data_path = os.path.abspath(os.path.expanduser(args.data_dir))
cache = version_cache(os.path.join(data_path, CACHE_FILE))
settings = script_settings(os.path.join(data_path, SETTINGS_FILE))
pwd = None
is_root = (os.getuid() == 0)
def main():
print 'Install Unity Script %s' % VERSION
operation = args.operation
packages = [x.lower() for x in args.package] if args.package else []
stage = DEFAULT_STAGE
# Check the installed OpenSSL version
# unity3d.com only supports TLS1.2, which requires at least OpenSSL 1.0.1.
# macOS has deprecated OpenSSL in favor of its own crypto libraries, which
# means macOS will be stuck at OpenSSL 0.9.8, which doesn't support TLS1.2.
match = re.match('OpenSSL (\d+).(\d+).(\d+)(\w+)', ssl.OPENSSL_VERSION)
if match:
parts = match.groups()
        if (int(parts[0]), int(parts[1]), int(parts[2])) < (1, 0, 1):
print (
'ERROR: Your Python\'s OpenSSL library is outdated (%s).\n'
'At least OpenSSL version 1.0.1g is required.\n'
'You need to install a new version of Python 2 with an updated OpenSSL library.\n'
) % (ssl.OPENSSL_VERSION)
brew_check = os.system('brew help &> /dev/null')
if brew_check != 0:
print 'Either download it from www.python.org or install it using a package manager like Homebrew.'
else:
print (
'You can install python with Homebrew using the following command:\n'
'brew install python'
)
sys.exit(1)
# Make sure data_dir exists
if not os.path.isdir(data_path):
os.makedirs(data_path)
print 'Writing data to %s (use --data-dir to change)' % data_path
print ''
# Handle adding and removing of versions
if args.discover or args.forget:
if args.forget:
for version in args.forget:
if cache.remove(version):
print 'Removed version %s from cache' % version
if args.discover:
for url in args.discover:
version = cache.add(url)
if version:
print 'Added version %s to cache' % version
cache.save()
print ''
if args.list or len(args.versions) == 0:
operation = 'list-versions'
# Legacy: Migrate default packages from cache to settings
legacy_default_packages = cache.migrate_default_packages()
if legacy_default_packages and len(settings.get_default_packages()) == 0:
settings.set_default_packages(legacy_default_packages)
# Save default packages
if args.save:
if len(packages) > 0:
print 'Saving packages %s as defaults' % ', '.join(packages)
print ''
else:
print 'Clearing saved default packages'
print ''
settings.set_default_packages(packages)
# Main Operation
if operation == 'list-versions':
find_unity_installs() # To show the user which installs we discovered
if args.list:
stage = RELEASE_LETTERS[args.list]
if not stage:
stage = 'a'
cache.update(stage, force = args.update)
cache.list(stage)
print ''
if not args.list:
print 'Only listing release versions of Unity, use "-l patch|beta" to list patch or beta versions'
print 'List available packages for a given version using "--packages VERSION"'
else:
installs = find_unity_installs()
sorted_installs = sorted(installs.keys(), compare_versions)
        # Determine the maximum release stage of all versions
max_strength = RELEASE_LETTER_STRENGTH[stage]
for version in args.versions:
strength = RELEASE_LETTER_STRENGTH[parse_version(version)[3]]
if strength > max_strength:
stage = parse_version(version)[3]
max_strength = strength
if args.update or operation != 'install':
cache.update(stage, force = args.update)
adding_packages = (not operation or operation == 'install') and len(packages) > 0 and not 'unity' in packages
if adding_packages:
print 'Installing additional packages ("Unity" editor package not selected)'
if len(sorted_installs) == 0:
                error('No Unity installation found, install the "Unity" editor package first')
print 'Trying to select the most recent installed version'
version_list = sorted_installs
else:
print 'Trying to select most recent known Unity version'
version_list = cache.get_sorted_versions(stage)
versions = set()
for input_version in args.versions:
selected = select_version(input_version, version_list)
if not selected:
# Try to find version by guessing the release notes url
if not adding_packages and cache.autoadd(input_version):
# Try to re-select version with the updated cache
version_list = cache.get_sorted_versions(stage)
selected = select_version(input_version, version_list)
if not selected:
error('Could not find a Unity version that matches "%s"' % input_version)
versions.add(selected)
print ''
for version in versions:
config = load_ini(version)
if operation == 'list':
print 'Available packages for Unity version %s:' % version
for pkg in config.sections():
print '- %s%s (%s)' % (
pkg,
'*' if config.getboolean(pkg, 'install') else '',
convertSize(config.getint(pkg, 'size'))
)
print ''
else:
path = os.path.expanduser(os.path.join(data_path, version))
print 'Processing packages for Unity version %s:' % version
if len(packages) == 0 and not args.unity_defaults:
packages = settings.get_default_packages()
if len(packages) > 0:
print 'Using saved default packages'
selected = select_packages(version, config, packages)
if len(selected) == 0:
print 'WARNING: No packages selected for version %s' % version
continue
print ''
print 'Selected packages: %s' % ", ".join(selected)
if operation == 'download' or not operation:
print 'Download size: %s' % convertSize(sum(map(lambda pkg: config.getint(pkg, 'size'), selected)))
if operation == 'install' or not operation:
print 'Install size: %s' % convertSize(sum(map(lambda pkg: config.getint(pkg, 'installedsize'), selected)))
print ''
if not operation and 'Unity' in selected and version in installs:
print 'WARNING: Unity version %s already installed at "%s", skipping.' % (version, installs[version])
                print 'To install additional packages, don\'t select the "Unity" editor package'
                print 'Remove the existing version to re-install this Unity version'
print 'Separate --download and --install calls will re-install the Unity version'
print ''
continue
check_root()
if operation == 'download' or not operation:
download(version, path, config, selected)
if operation == 'install' or not operation:
install(version, path, config, selected, installs)
if not args.keep and not operation:
clean_up(path)
if operation == 'list':
print 'Packages with a * are installed by default if no packages are selected'
print 'Select packages to install using "--package NAME"'
try:
main()
except KeyboardInterrupt:
pass
except SystemExit:
pass
except BaseException, e:
if args.verbose:
traceback.print_exc()
else:
print 'ERROR: ' + str(e)
| 36.882298 | 147 | 0.614261 |
79410c670e8c2dd4f4730735723d12c8cc9ce4ca | 927 | py | Python | test/test_template_registration.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_template_registration.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_template_registration.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.template_registration import TemplateRegistration # noqa: E501
from openapi_client.rest import ApiException
class TestTemplateRegistration(unittest.TestCase):
"""TemplateRegistration unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTemplateRegistration(self):
"""Test TemplateRegistration"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.template_registration.TemplateRegistration() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.609756 | 98 | 0.717368 |
79410e2c6369500a0ffe3b8c2e0cbc351890d3de | 142 | py | Python | scripts/save_next_appointment.py | rugo/sac_client | 64b4a2bb1c2fadbc28facac3149e8d5cc097ba63 | [
"Apache-2.0"
] | null | null | null | scripts/save_next_appointment.py | rugo/sac_client | 64b4a2bb1c2fadbc28facac3149e8d5cc097ba63 | [
"Apache-2.0"
] | null | null | null | scripts/save_next_appointment.py | rugo/sac_client | 64b4a2bb1c2fadbc28facac3149e8d5cc097ba63 | [
"Apache-2.0"
] | null | null | null | #!/opt/sac/bin/python
from sac import util
from sac import config
util.save_appointment_to_file(config.APP_CACHE_PATH, config.APP_ERROR_PATH) | 28.4 | 75 | 0.838028 |
79410ef988f942089b54e4f650293b82fb47a886 | 120,069 | py | Python | resources.py | jurjendejong/QGIS3_Delft3D_FM | 058f1ab20974fb0a36e61fe5942e131917d612c7 | [
"MIT"
] | 1 | 2019-10-21T07:42:49.000Z | 2019-10-21T07:42:49.000Z | resources.py | jurjendejong/QGIS3_Delft3D_FM | 058f1ab20974fb0a36e61fe5942e131917d612c7 | [
"MIT"
] | 1 | 2021-09-20T08:42:56.000Z | 2021-09-20T08:42:56.000Z | resources.py | jurjendejong/QGIS3_Delft3D_FM | 058f1ab20974fb0a36e61fe5942e131917d612c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.13.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x18\x0d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x00\x00\x00\x02\x00\x08\x06\x00\x00\x00\xf4\x78\xd4\xfa\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x82\x26\x00\x00\x82\x26\
\x01\x05\x76\x47\x9e\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x17\x8a\x49\x44\
\x41\x54\x78\xda\xed\xdd\x79\x94\x9d\x65\x7d\xc0\x71\xad\xda\xda\
\x9e\xda\xf6\x9c\xf6\x8f\x9a\x04\x09\x08\x28\x15\x15\x21\xa8\xa8\
\x2c\x0a\x28\xa2\x20\x42\xdb\xe3\x82\x6b\x5d\xea\x86\x0b\x2e\x47\
\x51\xb1\x5a\x15\x66\x92\x90\x00\x49\x80\x84\x2d\x01\x21\x10\x4c\
\x20\x9b\x81\x90\x65\x32\x4b\x66\xb2\xef\xfb\x36\x24\xb9\xef\x7d\
\xef\x7b\xcb\x16\x20\x09\x09\xe9\x73\x93\x1b\x9a\x63\x43\x98\xc9\
\xcc\x9d\x79\xef\x7d\x3f\x7f\x7c\xce\x90\x73\x02\x84\xcb\xfb\x3c\
\xbf\xef\xbd\xf7\x5d\x5e\xb5\x6f\xdf\xbe\x57\x01\x00\xd9\xe2\x45\
\x00\x00\x01\x00\x00\x08\x00\x00\x40\x00\x00\x00\x02\x00\x00\x10\
\x00\x00\x80\x00\x00\x00\x04\x00\x00\x20\x00\x00\x00\x01\x00\x00\
\x08\x00\x00\x40\x00\x00\x00\x02\x00\x00\x10\x00\x00\x80\x00\x00\
\x00\x04\x00\x00\x20\x00\x00\x00\x01\x00\x00\x02\x00\x00\x10\x00\
\x00\x80\x00\x00\x00\x04\x00\x00\x20\x00\x00\x00\x01\x00\x00\x08\
\x00\x00\x40\x00\x00\x00\x02\x00\x00\x10\x00\x00\x80\x00\x00\x00\
\x04\x00\x00\x20\x00\x00\x00\x01\x00\x00\x08\x00\x00\x40\x00\x00\
\x00\x02\x00\x00\x04\x00\x00\x20\x00\x00\x00\x01\x00\x00\xd4\x54\
\x00\xe4\x92\xe2\xb9\x00\x50\x85\x8e\x37\xc8\xbb\x16\x00\xfb\x00\
\xa0\x0a\x5d\x6b\x90\x0b\x00\x00\x04\x00\x02\x00\x00\x01\x80\x00\
\x00\x40\x00\x20\x00\x00\x10\x00\x02\xc0\x01\x04\x80\x00\x10\x00\
\x00\x20\x00\x04\x00\x00\x08\x00\x01\x00\x00\x02\x40\x00\x00\x80\
\x00\x10\x00\x00\x20\x00\x04\x00\x00\x08\x00\x01\x00\x00\x02\x40\
\x00\x00\x80\x00\x10\x00\x00\x20\x00\x04\x00\x00\x08\x00\x01\x00\
\x00\x02\x40\x00\x00\x80\x00\x10\x00\x00\x08\x00\x04\x00\x00\x02\
\x00\x01\x00\x80\x00\x40\x00\x00\x20\x00\x10\x00\x00\x08\x00\x01\
\x00\x00\x02\x40\x00\x00\x80\x00\x10\x00\x00\x20\x00\x04\x00\x00\
\x08\x00\x01\x00\x40\x8a\x25\xfb\x72\xd1\xb6\x3d\xb9\xfc\xf6\xdd\
\xe1\xd7\xbb\x05\x00\x02\x00\x20\xc5\xa2\xc7\xd7\x26\xf9\x65\x53\
\x96\xe7\x9b\x47\x36\xc7\x33\xeb\x67\xc5\x53\xaf\x7e\xa4\x30\xe1\
\x5b\x93\x0b\x63\xaf\x78\x38\x19\x7d\xf1\x84\x64\xd4\x39\x13\x92\
\xe1\xa7\x3f\x54\x1c\x7a\xf2\xa4\xe2\xe0\xe3\xfe\x54\x1c\x78\xcc\
\xec\x62\x7d\xdf\xb6\x60\x45\xb1\xae\xcf\xa6\x62\xdd\x1b\xf3\xc1\
\x8e\xe0\xc5\x60\x5f\xd9\xae\x20\x0e\xbf\x67\x63\x71\xd0\xb1\xcb\
\x8a\xd7\x9f\x30\x37\xb9\xf1\x94\x59\xc9\x88\x33\xa6\x25\xa3\xce\
\x9d\x58\x18\x73\xc9\xf8\xc2\xfd\x9f\x1f\x17\x4f\xba\xea\xe1\x78\
\x46\xdd\xa3\xf9\xd6\x3b\x5b\xf2\x4b\x26\xad\x8c\xd6\x36\x47\xd1\
\x96\x15\x2f\xe4\x0a\x05\x01\x20\x00\x00\xe8\xf2\x90\xdf\xb4\x64\
\x5b\xbe\x75\xf4\xbc\x78\xca\x4f\x67\x17\xee\xbe\x6c\x56\x32\x7c\
\xc0\xdc\x30\xcc\xd7\x94\x07\xf7\xbe\x14\xda\x1d\xfe\x7c\xeb\x43\
\x78\x34\x15\xee\xfd\xd4\x63\xf1\x8c\xeb\x9a\xa2\xe5\x8f\xac\xcb\
\xe5\xb6\xec\x16\x00\x02\x00\x80\x97\x1b\xf8\x6b\x5b\x36\xc4\x7f\
\xfa\x45\x43\x32\xf2\xac\xc6\xe2\xc0\x7e\xdb\x52\x3a\xe4\x8f\xce\
\xc0\x63\xe2\x64\xd8\xa9\x2b\x0a\xf7\x5c\xbe\x30\x9e\x76\xcd\x8a\
\xfc\xc2\x07\x0b\xd1\xe6\x65\x07\xbe\x82\x10\x00\x02\x00\x20\x33\
\x0a\xf1\x9e\x68\xc5\xf4\x55\xf1\xc4\xef\x97\x3e\x62\x6f\x29\xd6\
\xf7\x4d\x6a\x6a\xe0\x77\x54\x7d\xdf\xe7\x93\x1b\x4f\x79\x3c\x19\
\x7d\xc9\xba\x78\x66\xfd\x86\x68\xe3\xc2\x67\x05\x80\x00\x00\xa8\
\x2d\x71\xb4\x2b\xdf\x72\x5b\xdb\xfe\x77\xf8\x75\x7d\x9e\xce\xe4\
\xc0\xef\xd0\xa7\x05\xfd\x72\xe1\x35\x6a\x88\xa7\xff\xae\x29\xda\
\xbc\x34\x11\x00\x02\x00\xa0\x0a\x87\x7e\xfc\x42\xbe\x6d\xcc\xfc\
\xe4\xb6\x0f\xce\x09\x43\xff\x49\x03\xfe\x28\x0c\xea\xbf\x35\x19\
\xfd\xf1\xf9\xf9\xc6\xe1\xeb\xa2\xf6\xd5\x7b\x05\x80\x00\x00\x48\
\xed\xc7\xfb\xf9\x05\x63\x17\x26\x77\x7c\xb8\xa1\x58\xdf\xa7\x68\
\x88\x77\xb3\x21\x27\x6d\x2b\x8c\xbd\x62\x4d\x08\xab\xa7\x73\x5b\
\xd7\x0b\x00\x01\x00\xd0\xab\xd7\xd3\xbf\x98\x5f\x34\x7e\x49\x72\
\xd7\x45\xa5\xcb\xed\x62\x83\xba\xc7\xbc\x98\x8c\x18\xd0\x1e\x3f\
\x76\xed\xe6\xdc\xd6\x75\x7b\x05\x80\x00\x80\x74\x9f\xed\xbd\x79\
\xd9\x76\xaf\x43\x8d\xfc\xbf\x6c\x5f\x1d\x87\x77\xa3\x33\x8b\xf5\
\xfd\x72\x86\x71\xaf\x7b\x21\x19\x3e\x60\x71\x61\xdc\x57\x06\x87\
\xbf\xfe\x5b\xc3\x5c\x00\x40\xea\x94\xae\xe7\xce\xb7\x8e\x9e\xef\
\xb5\xa8\xe2\xc1\xbf\x6e\xee\xc6\xe4\xf6\x0b\x1a\xc2\xa0\xd9\x69\
\xf0\xa6\xd2\xb3\xc1\xbd\xc1\xc5\xc1\xeb\x0c\x76\x01\x00\xe9\x08\
\x80\x9b\xde\x39\xaf\x34\x38\x44\x40\xf5\xc9\x2f\x7e\x68\x69\x32\
\xfc\xf4\xb9\x7f\x76\x07\x3d\xd2\xad\x74\x1e\xc6\xcd\xc1\xd9\xc1\
\xab\x0d\x79\x01\x00\xbd\x1d\x00\xfb\x44\x40\xb5\x9c\xd4\x57\xd8\
\x9b\x6f\x1c\x31\xb7\x38\xe4\xa4\x65\x86\x69\xd5\x6b\x0f\xae\x0b\
\xde\x66\xd8\x0b\x00\xe8\x8d\x00\x68\x3b\x64\x43\x12\x01\x69\x95\
\xdf\xfe\x7c\x3c\xf5\xea\xd9\xc5\x81\x6f\xda\x64\x70\xd6\xa4\x69\
\xc1\x05\x86\xbe\x00\x80\x9e\x0b\x80\x61\xa7\xb6\xfd\xd9\x46\x24\
\x02\xd2\x64\xeb\xfa\x62\xe1\x81\x2f\xcd\xcc\xec\xdd\xf9\xb2\x67\
\x49\xf0\x79\xe7\x0a\x08\x00\xe8\x8d\x00\x10\x01\x69\x38\xb1\x6f\
\xc3\x82\xf6\xfd\x97\xf1\x1d\x38\x81\xcc\x60\xcc\x9e\xd2\xf3\x17\
\x7e\x12\xfc\x83\x00\x00\x2a\x14\x00\xef\x6a\x7b\x99\x0d\x48\x04\
\xf4\xc6\xe0\xdf\xb2\x32\x4a\x6e\x3f\xbf\x74\x46\xff\x1e\x43\x90\
\xe0\x99\x60\x48\xd0\x5f\x00\x00\xdd\x1d\x00\xad\x47\xd8\x7c\x76\
\xe6\xdb\x46\x2f\xf0\x3a\xf5\x80\xe8\xf1\x1d\x85\x07\xbe\x38\x33\
\xc5\x8f\xd7\xa5\x77\x95\x82\x70\x6c\x70\x86\x00\x00\x7a\x22\x00\
\x44\x40\x0f\x9c\xd5\x1f\x3f\xfa\xeb\x39\xc5\xfa\xbe\x91\x21\x47\
\x07\xcd\x0a\xce\x14\x00\x40\x17\x6f\x04\x74\x5a\x6b\x07\x36\x1c\
\x11\x50\x89\xeb\xf8\xdb\xee\x9e\x5f\x1c\x7c\xdc\x1a\x03\x8d\xa3\
\x34\x2e\x38\x41\x00\x00\x47\x19\x00\xfb\x6f\x24\xb3\x4f\x04\xf4\
\xe0\xf7\xfc\x6b\x5b\x36\x24\x37\xbd\x63\xbe\x01\x46\x37\xd8\x1d\
\x0c\x0d\xfe\x49\x00\x00\x95\x0a\x00\x11\xd0\x55\x71\xb4\x6b\xff\
\x25\x7d\x6e\xd9\x4b\xf7\x7b\xb2\x7c\xd5\xc0\xeb\x05\x00\xd0\xe1\
\x67\x01\x74\x72\xa3\x11\x01\x47\x79\xdb\xde\xe2\xa0\x63\x37\x18\
\x54\xf4\xc0\xdd\x05\x3f\x57\x2b\xb7\x19\x16\x00\x50\xc9\x00\x18\
\xd1\xe9\x00\x10\x01\x9d\xb1\x7d\xf3\x53\xc9\x1d\x1f\x69\x70\xbf\
\x7e\x7a\xd8\xc2\xe0\x3c\x01\x00\x74\x77\x00\x88\x80\x0e\x88\xe7\
\x0c\x6b\x71\x76\x3f\xbd\x6c\x72\x70\x9c\x00\x00\x0e\x13\x00\x67\
\xb4\x74\x61\x73\x11\x01\x87\x3b\xc9\x6f\xf3\xb2\xed\x1d\xbc\xba\
\x02\x7a\x42\xe9\xde\x12\xdf\x0d\xfe\x42\x00\x00\x87\x04\xc0\xbb\
\x5b\xba\xb8\xb9\x84\x08\x18\x23\x02\x0e\xbe\xeb\x9f\x59\xdf\x58\
\xac\xeb\xf3\xb4\xa1\x43\x0a\x35\x07\x27\x0b\x00\xa0\xbb\x02\x40\
\x04\x1c\xfc\xae\xff\x96\xf7\x35\x19\x32\xa4\xdc\xae\xe0\xe7\xd5\
\xf2\xb0\x21\x01\x00\x95\x0c\x80\x9b\xdf\xd3\xd2\x4d\x1b\x4b\x66\
\x23\x20\xbf\x68\xfc\x92\xe2\xc0\x7e\xdb\x0c\x17\xaa\xec\xa9\x83\
\xa7\x0b\x00\x10\x00\xfb\x44\xc0\xd1\x5c\xd7\x9f\xdf\x5d\xb8\xef\
\x33\xa5\xeb\xfa\xf7\x1a\x28\x54\xe9\xf3\x05\xae\x4b\xf3\xbd\x03\
\x04\x00\x54\x36\x00\x9a\xbb\x79\x53\xc9\x44\x04\x44\xeb\x5a\x37\
\x15\xaf\x3f\x61\xa5\x21\x42\x0d\x58\x1b\x9c\x25\x00\x20\x73\x01\
\xf0\xde\xe6\x0a\x6c\x28\x35\x1d\x01\xf1\xb4\x6b\x1a\x3c\xb5\x8f\
\x1a\x53\xfa\x14\xeb\x57\x69\xbb\x52\x40\x00\x40\xf5\x05\x40\x6d\
\x46\x40\xae\xfd\x99\x6e\x3a\x69\x12\xd2\x6a\x46\xf0\x46\x01\x00\
\x59\x08\x80\x5b\xce\x6c\xae\xe0\x66\x52\x33\x11\x10\xad\x6f\xdb\
\x54\x1c\xd4\x7f\xbd\x01\x41\x06\xe4\x83\x0b\x04\x00\xd4\x7c\x00\
\x54\xfc\xd2\xb5\x52\x04\x2c\xac\xea\xb3\xfc\xe7\xde\xd9\x56\xac\
\xeb\xf3\xa4\xc1\x40\xc6\xbe\x12\xf8\xef\xe0\x35\x02\x00\x04\x40\
\x26\x23\xa0\xf0\xc7\xaf\x39\xcb\x9f\x2c\x9b\x1d\xf4\x15\x00\x20\
\x00\xb2\x13\x01\xb9\xc7\x77\xf8\xbe\x1f\xf6\x2b\x04\x17\x0a\x00\
\xa8\xb5\x00\xb8\xf5\xfd\x3d\x79\xf7\xba\xaa\x88\x80\x68\xc3\xfc\
\x2d\xc5\x41\xfd\xd7\xd9\xf8\xe1\x25\xa5\xa7\x59\xfe\xa6\xa7\x1f\
\x33\x2c\x00\xa0\x76\x02\x20\xf5\x11\x90\x6f\x1d\x3d\xaf\x58\xd7\
\xe7\x09\x1b\x3e\x1c\xd6\xfd\xc1\x5f\x0b\x00\xa8\x89\x00\xf8\x40\
\x63\x2f\x6c\x22\xa9\x8c\x80\xc2\x84\x6f\xce\xf2\x7d\x3f\xbc\xa2\
\xd2\x93\x2e\xff\x59\x00\x40\xb5\x07\xc0\xc8\xb3\x1a\x7b\x69\x13\
\x49\x4f\x04\x14\x0a\x7b\x93\x3b\x2f\x9c\x6d\x63\x87\x0e\xdb\x12\
\xbc\x5d\x00\x80\x00\xa8\xde\x08\xc8\x6f\x7f\x3e\x19\x31\xa0\xd5\
\x86\x0e\x9d\x56\x7a\xec\xf5\x45\x02\x00\xaa\x36\x00\xce\x6e\xec\
\xe5\x4d\xa4\x14\x01\x8b\x7a\xe5\xbf\x7f\xdb\x86\x27\x8b\x43\xdf\
\xb2\xd4\x46\x0e\x5d\x7a\xa0\xd0\x95\x02\x00\xaa\x32\x00\xce\x69\
\xec\xf0\x2d\x6f\x18\x3d\xea\x07\x6c\x30\x7c\xe8\xf3\x9e\x7f\x83\
\x3d\x3f\x3d\x92\x66\xeb\xd9\x95\x0b\x00\xf1\x45\xbd\x26\xfc\x8d\
\x37\x08\xfd\x00\xec\xf9\xa1\xab\xd2\xac\x7d\x4d\xdf\x03\x40\x7c\
\x11\xfb\x87\x3f\x08\x1b\xbc\x29\xf4\xcd\xc7\xf5\x03\xe8\xf1\x9e\
\xff\xe3\xf6\xfc\xf4\xd5\x86\x3c\x7b\xf7\xef\x79\x00\x88\xdf\x74\
\x4a\x78\x5f\x58\xe5\x8d\x40\x3f\x00\x7b\x7e\xe8\x8b\x55\x79\x16\
\x4f\xe9\x49\x00\x88\xdf\x68\x71\xf8\x47\x2f\x3c\x95\xed\x07\xdc\
\xb6\xf9\xef\x17\xd0\x0f\xa0\x73\x7b\xfe\xdb\x7c\x6e\x3f\x95\x96\
\x66\xf2\xe2\xae\x05\x80\xf8\xc5\x17\x84\x3b\xbc\xd0\xd4\xa8\x1f\
\xb0\xc2\x00\x63\x82\x7b\xfe\x15\xf6\xfc\xd4\x48\x9a\xd1\x0b\x3a\
\x16\x00\xe2\x17\x9b\x13\x6e\x0c\x23\x5e\x5c\xea\xd9\x0f\x38\x53\
\x3f\x80\x71\xee\xf9\xcf\xb4\xe7\xa7\xae\x46\xf2\xcc\x9e\xd3\x76\
\x00\x88\x7f\x79\x6a\xb8\xd2\x8f\xbd\x68\xc4\x03\xa1\x1f\xc0\xf8\
\xf6\xfc\xfe\x07\x0f\xb5\x5f\x87\xe6\x19\x3e\x75\x5c\x01\x20\xfe\
\x85\x8b\xc2\xc3\x5e\x40\xf4\x03\xb0\xe7\x87\x5a\x4b\xb3\xfc\xa2\
\xdd\x06\x80\xf8\x87\x4e\x08\xf7\x78\xc1\xd0\x0f\xc0\x9e\x1f\x1a\
\x25\xcd\xf6\x13\xb6\x0b\x00\xf1\x1f\x1e\x18\x6e\x0d\xa3\x5e\x24\
\xf4\x03\xb0\xe7\x87\x46\x1a\xcd\xb3\xfe\xc0\xcd\x01\x20\xfe\x8f\
\x45\x7e\xec\x85\x7e\x80\x7e\x80\x3d\x3f\x14\xd5\x0f\x58\xb4\xf5\
\x27\x00\x29\x04\x7c\xc5\x8b\x42\xc1\xd6\x0c\xdd\xb6\xd0\xdf\x2f\
\xd0\xf8\x3d\xff\xc2\xf4\xb9\xfd\x6b\x9c\x77\x0a\x96\x66\xfd\xa2\
\x1d\x75\x00\x2e\x54\xfe\xa3\xf8\x7e\xc0\x17\xf4\x03\x1a\xb7\xe7\
\xff\x82\x3d\x3f\xca\x80\x69\xc6\xef\xf2\x4f\x01\xe4\x3f\xfe\x77\
\xb5\x94\x8c\x7e\xc0\x99\x0f\x1b\x9e\xb5\xdf\xf3\x3f\x6c\xcf\x4f\
\xe9\x3f\xdd\xcc\x33\x7d\xea\x78\x3e\x07\x60\x6e\xb8\xd9\x9e\x0c\
\xfd\x00\xfd\x00\x7b\x7e\xa8\xe5\x07\x02\xa5\x19\x3e\x77\x22\x9f\
\x04\xa8\x1f\x80\x7e\x80\x7e\x80\x3d\x3f\xd4\x70\xcf\xdf\xa9\xbf\
\x0b\x40\x3f\x80\xd2\xfb\x01\x8f\xeb\x07\x54\x7e\xcf\xff\xb8\xb3\
\x8a\x3d\x7f\x17\xfe\x36\x40\xfd\x00\xd0\x0f\xb0\xe7\x87\xfa\xec\
\xf9\x3b\xfa\xd7\x01\xeb\x07\x80\x7e\x80\x3d\x3f\xd4\x63\xcf\xdf\
\xf1\x00\xa0\x1f\x00\xfa\x01\xf6\xfc\x50\xfd\x3d\x7f\xd7\x02\x80\
\x7e\x00\xe8\x07\xd8\xf3\x43\x75\xf7\xfc\x5d\x0f\x00\x63\xfa\x01\
\x57\x49\xe5\x14\xde\x0f\xb8\x5f\x3f\xa0\x6b\x7b\xfe\xfb\x9d\x31\
\x0a\xdf\xf3\x5f\x35\xde\x3d\x7f\x4f\x02\x80\x7e\x00\xb4\xf4\x03\
\xbe\xa1\x1f\x30\x61\xdf\xb0\xe7\xc7\x7d\x32\x91\x3d\x7f\x4f\x03\
\xc0\x98\x20\xb0\x30\x7c\xd9\x9b\x47\xe1\xfd\x80\xe5\xfa\x01\x6d\
\xef\xf9\x97\xfb\x89\x22\x85\x4b\x33\x74\x61\xb7\xe6\x74\xd7\x02\
\x80\x7e\x00\xe8\x07\xd8\xf3\x43\x7f\xf7\xfc\x7d\x0d\x00\xfa\x01\
\xb0\xb5\x1f\x30\x4f\x3f\x60\xb7\x7b\xfe\x79\xf6\xfc\xd8\xf3\x77\
\x70\xcf\xdf\xf7\x00\xa0\x1f\x00\xfa\x01\xf6\xfc\xd0\xfb\x3d\x7f\
\x65\x02\x80\x7e\x00\xf8\xfc\x00\x7f\x9e\x1f\x7a\xb7\xe7\xaf\x5c\
\x00\x18\x13\x04\x2e\xd0\x0f\x40\x3f\xa0\xbc\x7e\x80\x3d\x3f\x6c\
\x9e\x7d\x17\xf4\x73\x06\xf7\x35\x00\xe8\x07\xc0\x98\x7e\xc0\xb2\
\x02\xfa\x01\xcb\xec\xf9\xf1\xd3\xbf\x5e\xee\xf9\x2b\x1d\x00\x5a\
\xfa\x01\x9f\xb4\x07\xa4\xe8\x3d\xe0\xa7\x8e\xf9\x5a\x23\xfb\x01\
\x69\xcf\x1f\xdf\x9b\xe7\x9b\xc2\xf7\xfc\x9f\xec\xf5\x9e\xbf\x16\
\x01\x40\x3f\x00\x1a\xd8\x0f\xb0\xe7\x87\xbe\xee\xf9\x6b\x15\x00\
\x5a\xfa\x01\x0f\x39\x38\x94\xdd\x0f\x38\x75\x45\x7d\xf7\xfc\xa7\
\xda\xf3\x53\xba\x87\xfa\xbd\xe7\xaf\x65\x00\xd0\x0f\x80\x9a\xf6\
\x03\xec\xf9\xa1\x32\x7b\xfe\xda\x06\x00\xfd\x00\xd8\xa6\x1f\x50\
\xed\xcf\x0f\xd8\xb2\xe7\xf7\xe7\xf9\xb1\xe7\xaf\xd0\x9e\xbf\xf6\
\x01\x40\x3f\x00\x2a\xdc\x0f\xb0\xe7\x87\xca\xee\xf9\x1b\x13\x00\
\xf4\x03\x60\x9b\x7e\xc0\xbd\x15\xd8\xf3\xdf\x6b\xcf\x8f\x3d\x7f\
\x75\xf7\xfc\x8d\x0b\x00\x39\x04\x0c\x86\x2b\xfd\xaf\x0e\xf4\x03\
\xfa\xd0\x0f\xb0\xe7\x87\x35\x79\x06\x0d\xd6\x75\x8e\xd6\x36\x00\
\x8c\x09\x02\x73\xf4\x03\xd0\x0f\xe8\x51\x3f\xc0\x9e\x1f\xb6\xee\
\xf9\xe7\xd4\x7d\x7e\xd6\x3e\x00\xb4\xf4\x03\xee\x76\x38\xd1\x0f\
\xe8\x42\x3f\xc0\x9e\x1f\x5e\xce\x33\x66\x61\x53\xe6\x66\x63\x02\
\x80\x7e\x00\xbc\xd2\x0f\xf8\x6e\x27\xfb\x01\xf6\xfc\x50\xdf\x3d\
\x7f\x51\x01\x40\x3f\x00\x3a\xd4\x0f\xb0\xe7\x87\xda\xef\xf9\x8b\
\x0b\x00\xfa\x01\x30\x81\x7e\x80\x3d\x3f\x34\x66\xcf\x5f\x6c\x00\
\x18\x13\x04\x8e\xd7\x0f\x40\x3f\x60\x37\xfd\x00\x7b\x7e\xd8\xba\
\xe7\x3f\xbe\x84\xd9\x58\x44\x00\x18\x13\x04\x96\xea\x07\xe0\xf3\
\x03\xb6\xef\x07\xd8\xf3\xc3\xe6\xd9\xb0\xb4\xa4\x99\x58\x54\x00\
\x68\xe9\x07\x0c\x3b\xf0\x14\xdf\x0f\xb0\xe7\x87\xe1\x26\xef\xf9\
\x05\x80\x9d\xf7\x03\x6e\xb2\xe7\xa4\xf0\x3d\xa7\xf3\x4f\xc9\xe7\
\xff\xa6\xa6\xef\xf9\x05\x00\xfd\x00\x00\x0a\xdc\xf3\x0b\x00\xfa\
\x01\x00\x14\xb8\xe7\x17\x00\xf4\x03\x00\xec\xf9\x0b\xdc\xf3\x0b\
\x00\xfa\x01\x00\xf6\xfc\x08\x00\xfa\x01\x00\xf6\xfc\x02\x80\x17\
\x61\x4f\xfb\x01\x0f\x7a\xa0\x00\x2a\xef\x41\x7b\x7e\x01\xa0\x1b\
\xfd\x80\x2b\xf4\x03\x00\x2a\xbb\xe7\xbf\xc2\x9e\x5f\x00\xd0\x0f\
\x00\xb0\xe7\x47\x00\xe8\x5a\x3f\xe0\x2e\x0f\x1f\x40\xdf\xdc\x65\
\xcf\x2f\x00\xe8\x07\x00\xd8\xf3\x23\x00\xe8\x07\x00\xd8\xf3\x23\
\x00\xe8\x07\x00\xd8\xf3\x23\x00\xe8\x07\x00\xd8\xf3\x0b\x00\x5e\
\x84\x6e\x06\x81\x25\xfa\x01\x00\x6d\xef\xf9\x97\x98\x25\x02\x80\
\x7e\x00\x80\x3d\x3f\x02\x40\x6d\xfb\x01\x37\xea\x07\x00\xec\x74\
\xcf\x7f\xa3\x3d\xbf\x00\xd0\xe4\x20\xb0\x40\x3f\x00\x60\xbb\x3d\
\xff\x02\x33\x42\x00\xd0\x0f\x00\xb0\xe7\x47\x00\xd0\x0f\x00\xb0\
\xe7\x47\x00\x68\x90\x1f\xbf\xeb\xdc\xfd\xc3\xc7\xc3\xd3\xcf\x5c\
\x7d\xf2\xf2\xd5\x1f\x3d\x44\x3f\x00\x68\xae\xb8\xe3\xd2\x5d\x97\
\xee\xbc\x7c\xf7\xed\x6f\x16\x08\x00\xa5\x0d\xfe\x29\xe1\xfd\x61\
\x75\x78\xf9\x15\xef\x3e\xe7\x91\x55\x1f\x3a\xee\x7e\x17\x05\xd0\
\x34\xe9\x6e\x4b\x77\xdc\x36\x77\xde\x96\x3b\x30\xdd\x85\x53\xcc\
\x06\x01\xa0\x84\xe1\x7f\x7e\x78\xa0\xe5\x21\xd8\xd6\x7b\xce\x5a\
\xb1\xea\xc3\x47\x3e\xee\xd2\x00\x6a\x3f\xf8\xe3\x2e\x4b\x77\xda\
\x2e\xef\xbc\x2d\x77\xe2\xf9\x66\x84\x00\xd0\xd4\xc1\xbf\x20\xdc\
\xb9\x9b\x87\x60\xac\x0d\xcf\xbc\xef\xf4\xe5\xab\x3f\x72\xd8\x1a\
\x97\x08\x50\x3b\x71\x77\xa5\x3b\x2c\xdd\x65\xe3\xb8\xf7\xd2\x1d\
\xe9\x4f\x03\x08\x00\x8d\x19\xfc\x73\xc3\x4d\x61\x64\x1c\x0f\xc1\
\x36\x3f\x22\xd3\x0f\x00\x6a\xb8\xe7\x5f\xdd\xe6\x9d\x37\x92\xef\
\xcc\xb9\x66\x88\x00\x50\xd7\xc1\x3f\x35\x5c\x1d\xd6\xb4\xf9\x10\
\xbc\xbc\x83\x7e\xc0\x4a\x17\x0c\x50\xe1\x3d\xff\xca\x1d\xec\xf9\
\xdb\xb5\x26\xdf\xa1\x53\xcd\x14\x01\xa0\x4e\xc3\xff\xe2\xd0\xa9\
\x87\x40\x3f\x00\x68\xc2\x9e\xbf\x5d\xe9\x2e\xbd\xd8\x6c\x11\x00\
\xaa\x3e\xf8\x4f\x0c\x5f\xed\xd2\x43\xa0\x1f\x00\x34\x61\xcf\xdf\
\xae\x74\xb7\x9e\x68\xd6\x08\x00\x55\x1b\xfc\x07\x85\x3f\x0a\xa3\
\x3d\x78\x08\xf4\x03\x80\xba\xef\xf9\xdb\x35\x9a\xef\xda\x83\xcc\
\x1e\x01\xa0\xdf\x83\x7f\x7a\xf8\x8d\xf0\x5c\x8f\x1f\x02\xfd\x00\
\xa0\x09\x7b\xfe\x76\x3d\x97\xef\xde\xe9\x66\x91\x00\xd0\x8f\xe1\
\xff\x0b\xe1\xbb\x7d\x7e\x08\x5a\xfb\x01\xf7\xad\xfa\xf0\x91\x8f\
\xb9\xa4\x80\x2e\xec\xf9\x1f\x4b\x77\x4c\xa5\xee\xbc\x2d\x77\xf0\
\x2f\x98\x49\x02\x40\xaf\x06\xff\x29\xe1\x7f\x57\xec\x21\xd0\x0f\
\x00\x9a\xb0\xe7\x6f\x57\xba\x93\x4f\x31\xa3\x04\x80\x6e\x0d\xfe\
\xc3\xc2\x7f\x09\x9b\x2a\xfc\x10\xe8\x07\x00\x75\xdf\xf3\xb7\x6b\
\x53\xbe\xa3\x0f\x33\xb3\x04\x80\x4e\x0d\xfe\xbd\xc2\x6f\x85\x17\
\x6a\xf2\x10\xe8\x07\x00\x4d\xd8\xf3\xb7\xeb\x85\x7c\x67\xef\x65\
\x86\x09\x00\xed\x0e\xfe\x81\xf0\xcb\xe1\x07\x35\x7d\x08\xf4\x03\
\x80\x26\xec\xf9\xdb\xf5\x83\x7c\x87\x0f\x98\x69\x02\xc0\x78\x86\
\xff\x1b\xc3\x8a\x86\x3c\x04\x63\x6d\xcc\xfd\x80\x61\x97\x1d\x30\
\x66\xcf\x3f\x9c\xf7\xfc\x1b\x1b\x78\xef\xa5\xbb\xfc\x8d\x66\x9b\
\x00\xb0\xbb\xc1\x7f\x44\xf8\x8b\x06\x3e\x00\x3b\xeb\x07\x6c\x74\
\xf9\x41\xd1\x7b\xfe\x8d\x35\xdb\xf3\x4f\x44\xba\xdb\x8f\x30\xeb\
\x04\x80\xd6\xc1\xbf\x77\xf8\x9d\xf0\x62\x01\x0f\xc1\x98\x7e\xc0\
\x5b\xf5\x03\xa0\xe8\x3d\xff\x5b\x1f\x29\xea\xce\xdb\x72\xc7\xa7\
\xbb\x7e\x6f\xb3\xaf\xf0\x00\x10\x87\x60\x52\x78\x77\x78\xaa\xb0\
\x87\x40\x3f\x00\xec\xf9\x5f\x2e\xd8\x53\xf9\xee\x9f\x24\x00\x94\
\x39\xfc\xcf\x0c\xdf\x2c\xfc\x21\x68\xed\x07\x2c\xd3\x0f\x80\x46\
\xef\xf9\x97\x35\x74\xcf\xdf\xae\x34\x03\xce\x14\x00\xca\x19\xfc\
\xc7\x84\xbf\x72\xf0\x77\x6a\x48\x3f\x00\x1a\xb9\xe7\x1f\x72\xbf\
\xed\x54\x9a\x09\xc7\x08\x00\xcd\x1d\xfc\xfb\x86\x8f\x85\x97\x1c\
\x76\xfd\x00\xb0\xe7\xa7\xc5\x4b\x79\x46\xec\x2b\x00\x34\x67\xf0\
\x4f\x0e\xbf\x16\x9e\x71\xc0\xf5\x03\xc0\x9e\x9f\xdd\x78\x26\xcf\
\x8c\xc9\x02\x40\xbd\x87\xff\x39\xe1\xdb\x0e\xb4\xcf\x0f\x00\x7f\
\x9e\x9f\x71\x4a\xb3\xe3\x1c\x01\xa0\x7e\x83\xff\x75\xe1\x76\x07\
\x58\x3f\x00\xec\xf9\x99\xa0\x34\x4b\x5e\x27\x00\x54\x7f\xf0\xcf\
\x0a\x9f\x90\x7e\xbb\xde\x0f\x78\x74\xd5\x87\xe6\xeb\x07\x40\x25\
\xf6\xfc\xf3\xd3\x9e\xff\x51\x77\x53\x77\x7f\x0a\x9a\x67\xcb\x2c\
\x01\xa0\x7a\x83\x7f\x30\x5c\x26\xfd\xf6\xbc\x1f\xb0\x42\x3f\x00\
\xfa\xba\xe7\x5f\xe1\x2e\xea\xed\x4f\x41\xf3\xac\x19\x14\x00\xaa\
\x31\xfc\x97\x84\x07\x1d\x4c\xfd\x00\xb0\xe7\xa7\x47\xd2\xcc\x59\
\x22\x00\xf4\x6f\xf0\x2f\x0c\x77\x3b\x88\xfa\x01\x60\xcf\x4f\x9f\
\xa4\x19\xb4\x50\x00\xe8\xdd\xe0\x9f\x1b\x6e\x0e\x23\x0e\x9f\x7e\
\x00\xd8\xf3\xd3\x67\x23\x79\x26\xcd\x15\x00\xba\x37\xf8\xa7\x85\
\x6b\xc2\x5a\x07\xae\x36\x9f\x1f\xf0\xa8\xcb\x1b\x26\xb4\xe7\x7f\
\xd4\x9f\xe7\xaf\x8d\xb5\x79\x46\x4d\x13\x00\x3a\x3b\xfc\xdf\x16\
\xa4\x5f\xfd\x00\xb0\xe7\xa7\xea\xd2\xac\x7a\x9b\x00\x30\xf1\xc1\
\x7f\x52\x58\xee\x40\x35\xa0\x1f\x70\xd5\x1b\xf4\x03\x60\x4f\xf6\
\xfc\xf1\xac\xd8\xf3\x37\x42\x7a\x1f\x4f\x12\x00\xc6\x3f\xf8\x0f\
\x0e\x9f\x0e\xa3\x0e\x91\x7e\x00\xd8\xf3\x53\x53\xa3\x79\x96\x1d\
\x2c\x00\xec\x7e\xf0\xcf\x08\xd7\x86\xe7\x1c\x1c\xfd\x00\xb0\xe7\
\xa7\x21\x9e\xcb\xb3\x6d\x86\x00\xb0\xe3\xe1\x7f\x69\x78\xc2\x41\
\x29\xaa\x1f\xb0\x6c\xf5\x47\xe6\xe9\x07\x50\xe8\x9e\x7f\x5e\xda\
\xf3\x2f\xb3\xe7\x2f\x4a\x9a\x71\x97\x0a\x00\x3f\x19\xfc\xa7\x86\
\xaf\x3b\x18\xfa\x01\x86\x02\xf6\xfc\x14\x22\xcd\xbc\x53\x8b\x0d\
\x00\xf1\xcd\xcf\x0b\x9f\x09\x9b\x1c\x06\x72\x3f\xe0\x3e\x03\x82\
\x86\xef\xf9\xef\xb3\xe7\x27\xdb\x94\x67\xe0\xbc\x62\x02\x40\x7c\
\xb3\x33\xc3\x75\x61\x9d\x03\x80\x7e\x00\xf6\xfc\x14\x6e\x5d\x9e\
\x89\x33\x1b\x1b\x00\xe2\x9b\x1b\x08\xbf\x12\x9e\xf4\x86\xa3\x1f\
\x80\x3d\x3f\x6c\xe3\xc9\x3c\x23\x07\x1a\x15\x00\xe2\x1b\x3a\x3d\
\xac\xf4\x06\xa3\x1f\x80\x3d\x3f\xec\x52\x9a\x95\xa7\xd7\x3e\x00\
\xc4\x37\x71\x64\xf8\x9c\x37\x14\xfd\x00\xec\xf9\x61\x5c\xd2\xec\
\x3c\xb2\x76\x01\x20\xbe\xe8\xbd\xc3\xef\x86\xf5\xde\x44\x3a\xdb\
\x0f\x38\x4a\x3f\x80\x8a\xee\xf9\x8f\xb2\xe7\xa7\xd3\xd6\xe7\x59\
\xba\x77\xe5\x03\x40\x7c\x91\x93\xc2\xaf\x86\x1f\x79\xe3\xe8\x5e\
\x3f\xe0\x8c\xe5\xfa\x01\x54\x6b\xcf\x7f\x86\xcf\xed\xa7\x9b\x7e\
\x94\x67\xeb\xa4\x4a\x06\x80\xf8\xc2\xde\x12\xfe\xde\x1b\x85\x7e\
\x00\xf6\xfc\xd0\x15\x69\xc6\xbe\xa5\x32\x01\x20\xbe\x98\x63\xc3\
\x5f\x7b\x63\xf0\xf7\x0b\xe0\x73\xfb\xa1\x27\xd2\xcc\x3d\xb6\x6f\
\x01\x20\x7e\xf3\xfd\xc2\xef\x87\x0d\xde\x0c\xf4\x03\xb0\xe7\x87\
\x9e\xda\x90\x67\xf0\x7e\x3d\x0b\x00\xf1\x9b\x4d\x09\xef\x0d\xab\
\xbc\x01\x54\xb4\x1f\x30\x64\x58\xd1\xe1\x3d\xff\x90\x3d\x3f\x15\
\xb5\x2a\xcf\xe4\x29\x5d\x0d\x00\xf1\x1b\x9c\x1b\xfe\xc1\x0b\x8e\
\x7e\x00\xf6\xfc\x50\x29\x69\x36\x9f\xdb\xf1\x00\x10\xbf\xe8\x71\
\xe1\x4b\x5e\x60\xf4\x03\xb0\xe7\x87\x4a\x4b\xb3\xfa\xb8\x09\x07\
\x80\xf8\x45\x66\x87\x1b\xfc\xd8\x8b\x9a\xf7\x03\x56\xea\x07\x30\
\xce\x3d\xbf\x4f\x2e\xa5\xd6\xeb\xd0\x3c\xbb\x67\x8f\x3b\x00\xc4\
\xbf\x34\x18\xae\x08\xc3\x5e\x48\xf4\x03\xb0\xe7\x87\x5a\x1a\xce\
\xb3\x7c\x70\x8f\x02\x40\xfc\x83\x17\x84\x87\xbc\x70\xe8\x07\x60\
\xcf\x0f\x8d\x90\x66\xfa\x05\x3b\x0d\x00\xf1\x5f\x2e\x0a\x5f\xf1\
\x42\xe1\xef\x17\xc0\xe7\xf6\x43\x23\xa5\x19\xbf\xe8\x95\x00\x10\
\xff\xcf\x81\xe1\x96\x30\xea\xc5\x41\x3f\x00\x7b\x7e\x68\xb4\xd1\
\x3c\xf3\x0f\xdc\x1a\x00\x6e\x15\x00\xd0\x0f\xd0\x0f\xb0\xe7\x87\
\x22\x02\xc0\xad\x9b\x03\xc0\x98\x15\xc0\x89\xe1\xab\x5e\x1c\xf4\
\x03\xf4\x03\xec\xf9\xa1\x91\xd2\x8c\x3f\x71\x57\x25\xc0\x4b\x82\
\x9d\x18\xfa\x01\x06\xa7\x3d\x3f\x34\x43\x3a\xff\x97\xec\xe9\x1f\
\x03\x9c\x16\x3e\x18\xd6\x7a\xe1\x28\xb8\x1f\xe0\xef\x17\xf0\xb9\
\xfd\x50\x67\x6b\xf3\x2c\x9f\xd6\xce\x07\x01\x1d\xa0\x1f\x80\x7e\
\x80\x7e\x80\x3d\x3f\xd4\x72\xcf\x7f\x40\x27\x3e\x0a\x58\x3f\x80\
\xe2\x3f\x50\xe3\x99\xab\xde\xb0\x4c\x3f\xa0\xf2\x7b\xfe\x65\x3e\
\xc0\x0c\x7b\xfe\x9f\xec\xf9\x3b\xf9\x97\x01\x5d\xac\x1f\x80\x7e\
\x80\x7e\x80\x3d\x3f\x54\x72\xcf\x7f\x71\xb7\xff\x3a\x60\xfd\x00\
\xd0\x0f\xb0\xe7\x87\x1a\xec\xf9\x3b\x1a\x00\x5a\xfa\x01\x3e\x40\
\x08\xfd\x00\xfd\x00\x7b\x7e\xe8\xdf\x07\xfa\x1c\xd0\xee\x1c\x6f\
\x3b\x00\x8c\x09\x02\x27\xe8\x07\xa0\x1f\xa0\x1f\x60\xcf\x0f\x3d\
\xdd\xf3\x9f\x30\xd1\xf9\x3d\xe1\x00\xd0\xd2\x0f\x78\xc4\x1b\x43\
\xd1\xfd\x80\x6b\xf5\x03\xba\xf6\xe3\xfe\x6b\xed\xf9\x29\xde\x23\
\xe3\xdd\xf3\xf7\x24\x00\xe4\x10\x30\x55\x3f\x00\xfd\x00\xfd\x00\
\x7b\x7e\xe8\xca\x9e\x7f\x6a\x27\x67\x76\x47\x03\x80\x7e\x00\x8c\
\xe9\x07\xbc\x57\x3f\x60\xc2\x7b\xfe\xf7\xda\xf3\x63\xcf\x3f\x91\
\x3d\x7f\xcf\x03\x40\x4b\x3f\xe0\x1e\x6f\x22\xfa\x01\xfa\x01\xf6\
\xfc\x30\x2e\xf7\x74\x62\xcf\xdf\xb7\x00\xa0\x1f\x00\xdb\xf4\x03\
\x56\x18\xf0\xbb\xdd\xf3\xaf\xb0\xe7\xc7\x9e\xbf\x73\x7b\xfe\xbe\
\x07\x80\x31\xfd\x80\x6b\xf4\x03\x28\xbc\x1f\xb0\x72\xd5\x75\xfa\
\x01\xdb\x0d\xfe\xeb\x36\xef\xf9\x57\x3a\x23\x14\xbe\xe7\xbf\xa6\
\xd3\x7b\xfe\x4a\x04\x00\xfd\x00\xd0\x0f\xb0\xe7\x87\xde\xee\xf9\
\x2b\x15\x00\xf4\x03\x60\x9b\x7e\xc0\xf2\x22\xfb\x01\x5b\xf6\xfc\
\xcb\xed\xf9\xb1\xe7\xef\xee\x9e\xbf\x92\x01\x40\x3f\x00\xca\xec\
\x07\xd8\xf3\x43\xef\xf6\xfc\x95\x0e\x00\xfa\x01\xb0\x4d\x3f\xe0\
\x91\x06\xef\xf9\x1f\xb1\xe7\xc7\x9e\xbf\xb7\x7b\xfe\xca\x07\x00\
\xfd\x00\x68\x70\x3f\xc0\x9e\x1f\xfa\xb6\xe7\xaf\x4d\x00\xd0\x0f\
\x80\x06\x7d\x7e\x80\x3f\xcf\x0f\x7d\xdf\xf3\xd7\x2e\x00\x8c\x09\
\x02\x17\xe9\x07\x50\x78\x3f\xe0\xb1\x3a\xf6\x03\xf2\x9e\xff\x31\
\xef\x21\x85\xef\xf9\x2f\xaa\xf2\x8c\xad\x74\x00\xd0\x0f\x80\x7a\
\xf5\x03\xec\xf9\xa1\x5a\x7b\xfe\x5a\x07\x80\x96\x7e\xc0\xa7\xf4\
\x03\xd0\x0f\x98\xb7\xba\x82\x7b\xfe\xd5\xf6\xfc\xd8\xf3\x6f\x9e\
\x51\x07\xd4\x65\xae\xd6\x26\x00\xe8\x07\xc0\x76\xfd\x80\x0d\x15\
\xd8\xf3\x6f\xb0\xe7\x87\xea\xee\xf9\x1b\x15\x00\xf4\x03\xa0\x1a\
\xfd\x00\x7b\x7e\xa8\xfe\x9e\xbf\x91\x01\x40\x3f\x00\xfa\xd3\x0f\
\xb0\xe7\x87\xfa\xec\xf9\x1b\x1b\x00\xf4\x03\xa0\x87\xfd\x00\x7b\
\x7e\xa8\xdd\x9e\xbf\xf1\x01\x60\x4c\x10\x58\x14\xbe\xe2\x90\xa2\
\x1f\xd0\xc1\x7e\x80\x3d\x3f\xbc\x9c\x67\xcb\xa2\x26\xcd\xcc\x46\
\x05\x00\xfd\x00\xe8\x6c\x3f\xc0\x9e\x1f\xea\xbd\xe7\x2f\x2e\x00\
\x8c\xe9\x07\x5c\xad\x1f\x80\x7e\xc0\xf8\xfb\x01\xf6\xfc\xb0\x79\
\x76\x5c\x5d\xf7\x3d\x7f\x91\x01\x40\x3f\x00\xda\xe8\x07\xd8\xf3\
\x43\xa3\xf6\xfc\x45\x07\x00\xfd\x00\xd8\x83\x7e\x80\x3d\x3f\x34\
\x72\xcf\x2f\x00\x6c\xdf\x0f\x78\xd8\x41\x47\x3f\xc0\x9e\x1f\xb2\
\x87\x9b\xba\xe7\x17\x00\x76\xde\x0f\x58\xe3\xe0\x53\x6e\x3f\xe0\
\xec\x95\x9b\x79\x2d\x28\xd7\x9a\xa6\xef\xf9\x05\x80\x9d\x07\x81\
\xb9\x79\xd7\x33\xe2\x41\x00\x28\xc6\x48\xbe\xfb\xe7\x96\x3c\x03\
\x8b\x0e\x00\xfa\x01\x00\xf6\xfc\x02\x00\x29\x08\x5c\xa8\x1f\x00\
\xd0\xd8\x3d\xff\x85\x66\x9d\x00\xa0\x1f\x00\x60\xcf\x2f\x00\xb0\
\xd3\x7e\xc0\xcd\xfa\x01\x00\xb5\xdd\xf3\xdf\x5c\xfa\x9e\x5f\x00\
\xd0\x0f\x00\xb0\xe7\x47\x00\xd0\x0f\x00\xb0\xe7\x17\x00\xbc\x08\
\xfa\x01\x00\xf6\xfc\x02\x00\xfa\x01\x00\xf6\xfc\x02\x00\xfa\x01\
\x00\xf6\xfc\x02\x00\xfa\x01\x00\xf6\xfc\x02\x00\xad\xfd\x80\xab\
\xf4\x03\x00\xba\xb6\xe7\xbf\xca\x9e\x5f\x00\xd0\x0f\x00\xb0\xe7\
\x47\x00\xa8\x64\x10\x58\x18\xbe\xec\xe1\x05\x68\x5b\xba\x43\x17\
\x9a\x29\x02\x80\x7e\x00\x80\x3d\x3f\x02\x80\x7e\x00\x80\x3d\x3f\
\x02\x80\x7e\x00\x80\x3d\x3f\x02\x80\x7e\x00\x80\x3d\x3f\x02\x40\
\x55\x83\xc0\x05\xfa\x01\x40\xc1\x7b\xfe\x0b\xcc\x02\x01\x40\x3f\
\x40\x3f\x00\xb0\xe7\x47\x00\x28\xb6\x1f\xf0\x49\xfd\x00\xa0\xc1\
\x7b\xfe\x4f\xda\xf3\x0b\x00\xe8\x07\x00\xf6\xfc\x08\x00\xec\xa4\
\x1f\xf0\x90\x8b\x03\xa8\xb1\x87\xec\xf9\x05\x00\xf4\x03\x00\x7b\
\x7e\x04\x00\xf4\x03\x00\x7b\x7e\x04\x00\xf4\x03\x00\x7b\x7e\x04\
\x00\xf4\x03\x00\x7b\x7e\x04\x00\x76\x1c\x02\x06\xc3\x95\xfa\x01\
\x40\x1f\xf7\xfc\xe9\x0e\x1a\x74\x27\x0b\x00\xf4\x27\x08\xcc\xd1\
\x0f\x00\xfa\xb0\xe7\x9f\xe3\x0e\x16\x00\xa8\x4e\x3f\xe0\x6e\x97\
\x13\xd0\x45\x77\xdb\xf3\x0b\x00\xe8\x07\x00\xf6\xfc\x08\x00\xe8\
\x07\x00\xf6\xfc\x08\x00\xe8\x07\x00\xf6\xfc\x08\x00\xf4\x3d\x08\
\x1c\xaf\x1f\x00\x8c\x63\xcf\x7f\xbc\xbb\x53\x00\xa0\x59\x41\x60\
\xa9\x7e\x00\xb0\x8b\x3d\xff\x52\x77\xa5\x00\x40\xf3\xfb\x01\xc3\
\x2e\x3c\x20\xdf\x05\xf6\xfc\x02\x00\x85\xf5\x03\x6e\xd2\x0f\x80\
\xa2\xf7\xfc\x37\xd9\xf3\x0b\x00\xe8\x07\xb8\x10\xc1\x9e\x1f\x01\
\x00\xfd\x00\xc0\x9e\x1f\x01\x00\xfd\x00\xc0\x9e\x1f\x01\x00\xfd\
\x00\xc0\x9e\x1f\x01\x00\xfd\x00\xc0\x9e\x1f\x01\x80\x86\xf6\x03\
\x1e\x74\xa1\x42\x2d\x3c\x68\xcf\x8f\x00\x40\xa7\xfb\x01\x57\xe8\
\x07\x40\xa5\xf7\xfc\x57\xd8\xf3\x23\x00\xa0\x1f\x00\xf6\xfc\x20\
\x00\xd0\x95\x7e\xc0\x5d\x2e\x5f\xe8\xab\xbb\xec\xf9\x11\x00\xd0\
\x0f\x00\x7b\x7e\x10\x00\xd0\x0f\x00\x7b\x7e\x10\x00\xd0\x0f\x00\
\x7b\x7e\x10\x00\xd0\x0f\x00\x7b\x7e\x04\x00\xe8\x56\x10\x58\xa2\
\x1f\x00\x13\xda\xf3\x2f\x71\x97\x20\x00\xa0\x1f\x00\xf6\xfc\x20\
\x00\x50\xcb\x7e\xc0\x8d\xfa\x01\xb0\xcb\x3d\xff\x8d\xf6\xfc\x08\
\x00\x34\x35\x08\x2c\xd0\x0f\x80\x1d\xee\xf9\x17\xb8\x23\x10\x00\
\xd0\x0f\x00\x7b\x7e\x10\x00\xd0\x0f\x00\x7b\x7e\x10\x00\xd0\x0f\
\x00\x7b\x7e\x10\x00\xd0\x0f\x00\x7b\x7e\x10\x00\xd0\x0f\x00\x7b\
\x7e\x10\x00\xa8\x75\x3f\xe0\x72\xfd\x00\x6a\xbe\xe7\xbf\xdc\x9e\
\x1f\x01\x00\xda\x0b\x02\xb3\xf5\x03\xa8\xe9\x9e\x7f\xb6\x67\x18\
\x01\x00\x3a\xd3\x0f\xb8\xd3\x70\xa1\xe2\xee\xb4\xe7\x47\x00\x00\
\xfd\x00\xec\xf9\x41\x00\x00\xfd\x00\xec\xf9\x41\x00\x00\xfd\x00\
\xec\xf9\x41\x00\x00\xfd\x00\xec\xf9\x41\x00\x80\xf6\x82\xc0\xf9\
\xe1\x3b\x06\x14\x5d\x92\xce\xd6\xf9\x9e\x35\x04\x00\xd0\x0f\xc0\
\x9e\x1f\x04\x00\xa8\x60\x3f\xe0\x06\xfd\x00\x26\xb8\xe7\xbf\xc1\
\x9e\x1f\x01\x00\xf4\x03\xb0\xe7\x07\x01\x00\xf4\x03\xb0\xe7\x07\
\x01\x00\xea\xd4\x0f\x18\x32\xe8\x68\x31\x64\xcf\x8f\x00\x00\xe5\
\xf4\x03\x36\x1a\x7c\xc5\xdb\x68\xcf\x8f\x00\x00\xe5\x05\x81\xf9\
\xfa\x01\xc5\xef\xf9\xe7\x7b\x16\x10\x00\x40\x3f\xc0\x50\xb4\xe7\
\x07\x01\x00\x0a\xec\x07\x5c\xa6\x1f\xd0\xf8\x3d\xff\x65\xf6\xfc\
\x20\x00\x80\x7e\x80\x3d\x3f\x08\x00\xc0\x76\xfd\x80\x3b\x0c\xcf\
\xda\xbb\xc3\x9e\x1f\x04\x00\xd0\x0f\xb0\xe7\x07\x04\x00\xd0\x0f\
\xb0\xe7\x07\x01\x00\xd0\x0f\xb0\xe7\x07\x01\x00\xd0\x0f\xb0\xe7\
\x07\x01\x00\xd8\x51\x10\x58\xac\x1f\xd0\xf7\x3d\xff\x62\x67\x11\
\x04\x00\xd0\x0f\xb0\xe7\x07\x04\x00\xe8\x79\x3f\xe0\x7a\xfd\x80\
\xae\xef\xf9\xaf\xb7\xe7\x07\x01\x00\xf4\x03\xec\xf9\x01\x01\x00\
\x2a\xd7\x0f\x78\xc0\xe0\x9e\xb0\x07\xec\xf9\x41\x00\x80\xba\x85\
\x80\x29\xfa\x01\x13\xde\xf3\x4f\x71\x96\x40\x00\x80\xba\x06\x81\
\x59\xfa\x01\xe3\xde\xf3\xcf\x72\x76\x40\x00\x00\xfd\x00\x7b\x7e\
\x40\x00\x00\xfd\x00\x7b\x7e\x40\x00\x80\xfa\xf6\x03\x3e\x50\x78\
\x3f\x60\x28\xbf\x06\xf6\xfc\x20\x00\x80\x7e\x80\x3d\x3f\x20\x00\
\x40\x39\x41\xe0\xb8\xf0\xa5\x02\x86\x7f\xfa\x1e\x8f\xf3\x9e\x83\
\x00\x00\x94\xd1\x0f\xb0\xe7\x07\x01\x00\x28\xa8\x1f\x60\xcf\x0f\
\x02\x00\x50\x50\x3f\xc0\x9e\x1f\x04\x00\xa0\xb0\x7e\x80\x3d\x3f\
\x08\x00\x40\x07\x83\xc0\x79\x15\xef\x07\xa4\xaf\xed\x3c\xef\x15\
\x08\x00\x40\x19\xfd\x00\x7b\x7e\x10\x00\x80\x1e\xf6\x03\x3e\xd1\
\xe7\x7e\xc0\xc6\xfc\x35\xd8\xf3\x83\x00\x00\x14\xd2\x0f\xb0\xe7\
\x07\x01\x00\x28\xa8\x1f\x60\xcf\x0f\x02\x00\x50\xd1\x7e\xc0\xea\
\x2e\x0c\xfe\xd5\xf6\xfc\x20\x00\x00\xe5\xf4\x03\xec\xf9\x41\x00\
\x00\x0a\xeb\x07\xd8\xf3\x83\x00\x00\x14\xd4\x0f\xb0\xe7\x07\x01\
\x00\x68\x50\x3f\xe0\xfd\xbb\xe9\x07\xac\xce\xff\x8c\x3d\x3f\x08\
\x00\x40\x01\xfd\x00\x7b\x7e\x10\x00\x80\x82\xfa\x01\x5f\xcc\xec\
\xf9\xa1\x40\xff\x1f\x19\x8e\x77\x5c\xdd\x81\x8b\x10\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0c\x5d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x00\x00\x00\x02\x00\x08\x03\x00\x00\x00\xc3\xa6\x24\xc8\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x96\xc7\x00\x00\x96\xc7\x01\
\x17\xb5\xfe\x65\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x5b\x50\x4c\x54\
\x45\xff\xff\xff\x42\x4a\x60\x42\x4a\x60\x42\x4a\x60\x42\x4a\x60\
\x42\x4a\x60\x53\x5b\x6d\x5e\x65\x75\x60\x67\x77\x63\x6a\x79\x68\
\x6e\x7d\x6c\x72\x80\x6e\x74\x84\x74\x7a\x86\x76\x7d\x8c\x7d\x83\
\x8e\x84\x84\x7e\x85\x8c\x9a\x88\x8e\x97\x89\x83\x68\x8c\x92\x9c\
\x93\x98\xa2\x95\x93\x86\x9b\x90\x67\x9d\xa2\xa7\xa2\xa6\xaa\xad\
\xb1\xb3\xaf\x9f\x64\xb2\xb6\xb7\xb6\xbb\xc1\xba\xbd\xbd\xc3\xc6\
\xc4\xc4\xc7\xc5\xc6\xc9\xc6\xc7\xca\xc7\xc9\xcc\xc9\xd7\xdb\xda\
\xe2\xbd\x3e\xe5\xea\xeb\xe7\xec\xed\xe9\xd9\x9c\xeb\xba\x16\xeb\
\xba\x17\xeb\xbb\x18\xeb\xbb\x19\xeb\xbb\x1a\xeb\xbb\x1b\xeb\xbc\
\x1b\xec\xbc\x1d\xec\xbc\x1e\xec\xbd\x1f\xec\xbd\x20\xec\xbd\x21\
\xec\xbe\x22\xec\xbe\x23\xec\xbe\x24\xec\xbe\x25\xec\xbf\x26\xec\
\xbf\x27\xed\xbf\x28\xed\xc0\x29\xed\xc0\x2a\xed\xc0\x2b\xed\xc0\
\x2c\xed\xc1\x2d\xed\xc1\x2e\xed\xc2\x30\xed\xc2\x31\xee\xc3\x33\
\xee\xc3\x34\xee\xc3\x35\xee\xc4\x37\xee\xc4\x38\xee\xc4\x39\xee\
\xc5\x3a\xee\xc5\x3b\xee\xc6\x3d\xef\xc6\x3f\xef\xc6\x40\xef\xc7\
\x41\xef\xc7\x42\xef\xc8\x44\xef\xc9\x47\xef\xc9\x48\xef\xc9\x49\
\xef\xc9\x4a\xf0\xca\x4b\xf0\xcb\x4f\xf0\xcb\x50\xf0\xcc\x51\xf0\
\xcc\x52\xf0\xcc\x53\xf0\xcd\x56\xf1\xcd\x57\xf1\xcd\x58\xf1\xce\
\x59\xf1\xce\x5b\xf1\xcf\x5b\xf1\xcf\x5c\xf1\xcf\x5d\xf1\xcf\x5e\
\xf1\xd0\x61\xf2\xd1\x64\xf2\xd1\x65\xf2\xd2\x66\xf2\xd2\x67\xf2\
\xd3\x69\xf2\xd3\x6a\xf2\xd3\x6b\xf2\xd3\x6c\xf3\xd4\x6e\xf3\xd4\
\x6f\xf3\xd5\x70\xf3\xd5\x72\xf3\xd6\x75\xf3\xd7\x78\xf4\xd7\x7a\
\xf4\xd9\x7d\xf4\xd9\x7e\xf4\xda\x81\xf4\xdb\x85\xf5\xdb\x86\xf5\
\xdc\x87\xf5\xdc\x88\xf5\xdc\x89\xf5\xdc\x8a\xf5\xdd\x8b\xf5\xdd\
\x8d\xf5\xdd\x8e\xf5\xde\x90\xf6\xdf\x92\xf6\xdf\x94\xf6\xe0\x95\
\xf6\xe0\x96\xf6\xe0\x98\xf6\xe1\x9a\xf7\xe2\x9c\xf7\xe2\x9d\xf7\
\xe2\x9e\xf7\xe3\xa1\xf7\xe3\xa2\xf7\xe4\xa4\xf7\xe5\xa6\xf8\xe5\
\xa8\xf8\xe6\xaa\xf8\xe7\xae\xf8\xe7\xaf\xf8\xe7\xb0\xf8\xe8\xb1\
\xf9\xe9\xb5\xf9\xe9\xb6\xf9\xea\xb7\xf9\xec\xbd\xf9\xec\xbe\xfa\
\xec\xbf\xfa\xec\xc0\xfa\xed\xc1\xfa\xed\xc4\xfa\xee\xc5\xfa\xee\
\xc7\xfa\xef\xc9\xfa\xef\xca\xfb\xf0\xcb\xfb\xf0\xcc\xfb\xf0\xcd\
\xfb\xf0\xce\xfb\xf1\xcf\xfb\xf1\xd1\xfb\xf3\xd6\xfc\xf3\xd7\xfc\
\xf4\xd9\xfc\xf4\xda\xfc\xf4\xdb\xfc\xf5\xdc\xfc\xf5\xdd\xfc\xf6\
\xe0\xfc\xf6\xe2\xfd\xf7\xe3\xfd\xf7\xe5\xfd\xf8\xe7\xfd\xf8\xe8\
\xfd\xf9\xeb\xfd\xf9\xec\xfd\xfa\xed\xfe\xfa\xee\xfe\xfa\xef\xfe\
\xfb\xf0\xfe\xfb\xf1\xfe\xfb\xf3\xfe\xfc\xf4\xfe\xfc\xf5\xfe\xfc\
\xf6\xfe\xfd\xf7\xfe\xfd\xf8\xfe\xfd\xf9\xff\xfd\xfa\xff\xfe\xfa\
\xff\xfe\xfc\xff\xfe\xfd\xff\xff\xfe\xff\xff\xff\x48\xdb\xe9\x6b\
\x00\x00\x00\x05\x74\x52\x4e\x53\x00\x15\x8c\x90\xd2\x54\x1f\x7a\
\x1a\x00\x00\x09\x63\x49\x44\x41\x54\x78\xda\xed\xdd\x7d\xac\x9e\
\xe3\x01\xc7\xf1\xfb\x39\x47\xa7\x2b\xba\x62\x45\xd1\xa6\xeb\x89\
\x15\x45\x44\x86\xec\xc5\x3f\xa6\x19\xad\x19\x09\x46\xa5\xff\x90\
\x2d\x31\x95\x1a\x93\x89\xb7\x6d\x41\x9a\x76\x21\xd3\x75\x13\x11\
\x99\x20\xd1\x34\x7b\xf1\x96\x08\x09\x36\xf1\x16\xaf\x11\xcd\x6c\
\x99\x1d\xa5\x98\xb3\xcc\xda\x0a\xd5\x56\x7b\xba\x6c\xc9\xb6\xf3\
\xf2\xe8\x79\x5e\xee\xeb\xb9\xef\xeb\xba\x3e\xdf\xbf\x7a\x4e\x4f\
\x9e\xfb\x9c\xe7\xfa\xf4\xf7\xdc\xc7\x1f\x8f\xbe\xe2\xdf\xf5\x4f\
\x0a\xdd\xcc\xb9\xe5\xf5\xc5\x81\x39\x29\x35\x63\xa2\xe7\x6e\xb7\
\x46\x11\xae\xbe\x22\xbe\x1a\x8d\x22\xab\x1a\xfd\x0d\x00\x46\x7f\
\xd3\x04\xe4\x0d\x20\xb7\x09\x08\x29\x20\x4a\x00\xd9\x4d\x40\x40\
\x01\x71\x02\xc8\x6e\x02\xc2\x09\x88\x13\x40\x7e\x13\x10\x4c\x40\
\xa4\x00\xf2\x9b\x80\x50\x02\x22\x05\x90\xe1\x04\x04\x12\x10\x2b\
\x80\x0c\x27\x20\x8c\x80\x58\x01\xe4\x38\x01\x41\x04\x44\x0b\x20\
\xc7\x09\x08\x21\x20\x5a\x00\x59\x4e\x40\x00\x01\xf1\x02\xc8\x72\
\x02\xca\x17\x10\x2f\x80\x3c\x27\xa0\x74\x01\x11\x03\xc8\x73\x02\
\xca\x16\x10\x31\x80\x4c\x27\xa0\x64\x01\x31\x03\xc8\x74\x02\xca\
\x15\x10\x33\x80\x5c\x27\xa0\x54\x01\x51\x03\xc8\x75\x02\xca\x14\
\x10\x35\x80\x6c\x27\xa0\x44\x01\x71\x03\xc8\x76\x02\xca\x13\x10\
\x37\x80\x7c\x27\xa0\x34\x01\x91\x03\xc8\x77\x02\xca\x12\x10\x39\
\x80\x8c\x27\xa0\x24\x01\xb1\x03\xc8\x78\x02\xca\x11\x10\x3b\x80\
\x9c\x27\xa0\x14\x01\xd1\x03\xc8\x79\x02\xca\x10\x10\x3d\x80\xac\
\x27\xa0\x04\x01\xf1\x03\xc8\x7a\x02\xba\x17\x10\x3f\x80\xbc\x27\
\xa0\x6b\x01\x09\x00\xc8\x7b\x02\xba\x15\x90\x00\x80\xb0\x13\xb0\
\x7d\x73\xb7\xed\xac\xb5\x80\x14\x00\x04\x9d\x80\x6d\xef\x77\xdb\
\x70\xad\x37\x20\x05\x00\x99\xdf\x05\x74\x27\x20\x09\x00\x99\xdf\
\x05\x74\x25\x20\x09\x00\xd9\x4f\x40\x17\x02\xd2\x00\x90\xfd\x04\
\x74\x2e\x20\x0d\x00\x26\xa0\x63\x01\x89\x00\x30\x01\x9d\x0a\x48\
\x04\x80\x09\xe8\x54\x40\x2a\x00\x4c\x40\x87\x02\x52\x01\x60\x02\
\x3a\x14\x90\x0c\x00\x13\xd0\x99\x80\x64\x00\x98\x80\xce\x04\xa4\
\x03\xc0\x04\x74\x24\x20\x1d\x00\x26\xa0\x23\x01\x09\x01\x30\x01\
\x9d\x08\x48\x08\x80\x09\xe8\x44\x40\x4a\x00\x4c\x40\x07\x02\x52\
\x02\x60\x02\x3a\x10\x90\x14\x00\x13\xd0\xbe\x80\xa4\x00\x98\x80\
\xf6\x05\xa4\x05\xc0\x04\xb4\x2d\x20\x2d\x00\x3d\x9b\x80\x49\x7b\
\x35\xab\x11\xa1\x80\xc4\x00\xf4\x6a\x02\x26\x4f\x6b\x56\x7f\x84\
\x1b\x90\x18\x00\x77\x01\xed\x0a\x48\x0d\x80\xbb\x80\x36\x05\xa4\
\x06\xc0\x04\xb4\x29\x20\x39\x00\x26\xa0\x3d\x01\xc9\x01\x30\x01\
\xed\x09\x48\x0f\x80\x09\x68\x4b\x40\x7a\x00\x4c\x40\x5b\x02\x12\
\x04\x60\x02\xda\x11\x90\x20\x00\x13\xd0\x8e\x80\x14\x01\x98\x80\
\x36\x04\xa4\x08\xc0\x04\xb4\x21\x20\x49\x00\x26\xa0\x75\x01\x49\
\x02\x30\x01\xad\x0b\x48\x13\x80\x09\x68\x59\x40\x9a\x00\x4c\x40\
\xcb\x02\x12\x05\x60\x02\x5a\x15\x90\x28\x00\x13\xd0\xaa\x80\x54\
\x01\x98\x80\x16\x05\xa4\x0a\xc0\x04\xb4\x28\x20\x59\x00\x26\xa0\
\x35\x01\xc9\x02\x30\x01\xad\x09\x48\x17\x80\x09\x68\x49\x40\xba\
\x00\x4c\x40\x4b\x02\x12\x06\x60\x02\x5a\x11\x90\x30\x00\x13\xd0\
\x8a\x80\x94\x01\x98\x80\x16\x04\xa4\x0c\xc0\x04\xb4\x20\x20\x69\
\x00\x26\x60\x62\x01\x49\x03\x30\x01\x13\x0b\x48\x1b\x80\x09\x98\
\x50\x40\xda\x00\x4c\xc0\x84\x02\x12\x07\x60\x02\x26\x7a\x6a\x12\
\x07\x60\x02\x76\x21\x20\x0b\x00\x26\x60\xa2\x7f\x21\x85\x09\x00\
\xc0\x04\x00\x60\x02\x00\x30\x01\x00\x98\x00\x00\x4c\x00\x00\x26\
\x00\x00\x13\x00\x80\x09\x00\xc0\x04\x00\x60\x02\x00\x30\x01\x00\
\x98\x00\x00\x4c\x00\x00\x26\x00\x00\x13\x00\x80\x09\x00\xc0\x04\
\x00\x60\x02\x00\x30\x01\x00\x98\x00\x00\x4c\x00\x00\x26\x00\x00\
\x13\x00\x80\x09\x00\xc0\x04\x00\x60\x02\x00\x30\x01\x00\x98\x00\
\x00\xd2\x9e\x80\x1d\xdb\x9a\x35\x0c\x40\x2e\x13\xb0\x79\xa8\x59\
\x00\xb8\x0b\x00\xc0\x5d\x00\x00\x26\x00\x00\x13\x00\x80\x09\x00\
\xc0\x04\x00\x60\x02\x00\x30\x01\x00\x98\x00\x00\x4c\x00\x00\x26\
\x00\x00\x13\x00\x80\x09\x00\xc0\x04\x00\x60\x02\x00\x30\x01\x00\
\x98\x00\x00\x4c\x00\x00\x26\x00\x00\x13\x00\x40\x4d\x27\xa0\x6f\
\x72\xb7\x35\x00\x88\x79\x02\x26\x4f\xef\xb6\x3e\x00\xdc\x05\x00\
\xe0\x2e\x00\x00\x13\x00\x80\x09\x00\xc0\x04\x00\x60\x02\x00\x30\
\x01\x00\x98\x00\x00\x4c\x00\x00\x26\x00\x00\x13\x00\x80\x09\x00\
\xc0\x04\x00\x60\x02\x00\x30\x01\x00\x98\x00\x00\x4c\x00\x00\x26\
\x00\x00\x13\x00\x80\x09\x00\xc0\x04\x00\x60\x02\x00\x30\x01\x00\
\x98\x00\x00\x4c\x00\x00\x26\x00\x00\x13\x00\x80\x09\x00\xc0\x04\
\x00\x60\x02\x00\x30\x01\xb1\x00\xd8\xf0\xcf\xda\xb7\x71\xa8\x9a\
\x36\x64\x01\x60\xeb\xc6\xda\xb7\x69\x5b\x35\xed\xf0\x12\x20\x00\
\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\
\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x00\xf0\x14\x00\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\
\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\
\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\
\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\xd5\x0d\xc0\xd4\
\xd9\x2a\xb7\x7d\xe2\x02\xb0\x9b\xca\xce\x4b\x80\x00\x10\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\xd5\x07\x40\xdf\xf4\
\xfd\xca\x6b\xaa\x73\x8d\x0a\xc0\x1e\x27\x2c\xba\xfc\xe6\x15\xcb\
\xcb\xeb\xe6\x65\x17\x9d\x76\x84\xb3\x8d\x05\xc0\x31\xd7\x9c\x3f\
\xff\xf0\x3d\x4b\x7d\xc8\x03\xbe\x74\xc6\x65\xdf\xdb\xc7\xe9\xc6\
\x00\x60\xcf\xf3\x97\xec\x1f\xe4\x81\x8f\xbd\xf2\x6b\x8e\xb7\xfe\
\x00\xfa\x2f\x39\xa1\x11\xe8\xa1\xf7\xbd\xe0\x64\xe7\x5b\x7b\x00\
\xdf\x1c\x08\xf8\xe0\x0b\x0f\x72\xc0\x35\x07\x70\xc8\x82\xa0\x2f\
\x2f\xe7\x36\x9c\x70\xbd\x01\x7c\x7b\x52\xd0\x87\x9f\x37\xdf\x09\
\xd7\x1a\xc0\xe7\x06\x02\x5f\x60\xc0\x09\xd7\x1a\xc0\x9c\xd0\x17\
\x38\xd8\x09\xd7\x1a\xc0\xcc\xd0\x17\x98\xb1\x97\x23\xae\x33\x80\
\x03\x43\x5f\xa0\xf1\x05\x47\x5c\x67\x00\x5b\x83\x5f\x61\xb3\x23\
\xae\x33\x80\x77\x43\x5f\x60\xcb\x1b\x8e\xb8\xce\x00\x82\x1f\xcf\
\xfa\x1d\x8e\xb8\xce\x00\x06\x43\xbf\x06\xbc\xeb\x84\x6b\x0d\x60\
\xfb\x33\x71\x3f\x3e\x00\xdd\xb6\x26\xec\x3f\xd1\x87\xfe\xec\x84\
\xeb\x0d\xe0\xe3\x7b\x42\xbe\x48\x0f\xde\xef\x80\x6b\x0e\xa0\x58\
\x7b\x5f\x38\x01\xef\xdd\xbd\xdd\x01\xd7\x1d\x40\xf1\xc0\x8d\xeb\
\x03\x3d\xf2\x93\xd7\xfb\x1d\x30\x02\x00\xc5\x6b\x37\x3c\xfa\x61\
\x80\x87\x7d\x7b\xd5\xed\x1f\x39\xde\x18\x00\x14\x5b\xef\x5e\x7a\
\xdd\x9d\x8f\xbd\xf4\x72\x79\x3d\x77\xef\xaa\x4b\xaf\x79\xd1\xe1\
\x46\x02\xa0\x28\x86\x07\x1f\xbf\xeb\xe7\x2b\xcb\xeb\x96\xfb\x5e\
\xdc\xe0\x68\x23\x02\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x50\x22\x00\xfa\x07\x4e\x3a\xf3\xac\x1e\xb6\x60\
\xde\xe4\x5d\x7c\x37\x8d\x83\xbf\x72\xf4\xde\x00\xf4\xae\x13\x7f\
\xb8\xf2\xea\xf3\x16\x2e\xe8\x61\x67\xfd\x60\xd5\x8f\xce\xd9\xbd\
\xe9\x37\xf3\x99\xd3\xaf\x58\x79\xdd\x77\x96\xde\xb4\x62\xe9\x97\
\x01\xe8\x49\x9f\xbf\x78\xf1\xa1\x53\x7a\xbf\x39\xb3\xbf\x71\xd5\
\x61\x4d\x3e\x3f\xef\xaa\x6f\xcd\xfd\xcf\xfb\x16\x4f\x3f\xfa\xbb\
\x17\xee\x0b\x40\xf8\x8e\xbf\xf2\x98\x8a\xae\x3c\xf3\xb2\xb3\xc7\
\x8d\xff\xe2\x4b\x67\xfd\xff\xa3\xe3\xae\xfe\x2a\x00\xc1\x4f\x61\
\x71\x75\xaf\xb6\xfd\xa7\x7c\x7d\xcc\x67\x4e\x39\x71\xd4\xf3\x31\
\x6d\xd1\x41\x00\x04\xbe\xfc\xb9\x7b\x54\x79\xf9\xd3\x66\x8c\xfa\
\x70\xd6\xa9\x63\xfe\x7e\xca\xd9\x00\x84\x6d\xe1\x61\x95\x5e\x7e\
\xea\x39\xa3\x9e\x8b\x45\x9f\x1d\xfb\x05\x47\xcd\x07\x20\x64\x8d\
\xaa\x9f\xdf\xa3\x8e\x1c\x79\x3b\x32\x77\xfc\x17\x9c\x04\x40\xc8\
\x66\x55\xfe\x46\x9e\xb3\x3e\xe5\xcf\xff\x6d\xbf\x03\x00\x08\xd8\
\xec\xca\x7f\xfe\x91\x77\x79\x4d\xcf\x7a\x0e\x00\x01\xab\xfe\xbd\
\x9c\x0f\x9c\x60\x01\x92\x7f\xbb\xe9\x6a\x01\x4c\xab\xfc\xe7\x1f\
\xf1\xdf\x7a\xa6\x34\xfd\xbf\x4c\xed\x0d\x40\xc0\xde\xab\xfc\xe7\
\x1f\xf1\x36\x75\x9b\x87\x9a\x7d\xc1\xdf\x00\x08\xd8\x3b\x95\xff\
\xfc\x23\xcf\xf7\xed\x66\x5f\xf0\x16\x00\x01\x7b\xa3\x4e\x0b\xd0\
\x94\xe3\xce\x41\x00\x02\x36\x54\xf5\xc0\xee\xfc\xcb\x88\x0f\x9a\
\x9d\xf5\xba\x0f\x00\x08\xd9\xbd\x15\xff\xf8\xbf\x1f\xb9\x41\xaf\
\xbc\x30\xee\xef\x87\x7f\x57\x00\x10\xb2\xe7\x9e\xaa\xf6\x1e\x64\
\xcd\xa8\x0f\x57\x8f\x7b\x6f\xb1\x27\x5e\x05\x20\x6c\xab\x87\x2a\
\xbc\xf8\xf0\xea\x2d\xa3\x3e\x7e\x7f\xec\x7b\x0b\xff\xfd\xd7\x05\
\x00\x61\xfb\xf0\x96\xea\xde\xd0\xfb\x1f\xb7\xad\x1d\xfb\x8a\xb0\
\x66\xd4\x9b\x96\xbe\x7e\xeb\x47\x00\x84\xee\xcd\x15\xbf\xdd\x52\
\xcd\x95\x9f\xbd\xe1\xd9\x71\x9f\x7b\x68\xd9\x2b\xff\xfb\xf3\xc7\
\xbf\x59\x36\x58\x00\x10\x7e\x87\x1f\x58\xfe\xf0\xab\x9b\x7a\x7c\
\xd1\x6d\x7f\xfd\xc3\xaa\x5b\x37\x36\xfb\xb5\xf0\x67\xbf\x7a\x7a\
\xdd\x8e\xa2\xd8\xb4\xf6\x91\x65\x0f\x0e\x17\x00\xf4\xa2\x75\xab\
\x6f\xfa\xfe\x8f\x7f\xf1\xcb\x1e\xb6\x7c\xc9\xf5\x77\x7c\xda\x7b\
\x09\x3f\x71\xdb\x4f\x96\xfc\xf4\xda\x4b\x6e\xbc\x67\x7d\x51\x00\
\xd0\xb3\xdf\xc7\xdf\x7c\xe1\xf9\x1e\xf6\xa7\x4f\x76\xf9\xdd\x6c\
\xf9\x63\x1e\x87\x5f\x23\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\
\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\
\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\
\x00\x08\x00\x01\x20\x00\x04\x80\x7a\x0d\x60\xeb\x4e\x95\xdb\x27\
\x71\x01\x18\x76\x62\x79\x03\x90\x7b\x00\x01\x20\x00\x04\x80\x00\
\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\
\x02\x40\x00\x08\x00\x01\x00\x80\xa7\x00\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\
\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\
\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\
\x20\x00\x04\x80\xea\xd7\xbf\x00\xc4\x42\xf9\x0b\x92\x06\xa7\xe7\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\xa7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x00\x00\x00\x02\x00\x08\x03\x00\x00\x00\xc3\xa6\x24\xc8\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x7a\x2f\x00\x00\x7a\x2f\x01\
\x1f\xfd\xfb\xc9\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\xf7\x50\x4c\x54\
\x45\xff\xff\xff\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x73\x83\xbf\x55\
\x60\x80\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\
\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\
\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\x83\xbf\x55\x60\x80\x73\
\x83\xbf\x55\x60\x80\x73\x83\xbf\x60\xa4\x24\x1c\x00\x00\x00\xfb\
\x74\x52\x4e\x53\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\
\x06\x08\x08\x0a\x0a\x0b\x0b\x0c\x0c\x0e\x0e\x10\x10\x11\x11\x12\
\x12\x13\x13\x15\x15\x17\x17\x19\x19\x20\x20\x21\x21\x25\x25\x26\
\x26\x28\x28\x2b\x2b\x2f\x2f\x30\x30\x32\x32\x34\x34\x36\x36\x39\
\x39\x3a\x3a\x3b\x3b\x40\x40\x44\x44\x45\x45\x46\x46\x47\x47\x48\
\x48\x49\x49\x4d\x4d\x4f\x4f\x51\x51\x53\x53\x59\x59\x61\x61\x66\
\x66\x68\x68\x6a\x6a\x6b\x6b\x6d\x6d\x6f\x6f\x70\x70\x73\x73\x78\
\x78\x79\x79\x7e\x7e\x7f\x7f\x80\x80\x81\x81\x87\x87\x8b\x8b\x8c\
\x8c\x92\x92\x94\x94\x95\x95\x96\x96\x99\x99\x9a\x9a\x9c\x9c\x9d\
\x9d\x9e\x9e\xa1\xa1\xa3\xa3\xa7\xa7\xab\xab\xac\xac\xad\xae\xb0\
\xb0\xb2\xb2\xb6\xb6\xb7\xb7\xb8\xb8\xb9\xb9\xba\xba\xbb\xbb\xbe\
\xbe\xc2\xc2\xc3\xc3\xc5\xc5\xc8\xc8\xcc\xcc\xce\xce\xd2\xd2\xd3\
\xd3\xd4\xd4\xd8\xd8\xda\xda\xdb\xdb\xdd\xdd\xdf\xdf\xe0\xe0\xe1\
\xe1\xe2\xe2\xe5\xe5\xe7\xe7\xe8\xe8\xe9\xe9\xea\xea\xeb\xeb\xec\
\xec\xed\xed\xef\xef\xf0\xf0\xf1\xf1\xf2\xf2\xf3\xf3\xf4\xf4\xf5\
\xf5\xf8\xf8\xf9\xf9\xfa\xfa\xfb\xfb\xfc\xfc\xfd\xfd\xfe\xfe\x3b\
\x64\x5c\xe5\x00\x00\x07\x1b\x49\x44\x41\x54\x78\xda\xed\xdc\x5f\
\x6c\xde\xf3\x1e\xc0\xf1\xcf\xf3\x5b\x43\xb3\x34\xfe\xf4\x34\x4d\
\x4f\xb1\xd8\xd4\x74\x33\x8e\x44\x30\x4f\x04\x59\x18\xc1\x59\x1c\
\x07\x71\x63\x19\x17\xb8\x98\x18\x91\x9c\x1b\xd7\x27\x39\xc9\xc8\
\xb8\x23\x11\xe2\x02\x11\x11\x59\x8a\x61\x61\x16\xe7\xc1\xe6\x9c\
\xc3\xb2\xcc\x52\xcd\x26\x48\xd7\x55\x5b\x61\x63\xb4\xcf\xd6\x73\
\x51\xeb\xbf\xb5\xfb\x23\x2e\xf8\x7d\x5e\xaf\x8b\xad\xcf\x9f\x9b\
\xcf\x67\xef\xe7\xf7\x7c\xfb\xa4\x5d\x04\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbf\x23\x95\
\x59\xee\x2f\xe6\x35\xb7\x9c\x54\x9a\x29\x87\x07\x86\xbe\x3c\x74\
\x62\x7b\x29\xdd\xfc\xa3\xc7\x1d\x40\x43\xf5\x8a\x6a\x7b\xc7\xdc\
\x92\x95\xfe\x63\x4f\x6f\xed\xfd\x5a\xfd\x38\x9e\x39\xa7\xc4\xf3\
\x1f\x3c\x66\x00\xc5\x0d\xd5\x15\x9d\xa5\xbd\xde\xed\x5c\x5f\x7b\
\xfd\xe8\x57\x82\x4a\xe9\xe7\x1f\x3d\x6a\x00\xcb\x1f\xba\xa6\xe4\
\xef\x79\x1b\xd7\x6d\x38\xca\xa3\xd7\xad\x29\xff\xfc\x6f\xce\x1e\
\xc0\xa5\x0f\xdf\x92\xe0\xd8\xf3\xca\xda\x2d\xb3\x3c\x72\x49\x92\
\xf9\xb7\x4e\x7e\xc3\x9b\xf8\xb2\xf1\x91\x67\x16\x67\x38\xf7\x2e\
\xba\x6b\xce\x87\x33\x9d\x05\x4e\x4e\x34\xff\xc1\x19\x02\x38\xf3\
\xa9\x7b\x2a\x49\xbe\xf3\xb9\x72\x71\xed\xfb\x23\xee\x3d\x23\xd5\
\xfc\xfb\x8e\x08\xe0\xc2\x67\xae\x89\x34\x3a\x97\x7e\xbc\x77\xda\
\x5d\x17\x24\x9b\xbf\x7f\xda\x19\xe0\xea\xe7\xda\x23\x93\xde\x95\
\x9b\xa6\xdc\xbe\x2a\xdd\xfc\xef\x4d\x09\x60\xe1\xfa\x8e\xc8\xa5\
\x67\x45\xf7\xa4\x5b\xe7\x26\x9c\xff\xf3\x49\x6f\x01\x2d\x4f\x5f\
\x9c\x6c\xfe\x68\x5e\xf4\xc6\x8f\xe3\x37\xfe\x94\x72\xfe\x03\xe3\
\x01\x14\x8f\xff\x2d\xd2\x99\xdf\xd2\x75\xf8\x33\x91\x4a\xd2\xf9\
\xc7\x03\xb8\xff\x1f\x91\xd0\x45\xdf\x7e\xf4\xcb\x57\x59\xe7\xdf\
\xf2\xcb\x19\x60\x41\xad\x25\xe3\x02\x62\xa0\xba\x6b\xec\xb5\x90\
\x76\xfe\xdd\x11\x45\x44\xac\xca\x39\x7f\xb4\xdc\x3d\xf6\xf7\xdd\
\x59\xe7\x5f\x35\x76\x05\x58\xbc\xa5\x31\xe7\x02\xe2\xa7\x4b\x77\
\x44\xc4\xa2\xc4\xf3\x7f\x16\x45\xc4\xea\xac\xf3\x47\xe3\xea\x88\
\xec\xf3\x57\x62\xc1\x8e\x86\xac\x0b\x88\xfa\xe2\x5d\x31\x3f\xf5\
\xfc\xbb\x8b\xa8\xe6\x9d\x3f\x1a\xaa\x91\x7d\xfe\x22\x96\x47\x62\
\xcb\x23\xfb\xfc\x95\xd6\xee\xa6\xc4\x0b\xd8\xbf\x70\x34\xf9\xfc\
\x0d\xcb\x32\xcf\x1f\x4d\xcb\x46\x93\xcf\xdf\x70\x76\xa4\x76\xf6\
\x68\xf2\xf9\x1b\xda\x72\x2f\xa0\x2d\x79\x00\x6d\x02\xc8\x1e\x40\
\x6b\xee\x05\xb4\x26\x0f\xa0\xd5\x19\x20\xf9\xfc\x95\x7a\xee\x05\
\x44\xf2\x00\xa2\x08\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\
\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\
\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\
\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\
\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\
\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\
\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\
\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\
\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\
\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\
\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\
\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\
\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x20\x00\x04\x80\x00\x10\
\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\
\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\
\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\
\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\
\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\
\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\
\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\
\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\
\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\
\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\
\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\
\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\
\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\
\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\
\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\
\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\
\xa2\x18\xb6\x83\xcc\x86\x8b\x6e\x4b\xc8\xac\xbb\xe8\xb5\x84\xcc\
\x7a\x8b\x3e\x4b\xc8\xac\x4f\x00\xd9\x03\xe8\xb7\x84\xcc\xfa\x8b\
\x6d\x96\x90\xd9\xb6\xca\x49\xdb\x17\xa4\xde\xc0\x68\xea\xe9\x77\
\x2d\x29\x86\xbb\xbc\x0c\xf2\xea\x1a\x29\x62\xb3\x35\xe4\xb5\x39\
\x2a\x71\xca\xce\x56\x6f\x01\x59\x8f\x80\x9d\xfb\x8a\xf8\x7e\xad\
\x17\x42\x56\x6b\xf7\x45\x25\xa2\x65\x53\xa7\x2b\x40\x4a\x3b\xaf\
\x1e\x8c\x22\x62\xc0\x25\x20\xeb\x05\x60\x30\xa2\x12\x11\x73\x37\
\x54\x5d\x01\x12\xaa\x5d\x7f\x20\x62\x4e\x44\x8c\x7c\x75\xfb\x1c\
\x2f\x87\x74\x86\xef\xeb\x8e\xb1\x00\x62\xd7\x37\x37\xda\x47\x3a\
\xf7\xbf\x1c\x87\x03\x88\xff\x34\x5d\x6e\x21\xc9\x3c\xf6\xaf\x98\
\x08\x20\x3e\x58\xb2\xd0\x4a\x52\xe9\x7a\x60\x64\x72\x00\xc3\xb5\
\xf9\xe7\x59\x4a\x22\xeb\x1f\x18\x88\xc9\x01\xc4\x77\xaf\x35\x2e\
\xb5\x96\x34\xd6\x1d\xfe\xf7\x1f\x0f\x20\x86\xdf\x1a\xb8\xd6\x8f\
\x08\xe7\x50\x5f\xf3\xcf\x91\x98\x1e\x40\xc4\xd6\xff\xce\x3f\xd3\
\x72\x12\xf8\x70\xf5\x0b\x13\x37\x2a\x93\x1f\x99\x7b\xc7\xc3\xe9\
\xce\x82\xe9\x3e\x08\xea\x5e\xfb\xe2\x81\x98\x25\x80\x88\x96\x55\
\x6b\xda\x04\x50\x62\x7d\xeb\x9e\x1d\x9c\x72\x47\x65\xfa\x33\x9a\
\x96\x55\x6f\xea\x14\x40\x29\xed\xec\xaa\xbd\xf3\xc3\xb4\xfb\x2a\
\x33\x3c\xaf\xe1\x8a\x25\xcd\xed\xed\xad\x1d\x7f\xf8\x33\x61\xd1\
\xf4\x9b\x04\xb0\xff\xd0\x1f\x7d\x11\x87\x7a\xfa\x7b\x7b\x87\xb6\
\xbf\x7f\xf0\xc8\x87\x2a\xa5\x4e\xbe\xbd\x7d\x5e\x73\x5b\xc7\xcd\
\x4d\xbf\x32\x80\xfd\xaf\xf6\xf4\x0d\x7d\xd9\xbb\xa7\xcc\x2b\x2a\
\x77\x00\x63\xda\x96\xdd\x70\x73\xe3\x09\x07\xf0\xd3\xab\xaf\xbf\
\xb3\xb7\xfc\xcb\xc9\x10\x40\x44\xcc\xbb\xf2\x8e\xeb\x4f\x28\x80\
\x0d\x2f\x6e\xfe\xca\x37\x8c\xa5\x72\xeb\xa6\xfa\x4c\x46\x66\xf2\
\xee\xdf\xed\xab\x7c\x1a\x57\x6e\x3b\xbe\x00\x3e\xbd\xf3\x64\xdb\
\x2a\xa5\xe6\x87\xfa\x8f\x1d\xc0\xde\x07\x4f\xb7\xa9\xd2\x5a\xb2\
\xf1\x58\x01\xbc\x7d\xbe\x2d\x95\xd9\x69\x4f\x1c\x3d\x80\xc7\x4f\
\xb5\xa3\x92\x5b\x39\x34\x7b\x00\x83\x77\xda\x4f\xf9\x2d\xdd\x32\
\x5b\x00\x1f\x5d\x66\x3b\x29\x3e\x14\xd8\x38\x73\x00\x6f\x9f\x65\
\x37\x39\x74\xd4\x66\x0a\xe0\xdf\xe7\xd8\x4c\x16\x9d\x9f\x1c\x19\
\xc0\xff\xfc\x50\x64\x22\x17\xf5\x4c\x0f\xe0\xf3\xbf\xd8\x4a\x26\
\xd5\xbe\xa9\x01\xec\xf1\x8b\x11\xc9\xac\x98\x1a\xc0\x5f\x6d\x24\
\x9b\xc7\x26\x07\xf0\xa8\x7d\xa4\xd3\xbe\x6d\x22\x80\x4f\xff\x6c\
\x1f\xf9\xdc\x3e\x11\xc0\x6d\xb6\x91\xd1\x93\x87\x03\x78\xca\x2e\
\x52\xea\xf8\x62\x2c\x80\xdd\x3e\x01\x4a\xea\xbe\xb1\x00\xee\xb5\
\x89\xac\xe7\xc0\xa1\x7a\x7d\x64\x64\x30\xf5\x09\x30\xf5\xef\x83\
\xf6\x3e\x1f\x11\xf1\xfc\x1e\x01\x64\xb5\x7e\xfc\x0f\x52\x6a\xf8\
\xb8\x3e\xb2\x35\xf7\x7f\x90\x95\xfb\x0a\x50\x7f\x29\xe2\xa5\x83\
\x5e\x08\x79\x2d\x1c\xfe\xf9\x5c\x5b\xc8\x6c\xf3\x7b\xd9\xdf\x05\
\x93\xcf\xff\xf5\xa8\x00\x52\xfb\x42\x00\xb9\xf5\x09\x20\xb7\x7e\
\x01\x38\x03\x08\xc0\x5b\x00\x59\x15\x15\x3b\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xf7\
\xe3\xff\x3c\x6c\xb6\x21\xea\xdb\x38\x0b\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x14\
\x0e\x3a\x5a\x98\
\x00\x44\
\x00\x65\x00\x6c\x00\x66\x00\x74\x00\x33\x00\x44\x00\x5f\x00\x46\x00\x6c\x00\x65\x00\x78\x00\x69\x00\x62\x00\x6c\x00\x65\x00\x4d\
\x00\x65\x00\x73\x00\x68\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x0c\
\x02\x43\x1b\x27\
\x00\x69\
\x00\x6e\x00\x63\x00\x6f\x00\x6d\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x08\xa7\x02\xc7\
\x00\x66\
\x00\x6f\x00\x6c\x00\x64\x00\x65\x00\x72\x00\x2d\x00\x38\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0c\x9c\x06\xa7\
\x00\x6c\
\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x00\xbb\x38\x27\
\x00\x72\
\x00\x65\x00\x61\x00\x64\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x42\x00\x02\x00\x00\x00\x05\x00\x00\x00\x04\
\x00\x00\x00\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x63\x15\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x70\x00\x00\x00\x00\x00\x01\x00\x00\x18\x11\
\x00\x00\x00\xa8\x00\x00\x00\x00\x00\x01\x00\x00\x56\xb4\
\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x31\x86\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x42\x00\x02\x00\x00\x00\x05\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x63\x15\
\x00\x00\x01\x55\x78\x58\x3a\x80\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x55\x78\x58\x3a\x80\
\x00\x00\x00\x70\x00\x00\x00\x00\x00\x01\x00\x00\x18\x11\
\x00\x00\x01\x55\x78\x58\x3a\x80\
\x00\x00\x00\xa8\x00\x00\x00\x00\x00\x01\x00\x00\x56\xb4\
\x00\x00\x01\x55\x78\x58\x3a\x80\
\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x31\x86\
\x00\x00\x01\x55\x78\x58\x3a\x80\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
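# Importing this generated module registers the embedded PNGs with Qt, after
# which they are addressable via resource paths such as
# ":/plugins/Delft3D_FlexibleMesh/icons/save.png" (path reconstructed from the
# qt_resource_name entries above).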
| 63.832536 | 129 | 0.726932 |
79411204668a1c4adf3b7510f8801b4afc42c25a | 525 | py | Python | NFC_Reader/quick2wire/syscall.py | lleon95/NFC_Points_Service | 1d59fe23ff5d2e9ee635dfbcbe874e801bc8b336 | [
"MIT"
] | 46 | 2016-04-18T07:51:58.000Z | 2021-12-01T14:12:27.000Z | quick2wire/syscall.py | ilumitr/py532lib | 09afdb3b5a0f560c3ddeb1659c02dfbfdb8547ce | [
"BSD-2-Clause"
] | 4 | 2020-05-25T15:39:35.000Z | 2022-01-22T14:16:54.000Z | quick2wire/syscall.py | ilumitr/py532lib | 09afdb3b5a0f560c3ddeb1659c02dfbfdb8547ce | [
"BSD-2-Clause"
] | 36 | 2015-02-12T20:27:19.000Z | 2022-01-18T06:34:44.000Z |
import os
import errno
import ctypes
libc = ctypes.CDLL(None, use_errno=True)
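# ctypes "errcheck" hook: attached to every looked-up libc function so that a
# negative return value raises OSError carrying the thread-local errno.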
def errcheck(result, func, args):
if result < 0:
e = ctypes.get_errno()
        raise OSError(e, os.strerror(e))
return result
def lookup(restype, name, argtypes):
f = libc[name]
    f.restype = restype
f.argtypes = argtypes
f.errcheck = errcheck
return f
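# Hypothetical usage sketch (this binding is illustrative, not part of the
# module itself):
#
#   _close = lookup(ctypes.c_int, "close", (ctypes.c_int,))
#   _close(fd)  # raises OSError on failure instead of returning -1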
class SelfClosing(object):
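    """Mixin giving any object with a close() method context-manager support."""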
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
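# Hypothetical usage sketch (the Device class below is illustrative only):
#
#   class Device(SelfClosing):
#       def __init__(self, fd):
#           self.fd = fd
#       def close(self):
#           os.close(self.fd)
#
#   with Device(some_fd) as dev:
#       ...  # dev.close() runs automatically on exit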
| 16.935484 | 43 | 0.626667 |
7941125416c75c62bdf713a8e9398c152091e03e | 26,917 | py | Python | test/functional/interface_zmq.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | 3 | 2021-07-27T16:59:47.000Z | 2021-12-31T20:55:46.000Z | test/functional/interface_zmq.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | null | null | null | test/functional/interface_zmq.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | 1 | 2021-12-31T12:58:23.000Z | 2021-12-31T12:58:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.address import (
ADDRESS_BCRT1_P2WSH_OP_TRUE,
ADDRESS_BCRT1_UNSPENDABLE,
)
from test_framework.blocktools import (
add_witness_commitment,
create_block,
create_coinbase,
)
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.messages import (
CTransaction,
hash256,
tx_from_hex,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.netutil import test_ipv6_local
from io import BytesIO
from time import sleep
# Test may be skipped and not have zmq installed
try:
import zmq
except ImportError:
pass
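# hash256 returns the double-SHA256 in internal (little-endian) byte order;
# reversing it yields the big-endian hex ordering that RPC results and the
# comparisons below use.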
def hash256_reversed(byte_str):
return hash256(byte_str)[::-1]
class ZMQSubscriber:
def __init__(self, socket, topic):
self.sequence = None # no sequence number received yet
self.socket = socket
self.topic = topic
self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
# Receive message from publisher and verify that topic and sequence match
def _receive_from_publisher_and_check(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
received_seq = struct.unpack('<I', seq)[-1]
if self.sequence is None:
self.sequence = received_seq
else:
assert_equal(received_seq, self.sequence)
self.sequence += 1
return body
def receive(self):
return self._receive_from_publisher_and_check()
def receive_sequence(self):
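        # Body layout: 32-byte hash, a 1-byte label ("C"/"D" for block
        # connect/disconnect, "A"/"R" for mempool add/remove) and, for A/R
        # events only, an 8-byte little-endian mempool sequence number.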
body = self._receive_from_publisher_and_check()
hash = body[:32].hex()
label = chr(body[32])
mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]
if mempool_sequence is not None:
assert label == "A" or label == "R"
else:
assert label == "D" or label == "C"
return (hash, label, mempool_sequence)
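# Minimal wiring sketch (endpoint and topic assumed; setup_zmq_test below does
# the equivalent, plus timeouts and a sync-up handshake):
#
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.SUB)
#   sock.connect("tcp://127.0.0.1:28332")
#   sub = ZMQSubscriber(sock, b"hashblock")
#   best_hash = sub.receive().hex()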
class ZMQTestSetupBlock:
"""Helper class for setting up a ZMQ test via the "sync up" procedure.
Generates a block on the specified node on instantiation and provides a
method to check whether a ZMQ notification matches, i.e. the event was
caused by this generated block. Assumes that a notification either contains
    the generated block's hash, its (coinbase) transaction id, the raw block or
raw transaction data.
"""
def __init__(self, test_framework, node):
self.block_hash = test_framework.generate(node, 1, sync_fun=test_framework.no_op)[0]
coinbase = node.getblock(self.block_hash, 2)['tx'][0]
self.tx_hash = coinbase['txid']
self.raw_tx = coinbase['hex']
self.raw_block = node.getblock(self.block_hash, 0)
def caused_notification(self, notification):
return (
self.block_hash in notification
or self.tx_hash in notification
or self.raw_block in notification
or self.raw_tx in notification
)
class ZMQTest (BlinkhashTestFramework):
def set_test_params(self):
self.num_nodes = 2
if self.is_wallet_compiled():
self.requires_wallet = True
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
        self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_py3_zmq()
self.skip_if_no_blinkhashd_zmq()
def run_test(self):
self.ctx = zmq.Context()
try:
self.test_basic()
self.test_sequence()
self.test_mempool_sync()
self.test_reorg()
self.test_multiple_interfaces()
self.test_ipv6()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.ctx.destroy(linger=None)
# Restart node with the specified zmq notifications enabled, subscribe to
# all of them and return the corresponding ZMQSubscriber objects.
def setup_zmq_test(self, services, *, recv_timeout=60, sync_blocks=True, ipv6=False):
subscribers = []
for topic, address in services:
socket = self.ctx.socket(zmq.SUB)
if ipv6:
socket.setsockopt(zmq.IPV6, 1)
subscribers.append(ZMQSubscriber(socket, topic.encode()))
self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services] +
self.extra_args[0])
for i, sub in enumerate(subscribers):
sub.socket.connect(services[i][1])
# Ensure that all zmq publisher notification interfaces are ready by
# running the following "sync up" procedure:
# 1. Generate a block on the node
# 2. Try to receive the corresponding notification on all subscribers
# 3. If all subscribers get the message within the timeout (1 second),
# we are done, otherwise repeat starting from step 1
for sub in subscribers:
sub.socket.set(zmq.RCVTIMEO, 1000)
while True:
test_block = ZMQTestSetupBlock(self, self.nodes[0])
recv_failed = False
for sub in subscribers:
try:
while not test_block.caused_notification(sub.receive().hex()):
self.log.debug("Ignoring sync-up notification for previously generated block.")
except zmq.error.Again:
self.log.debug("Didn't receive sync-up notification, trying again.")
recv_failed = True
if not recv_failed:
self.log.debug("ZMQ sync-up completed, all subscribers are ready.")
break
# set subscriber's desired timeout for the test
for sub in subscribers:
sub.socket.set(zmq.RCVTIMEO, recv_timeout*1000)
self.connect_nodes(0, 1)
if sync_blocks:
self.sync_blocks()
return subscribers
def test_basic(self):
# Invalid zmq arguments don't take down the node, see #17185.
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
address = 'tcp://127.0.0.1:28332'
subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]])
hashblock = subs[0]
hashtx = subs[1]
rawblock = subs[2]
rawtx = subs[3]
num_blocks = 5
self.log.info(f"Generate {num_blocks} blocks (and {num_blocks} coinbase txes)")
genhashes = self.generatetoaddress(self.nodes[0], num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
for x in range(num_blocks):
# Should receive the coinbase txid.
txid = hashtx.receive()
# Should receive the coinbase raw transaction.
hex = rawtx.receive()
tx = CTransaction()
tx.deserialize(BytesIO(hex))
tx.calc_sha256()
assert_equal(tx.hash, txid.hex())
# Should receive the generated raw block.
block = rawblock.receive()
assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
# Should receive the generated block hash.
hash = hashblock.receive().hex()
assert_equal(genhashes[x], hash)
# The block should only have the coinbase txid.
assert_equal([txid.hex()], self.nodes[1].getblock(hash)["tx"])
if self.is_wallet_compiled():
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# Should receive the broadcasted txid.
txid = hashtx.receive()
assert_equal(payment_txid, txid.hex())
# Should receive the broadcasted raw transaction.
hex = rawtx.receive()
assert_equal(payment_txid, hash256_reversed(hex).hex())
# Mining the block with this tx should result in second notification
# after coinbase tx notification
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
hashtx.receive()
txid = hashtx.receive()
assert_equal(payment_txid, txid.hex())
self.log.info("Test the getzmqnotifications RPC")
assert_equal(self.nodes[0].getzmqnotifications(), [
{"type": "pubhashblock", "address": address, "hwm": 1000},
{"type": "pubhashtx", "address": address, "hwm": 1000},
{"type": "pubrawblock", "address": address, "hwm": 1000},
{"type": "pubrawtx", "address": address, "hwm": 1000},
])
assert_equal(self.nodes[1].getzmqnotifications(), [])
def test_reorg(self):
if not self.is_wallet_compiled():
self.log.info("Skipping reorg test because wallet is disabled")
return
address = 'tcp://127.0.0.1:28333'
# Should only notify the tip if a reorg occurs
hashblock, hashtx = self.setup_zmq_test(
[(topic, address) for topic in ["hashblock", "hashtx"]],
recv_timeout=2) # 2 second timeout to check end of notifications
self.disconnect_nodes(0, 1)
# Generate 1 block in nodes[0] with 1 mempool tx and receive all notifications
payment_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
disconnect_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE, sync_fun=self.no_op)[0]
disconnect_cb = self.nodes[0].getblock(disconnect_block)["tx"][0]
assert_equal(self.nodes[0].getbestblockhash(), hashblock.receive().hex())
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), disconnect_cb)
# Generate 2 blocks in nodes[1] to a different address to ensure split
connect_blocks = self.generatetoaddress(self.nodes[1], 2, ADDRESS_BCRT1_P2WSH_OP_TRUE, sync_fun=self.no_op)
# nodes[0] will reorg chain after connecting back nodes[1]
self.connect_nodes(0, 1)
self.sync_blocks() # tx in mempool valid but not advertised
# Should receive nodes[1] tip
assert_equal(self.nodes[1].getbestblockhash(), hashblock.receive().hex())
# During reorg:
# Get old payment transaction notification from disconnect and disconnected cb
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), disconnect_cb)
# And the payment transaction again due to mempool entry
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), payment_txid)
# And the new connected coinbases
for i in [0, 1]:
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[i])["tx"][0])
# If we do a simple invalidate we announce the disconnected coinbase
self.nodes[0].invalidateblock(connect_blocks[1])
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[1])["tx"][0])
# And the current tip
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[0])["tx"][0])
def test_sequence(self):
"""
Sequence zmq notifications give every blockhash and txhash in order
of processing, regardless of IBD, re-orgs, etc.
Format of messages:
<32-byte hash>C : Blockhash connected
<32-byte hash>D : Blockhash disconnected
<32-byte hash>R<8-byte LE uint> : Transactionhash removed from mempool for non-block inclusion reason
<32-byte hash>A<8-byte LE uint> : Transactionhash added to mempool
"""
self.log.info("Testing 'sequence' publisher")
[seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")])
self.disconnect_nodes(0, 1)
# Mempool sequence number starts at 1
seq_num = 1
# Generate 1 block in nodes[0] and receive all notifications
dc_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE, sync_fun=self.no_op)[0]
# Note: We are not notified of any block transactions, coinbase or mined
assert_equal((self.nodes[0].getbestblockhash(), "C", None), seq.receive_sequence())
# Generate 2 blocks in nodes[1] to a different address to ensure a chain split
self.generatetoaddress(self.nodes[1], 2, ADDRESS_BCRT1_P2WSH_OP_TRUE, sync_fun=self.no_op)
# nodes[0] will reorg chain after connecting back nodes[1]
self.connect_nodes(0, 1)
# Then we receive all block (dis)connect notifications for the 2 block reorg
assert_equal((dc_block, "D", None), seq.receive_sequence())
block_count = self.nodes[1].getblockcount()
assert_equal((self.nodes[1].getblockhash(block_count-1), "C", None), seq.receive_sequence())
assert_equal((self.nodes[1].getblockhash(block_count), "C", None), seq.receive_sequence())
# Rest of test requires wallet functionality
if self.is_wallet_compiled():
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=5.0, replaceable=True)
self.sync_all()
self.log.info("Testing sequence notifications with mempool sequence values")
# Should receive the broadcasted txid.
assert_equal((payment_txid, "A", seq_num), seq.receive_sequence())
seq_num += 1
self.log.info("Testing RBF notification")
# Replace it to test eviction/addition notification
rbf_info = self.nodes[1].bumpfee(payment_txid)
self.sync_all()
assert_equal((payment_txid, "R", seq_num), seq.receive_sequence())
seq_num += 1
assert_equal((rbf_info["txid"], "A", seq_num), seq.receive_sequence())
seq_num += 1
# A mined tx doesn't get its own "A" notification, so make a block and a tx
# to "flush" that possibility, though the mempool sequence number does go up
# by the number of transactions removed from the mempool by the block mining
# them.
mempool_size = len(self.nodes[0].getrawmempool())
c_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)[0]
# Make sure the number of mined transactions matches the number of txs out of mempool
mempool_size_delta = mempool_size - len(self.nodes[0].getrawmempool())
assert_equal(len(self.nodes[0].getblock(c_block)["tx"])-1, mempool_size_delta)
seq_num += mempool_size_delta
payment_txid_2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
assert_equal((c_block, "C", None), seq.receive_sequence())
assert_equal((payment_txid_2, "A", seq_num), seq.receive_sequence())
seq_num += 1
# Spot check getrawmempool results that they only show up when asked for
assert type(self.nodes[0].getrawmempool()) is list
assert type(self.nodes[0].getrawmempool(mempool_sequence=False)) is list
assert "mempool_sequence" not in self.nodes[0].getrawmempool(verbose=True)
assert_raises_rpc_error(-8, "Verbose results cannot contain mempool sequence values.", self.nodes[0].getrawmempool, True, True)
assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], seq_num)
self.log.info("Testing reorg notifications")
# Manually invalidate the last block to test mempool re-entry
# N.B. This part could be made more lenient in exact ordering
# since it greatly depends on the inner workings of blocks/mempool
# during "deep" reorgs. Probably should "reconstruct"
# blockchain/mempool state from notifications instead.
block_count = self.nodes[0].getblockcount()
best_hash = self.nodes[0].getbestblockhash()
self.nodes[0].invalidateblock(best_hash)
sleep(2) # Bit of room to make sure transaction things happened
# Make sure getrawmempool mempool_sequence results aren't "queued" but immediately reflective
# of the time they were gathered.
assert self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] > seq_num
assert_equal((best_hash, "D", None), seq.receive_sequence())
assert_equal((rbf_info["txid"], "A", seq_num), seq.receive_sequence())
seq_num += 1
# Other things may happen but aren't wallet-deterministic so we don't test for them currently
self.nodes[0].reconsiderblock(best_hash)
self.generatetoaddress(self.nodes[1], 1, ADDRESS_BCRT1_UNSPENDABLE)
self.log.info("Evict mempool transaction by block conflict")
orig_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True)
# More to be simply mined
more_tx = []
for _ in range(5):
more_tx.append(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.1))
raw_tx = self.nodes[0].getrawtransaction(orig_txid)
bump_info = self.nodes[0].bumpfee(orig_txid)
# Mine the pre-bump tx
block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1))
tx = tx_from_hex(raw_tx)
block.vtx.append(tx)
for txid in more_tx:
tx = tx_from_hex(self.nodes[0].getrawtransaction(txid))
block.vtx.append(tx)
add_witness_commitment(block)
block.solve()
assert_equal(self.nodes[0].submitblock(block.serialize().hex()), None)
tip = self.nodes[0].getbestblockhash()
assert_equal(int(tip, 16), block.sha256)
orig_txid_2 = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True)
# Flush old notifications until we see the evicted tx's original entry
(hash_str, label, mempool_seq) = seq.receive_sequence()
while hash_str != orig_txid:
(hash_str, label, mempool_seq) = seq.receive_sequence()
mempool_seq += 1
# Added original tx
assert_equal(label, "A")
# More transactions to be simply mined
for i in range(len(more_tx)):
assert_equal((more_tx[i], "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
# Bumped by rbf
assert_equal((orig_txid, "R", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((bump_info["txid"], "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
# Conflict announced first, then block
assert_equal((bump_info["txid"], "R", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((tip, "C", None), seq.receive_sequence())
mempool_seq += len(more_tx)
# Last tx
assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all() # want to make sure we didn't break "consensus" for other tests
def test_mempool_sync(self):
"""
Use sequence notification plus getrawmempool sequence results to "sync mempool"
"""
if not self.is_wallet_compiled():
self.log.info("Skipping mempool sync test")
return
self.log.info("Testing 'mempool sync' usage of sequence notifier")
[seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")])
# In-memory counter, should always start at 1
next_mempool_seq = self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"]
assert_equal(next_mempool_seq, 1)
# Some transactions have been happening but we aren't consuming zmq notifications yet
# or we lost a ZMQ message somehow and want to start over
txids = []
num_txs = 5
for _ in range(num_txs):
txids.append(self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True))
self.sync_all()
# 1) Consume backlog until we get a mempool sequence number
(hash_str, label, zmq_mem_seq) = seq.receive_sequence()
while zmq_mem_seq is None:
(hash_str, label, zmq_mem_seq) = seq.receive_sequence()
assert label == "A" or label == "R"
assert hash_str is not None
# 2) We need to "seed" our view of the mempool
mempool_snapshot = self.nodes[0].getrawmempool(mempool_sequence=True)
mempool_view = set(mempool_snapshot["txids"])
get_raw_seq = mempool_snapshot["mempool_sequence"]
assert_equal(get_raw_seq, 6)
# Snapshot may be too old compared to zmq message we read off latest
while zmq_mem_seq >= get_raw_seq:
sleep(2)
mempool_snapshot = self.nodes[0].getrawmempool(mempool_sequence=True)
mempool_view = set(mempool_snapshot["txids"])
get_raw_seq = mempool_snapshot["mempool_sequence"]
# Things continue to happen in the "interim" while waiting for snapshot results
# We have node 0 do all these to avoid p2p races with RBF announcements
for _ in range(num_txs):
txids.append(self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1, replaceable=True))
self.nodes[0].bumpfee(txids[-1])
self.sync_all()
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
final_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1, replaceable=True)
# 3) Consume ZMQ backlog until we get to "now" for the mempool snapshot
while True:
if zmq_mem_seq == get_raw_seq - 1:
break
(hash_str, label, mempool_sequence) = seq.receive_sequence()
if mempool_sequence is not None:
zmq_mem_seq = mempool_sequence
if zmq_mem_seq > get_raw_seq:
raise Exception(f"We somehow jumped mempool sequence numbers! zmq_mem_seq: {zmq_mem_seq} > get_raw_seq: {get_raw_seq}")
# 4) Moving forward, we apply the delta to our local view
# remaining txs (5) + 1 RBF (A + R) + 1 block connect + 1 final tx
expected_sequence = get_raw_seq
r_gap = 0
for _ in range(num_txs + 2 + 1 + 1):
(hash_str, label, mempool_sequence) = seq.receive_sequence()
if mempool_sequence is not None:
if mempool_sequence != expected_sequence:
# Detected "R" gap, means this a conflict eviction, and mempool tx are being evicted before its
# position in the incoming block message "C"
if label == "R":
assert mempool_sequence > expected_sequence
r_gap += mempool_sequence - expected_sequence
else:
raise Exception(f"WARNING: txhash has unexpected mempool sequence value: {mempool_sequence} vs expected {expected_sequence}")
if label == "A":
assert hash_str not in mempool_view
mempool_view.add(hash_str)
expected_sequence = mempool_sequence + 1
elif label == "R":
assert hash_str in mempool_view
mempool_view.remove(hash_str)
expected_sequence = mempool_sequence + 1
elif label == "C":
# (Attempt to) remove all txids from known block connects
block_txids = self.nodes[0].getblock(hash_str)["tx"][1:]
for txid in block_txids:
if txid in mempool_view:
expected_sequence += 1
mempool_view.remove(txid)
expected_sequence -= r_gap
r_gap = 0
elif label == "D":
# Not useful for mempool tracking per se
continue
else:
raise Exception("Unexpected ZMQ sequence label!")
assert_equal(self.nodes[0].getrawmempool(), [final_txid])
assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], expected_sequence)
# 5) If you miss a zmq/mempool sequence number, go back to step (2)
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
def test_multiple_interfaces(self):
# Set up two subscribers with different addresses
# (note that after the reorg test, syncing would fail due to different
# chain lengths on node0 and node1; for this test we only need node0, so
# we can disable syncing blocks on the setup)
subscribers = self.setup_zmq_test([
("hashblock", "tcp://127.0.0.1:28334"),
("hashblock", "tcp://127.0.0.1:28335"),
], sync_blocks=False)
# Generate 1 block in nodes[0] and receive all notifications
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE, sync_fun=self.no_op)
# Should receive the same block hash on both subscribers
assert_equal(self.nodes[0].getbestblockhash(), subscribers[0].receive().hex())
assert_equal(self.nodes[0].getbestblockhash(), subscribers[1].receive().hex())
def test_ipv6(self):
if not test_ipv6_local():
self.log.info("Skipping IPv6 test, because IPv6 is not supported.")
return
self.log.info("Testing IPv6")
# Set up subscriber using IPv6 loopback address
subscribers = self.setup_zmq_test([
("hashblock", "tcp://[::1]:28332")
], ipv6=True)
# Generate 1 block in nodes[0]
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
# Should receive the same block hash
assert_equal(self.nodes[0].getbestblockhash(), subscribers[0].receive().hex())
if __name__ == '__main__':
ZMQTest().main()
# --- ExerciciosPYTHON/PythonCeV/modulos/moeda.py (Samuel-Melo890/Python-Desafios, MIT) ---
# Currency helpers with Portuguese identifiers: aumentar = increase p by t
# percent, diminuir = decrease by t percent, dobro = double, metade = half,
# moeda = format as Brazilian currency, resumo = print a summary table.
# Pass c=True to get the currency-formatted string instead of a number.
def aumentar(p=0, t=0, c=False):
t = t / 100
res = p * (1 + t)
if c:
return moeda(res)
else:
return res
def diminuir(p=0, t=0, c=False):
t = t / 100
res = p * (1 - t)
if c:
return moeda(res)
else:
return res
def dobro(p=0, c=False):
res = p * 2
if c:
return moeda(res)
else:
return res
def metade(p=0, c=False):
res = p / 2
if c:
return moeda(res)
else:
return res
def moeda(p=0):
return f'R${p:.2f}'.replace('.', ',')
def resumo(p=0, ta=0, tr=0):
print('-' * 40)
print(f'{"Resumo do Valor":^40}')
print('-' * 40)
print(f'Preço analisado: \t{moeda(p)}')
print(f'Dobro do Preço: \t{dobro(p, True)}')
print(f'Metade do Preço: \t{metade(p, True)}')
print(f'Aumento de {ta}%: \t{aumentar(p, ta, c=True)}')
print(f'Redução de {tr}%: \t{diminuir(p, tr, c=True)}')
print('-' * 40)
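# A small hedged usage sketch for the module above; the amounts are arbitrary.
if __name__ == '__main__':
    print(aumentar(100, 10, True))  # R$110,00
    print(diminuir(100, 10, True))  # R$90,00
    print(dobro(50, True))          # R$100,00
    print(metade(50, True))         # R$25,00
    resumo(100, 10, 20)             # prints the formatted summary table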
# --- model.py (phoebsc/mask_ild, MIT) ---
"""
Mask R-CNN
The main Mask R-CNN model implemenetation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, arr=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints its shape, min, and max values.
"""
if arr is not None:
text = text.ljust(25)
array=np.array(arr)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else ""))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer of the main path uses strides=(2, 2),
and the shortcut uses strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
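# A minimal shape sketch for the two ResNet blocks above (built symbolically,
# no training involved): conv_block changes the channel count and, with the
# default strides=(2, 2), halves the spatial size, while identity_block
# preserves the shape so the Add() shortcut lines up. Stage/block labels
# here are arbitrary demo values.
def _demo_resnet_block_shapes():
    inp = KL.Input(shape=[56, 56, 256])
    down = conv_block(inp, 3, [128, 128, 512], stage=9, block='a')
    same = identity_block(down, 3, [128, 128, 512], stage=9, block='b')
    print(K.int_shape(down))  # (None, 28, 28, 512)
    print(K.int_shape(same))  # (None, 28, 28, 512)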
def resnet_graph(input_image, architecture, stage5=False):
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(axis=3, name='bn_conv1')(x)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
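# A plain-NumPy mirror of the delta math above, handy for sanity-checking a
# single box without a TF session; the example box and deltas are made up.
def _demo_apply_box_deltas():
    y1, x1, y2, x2 = 10.0, 10.0, 30.0, 50.0         # h=20, w=40
    dy, dx, dlh, dlw = 0.1, -0.1, np.log(2.0), 0.0  # shift center, double height
    cy = y1 + 0.5 * (y2 - y1) + dy * (y2 - y1)
    cx = x1 + 0.5 * (x2 - x1) + dx * (x2 - x1)
    h = (y2 - y1) * np.exp(dlh)
    w = (x2 - x1) * np.exp(dlw)
    # Expected result: [2.0, 6.0, 42.0, 46.0]
    return [cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w]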
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(10000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = self.proposal_count - tf.shape(proposals)[0]
proposals = tf.concat([proposals, tf.zeros([padding, 4])], 0)
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(
self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
# Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
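# A hedged arithmetic sketch of the level-assignment rule in call() above
# (Equation 1 of the FPN paper, adapted to normalized coordinates): a
# 224x224-pixel ROI on a 1024x1024 image lands on P4, and each doubling or
# halving of the ROI side moves it one level up or down. Plain NumPy; the
# helper name and arguments are illustrative.
def _demo_roi_level(roi_pixels=224, image_side=1024):
    h = w = roi_pixels / image_side                     # normalized box size
    k = np.log2(np.sqrt(h * w) / (224.0 / image_side))  # 224/sqrt(image_area)
    return int(min(5, max(2, 4 + round(k))))
# _demo_roi_level(224) -> 4, _demo_roi_level(448) -> 5, _demo_roi_level(112) -> 3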
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
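# A tiny NumPy cross-check of the IoU math above for two concrete boxes; the
# values are made up. Two same-size boxes overlapping by half have IoU = 1/3.
def _demo_iou():
    b1 = np.array([0.0, 0.0, 10.0, 10.0])  # area 100
    b2 = np.array([0.0, 5.0, 10.0, 15.0])  # area 100, covers b1's right half
    y1, x1 = np.maximum(b1[:2], b2[:2])
    y2, x2 = np.minimum(b1[2:], b2[2:])
    inter = max(y2 - y1, 0) * max(x2 - x1, 0)  # 10 * 5 = 50
    return inter / (100 + 100 - inter)         # 50 / 150 = 0.333...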
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Compute overlaps matrix [rpn_rois, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box
negative_indices = tf.where(roi_iou_max < 0.5)[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
# Negative ROIs. Fill the rest of the batch.
negative_count = config.TRAIN_ROIS_PER_IMAGE - \
tf.shape(positive_indices)[0]
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
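# A hedged numeric sketch of the USE_MINI_MASK coordinate transform above:
# ROI corners are re-expressed relative to the enclosing GT box so that
# crop_and_resize samples the mini mask correctly. All values are made up.
def _demo_minimask_coords():
    roi = np.array([12.0, 12.0, 20.0, 20.0])  # (y1, x1, y2, x2)
    gt = np.array([10.0, 10.0, 30.0, 30.0])   # GT box, gt_h = gt_w = 20
    gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]
    offsets = np.array([gt[0], gt[1], gt[0], gt[1]])
    scales = np.array([gt_h, gt_w, gt_h, gt_w])
    return (roi - offsets) / scales  # -> [0.1, 0.1, 0.5, 0.5]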
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinment, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])
return boxes
def refine_detections(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)]
"""
# Class IDs per ROI
class_ids = np.argmax(probs, axis=1)
# Class probability of the top class of each ROI
class_scores = probs[np.arange(class_ids.shape[0]), class_ids]
# Class-specific bounding box deltas
deltas_specific = deltas[np.arange(deltas.shape[0]), class_ids]
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = utils.apply_box_deltas(
rois, deltas_specific * config.BBOX_STD_DEV)
# Convert coordinates to image domain
# TODO: better to keep them normalized until later
height, width = config.IMAGE_SHAPE[:2]
refined_rois *= np.array([height, width, height, width])
# Clip boxes to image window
refined_rois = clip_to_window(window, refined_rois)
# Round and cast to int since we're dealing with pixels now
refined_rois = np.rint(refined_rois).astype(np.int32)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = np.where(class_ids > 0)[0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
keep = np.intersect1d(
keep, np.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[0])
# Apply per-class NMS
pre_nms_class_ids = class_ids[keep]
pre_nms_scores = class_scores[keep]
pre_nms_rois = refined_rois[keep]
nms_keep = []
for class_id in np.unique(pre_nms_class_ids):
# Pick detections of this class
ixs = np.where(pre_nms_class_ids == class_id)[0]
# Apply NMS
class_keep = utils.non_max_suppression(
pre_nms_rois[ixs], pre_nms_scores[ixs],
config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = keep[ixs[class_keep]]
nms_keep = np.union1d(nms_keep, class_keep)
keep = np.intersect1d(keep, nms_keep).astype(np.int32)
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
top_ids = np.argsort(class_scores[keep])[::-1][:roi_count]
keep = keep[top_ids]
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are in image domain.
result = np.hstack((refined_rois[keep],
class_ids[keep][..., np.newaxis],
class_scores[keep][..., np.newaxis]))
return result
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
detections_batch = []
for b in range(self.config.BATCH_SIZE):
_, _, window, _ = parse_image_meta(image_meta)
detections = refine_detections(
rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
assert gap >= 0
if gap > 0:
detections = np.pad(
detections, [(0, gap), (0, 0)], 'constant', constant_values=0)
detections_batch.append(detections)
# Stack detections and cast to float32
# TODO: track where float64 is introduced
detections_batch = np.array(detections_batch).astype(np.float32)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
return np.reshape(detections_batch, [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
# Return wrapped function
return tf.py_func(wrapper, inputs, tf.float32)
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
# where depth is [dy, dx, log(dh), log(dw)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns:
logits: [N, NUM_CLASSES] classifier logits (before softmax)
probs: [N, NUM_CLASSES] classifier probabilities
bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_classifier")([rois] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.Dropout(0.5)(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_class_bn2')(x)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_mask")([rois] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn2')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn3')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn4')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
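# A plain-Python mirror of the branchy definition above, making the switch
# from quadratic to linear at |diff| = 1 concrete; the residuals are made up.
def _demo_smooth_l1():
    for diff in (0.5, 2.0):
        loss = 0.5 * diff ** 2 if diff < 1.0 else diff - 0.5
        print(diff, loss)  # 0.5 -> 0.125, 2.0 -> 1.5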
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute the loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
    # the deltas of the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
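# A minimal NumPy sketch (illustrative only) of the tf.gather_nd() selection
# above: for each positive ROI, keep only the deltas of its own GT class.
def _class_specific_gather_demo():
    """Toy demo: pick class-specific bbox deltas per ROI."""
    pred_bbox = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # [num_rois, num_classes, 4]
    target_class_ids = np.array([2, 1])                # GT class of each ROI
    roi_ix = np.arange(pred_bbox.shape[0])
    return pred_bbox[roi_ix, target_class_ids]         # shape [num_rois, 4]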
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
# Random horizontal flips.
if augment:
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
return image, image_meta, class_ids, bbox, mask
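# Example usage (hypothetical `dataset` and `config` objects; illustrative only):
#
#   image, image_meta, class_ids, bbox, mask = load_image_gt(
#       dataset, config, image_id=0, augment=True,
#       use_mini_mask=config.USE_MINI_MASK)
#   assert mask.shape[-1] == class_ids.shape[0]  # one mask per instance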
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
        bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks.
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
interp='nearest') / 255.0).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = scipy.misc.imresize(
m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Areas of anchors and GT boxes
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
anchor_area = (anchors[:, 2] - anchors[:, 0]) * \
(anchors[:, 3] - anchors[:, 1])
# Compute overlaps [num_anchors, num_gt_boxes]
# Each cell contains the IoU of an anchor and GT box.
overlaps = np.zeros((anchors.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, anchors, gt_box_area[i], anchor_area)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. It gets overwritten if a gt box is matched to them.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[anchor_iou_max < 0.3] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
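# A minimal NumPy sketch (illustrative only) of the (dy, dx, log(dh), log(dw))
# encoding computed in the loop above, without the RPN_BBOX_STD_DEV scaling.
def _anchor_refinement_demo():
    """Toy demo of the anchor-to-GT bbox refinement encoding."""
    gt = np.array([0.0, 0.0, 10.0, 20.0])  # GT box (y1, x1, y2, x2)
    a = np.array([1.0, 2.0, 9.0, 18.0])    # anchor box (y1, x1, y2, x2)
    gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]
    a_h, a_w = a[2] - a[0], a[3] - a[1]
    dy = ((gt[0] + 0.5 * gt_h) - (a[0] + 0.5 * a_h)) / a_h
    dx = ((gt[1] + 0.5 * gt_w) - (a[1] + 0.5 * a_w)) / a_w
    return np.array([dy, dx, np.log(gt_h / a_h), np.log(gt_w / a_w)])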
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
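# A minimal NumPy sketch (illustrative only) of the coordinate-sorting trick
# used above: sorting each random pair guarantees y1 <= y2 (and x1 <= x2).
def _valid_box_sort_demo():
    """Toy demo: turn unordered coordinate pairs into valid (lo, hi) pairs."""
    y1y2 = np.array([[7, 3], [2, 9]])
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    return np.hstack([y1, y2])  # -> [[3, 7], [2, 9]]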
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: If True, applies image augmentation to images (currently only
horizontal flips are supported)
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, size of image meta]
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(dataset, config, image_id,
augment=augment, use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
            # Convert lists to arrays
            gt_class_ids = np.array(gt_class_ids)
            gt_boxes = np.array(gt_boxes)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
if config.USE_MINI_MASK:
batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
config.MAX_GT_INSTANCES))
else:
batch_gt_masks = np.zeros(
(batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If more instances than fit in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
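# Example usage (hypothetical `dataset` and `config` objects; illustrative only):
#
#   train_gen = data_generator(dataset, config, shuffle=True,
#                              batch_size=config.BATCH_SIZE)
#   inputs, outputs = next(train_gen)
#   (images, image_meta, rpn_match, rpn_bbox,
#    gt_class_ids, gt_boxes, gt_masks) = inputs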
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")
# Inputs
input_image = KL.Input(
shape=config.IMAGE_SHAPE.tolist(), name="input_image")
input_image_meta = KL.Input(shape=[None], name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
h, w = K.shape(input_image)[1], K.shape(input_image)[2]
image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # Here stage 5 is created (stage5=True), so all of C2-C5 are available.
_, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Generate Anchors
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [N, (y1, x1, y2, x2)] in normalized coordinates.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(proposal_count=proposal_count,
nms_threshold=0.7,
name="ROI",
anchors=self.anchors,
config=config)([rpn_class, rpn_bbox])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
_, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
mask=[None, None, None, None])(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates to 0-1 range.
target_rois = KL.Lambda(lambda x: K.cast(
x, tf.float32) / image_scale[:4])(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Convert boxes to normalized coordinates
# TODO: let DetectionLayer return normalized coordinates to avoid
# unnecessary conversions
h, w = config.IMAGE_SHAPE[:2]
detection_boxes = KL.Lambda(
lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)
# Create masks for detections
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
model = KM.Model([input_image, input_image_meta],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
        exclude: list of layer names to exclude
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
clipnorm=5.0)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
self.keras_model.add_loss(
tf.reduce_mean(layer.output, keep_dims=True))
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(optimizer=optimizer, loss=[
None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
self.keras_model.metrics_tensors.append(tf.reduce_mean(layer.output,
keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
            # Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
              heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Common parameters to pass to fit_generator()
fit_kwargs = {
"steps_per_epoch": self.config.STEPS_PER_EPOCH,
"callbacks": callbacks,
"validation_data": next(val_generator),
"validation_steps": self.config.VALIDATION_STEPS,
"max_queue_size": 100,
"workers": max(self.config.BATCH_SIZE // 2, 2),
"use_multiprocessing": True,#TODO
}
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
**fit_kwargs
)
self.epoch = max(self.epoch, epochs)
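    # Example usage (hypothetical datasets and a `config` with a LEARNING_RATE
    # attribute; illustrative only):
    #
    #   model = MaskRCNN(mode="training", config=config, model_dir="./logs")
    #   model.train(train_dataset, val_dataset,
    #               learning_rate=config.LEARNING_RATE,
    #               epochs=40, layers="heads")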
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
different sizes.
        Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
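    # Example usage (hypothetical `config` and `image`; illustrative only):
    #
    #   model = MaskRCNN(mode="inference", config=config, model_dir="./logs")
    #   results = model.detect([image])  # len(images) must equal BATCH_SIZE
    #   r = results[0]
    #   boxes, masks = r["rois"], r["masks"]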
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array. Use
parse_image_meta() to parse the values back.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
        list(window) +          # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
return meta
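# A minimal round-trip sketch (illustrative only) showing that
# parse_image_meta() recovers what compose_image_meta() packs.
def _image_meta_roundtrip_demo():
    """Toy demo: compose a meta vector, then parse it back (batched)."""
    meta = compose_image_meta(image_id=7, image_shape=(1024, 1024, 3),
                              window=(0, 0, 1024, 1024),
                              active_class_ids=np.ones(4, dtype=np.int32))
    # parse_image_meta() (defined below) expects a batch dimension.
    return parse_image_meta(meta[np.newaxis, :])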
# Two functions (for Numpy and TF) to parse image_meta tensors.
def parse_image_meta(meta):
"""Parses an image info Numpy array to its components.
See compose_image_meta() for more details.
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
    window = meta[:, 4:8]  # (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return image_id, image_shape, window, active_class_ids
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8]
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
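# A minimal round-trip sketch (illustrative only; assumes a `config` with a
# MEAN_PIXEL attribute) for mold_image()/unmold_image().
def _mold_roundtrip_demo(config):
    """Toy demo: normalize an image and restore it (up to float round-off)."""
    image = np.random.randint(0, 256, (4, 4, 3)).astype(np.uint8)
    molded = mold_image(image, config)
    return unmold_image(molded, config)  # approximately equal to `image`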
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matricies of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
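# A minimal NumPy sketch (illustrative only) of batch_pack_graph() above.
def _batch_pack_numpy_demo():
    """Toy demo: keep counts[i] leading values from each row, then concatenate."""
    x = np.arange(6).reshape(2, 3)  # [[0, 1, 2], [3, 4, 5]]
    counts = [2, 1]                 # keep 2 values from row 0, 1 from row 1
    return np.concatenate([x[i, :counts[i]] for i in range(2)])  # -> [0, 1, 3]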
794113342d82ccd328c78cb773b16d666a06e7d7 | 2,459 | py | Python | PythonAPI/agents/navigation/roaming_agent.py | magh24/carla_RL_IAs | MIT
# Copyright (c) 2018 Intel Labs.
# authors: German Ros ([email protected])
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following
random waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle):
"""
        :param vehicle: actor to apply the local planner logic onto
"""
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlanner(self._vehicle)
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print("!!! VEHICLE BLOCKING AHEAD [{}])".format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print("=== RED LIGHT AHEAD [{}])".format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
return control
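    # Example usage (hypothetical; assumes a running CARLA server and a spawned
    # `vehicle` actor; illustrative only):
    #
    #   agent = RoamingAgent(vehicle)
    #   while True:
    #       control = agent.run_step(debug=True)
    #       vehicle.apply_control(control)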
7941145524ea474c54af1a798e242b8a96919315 | 2,097 | py | Python | Main.py | ChinaKevinLi/Leaving-Lock | Apache-2.0
import cv2
import numpy as np
import os
import time
end_time = 0
video_capture = cv2.VideoCapture(0)
known_face_encodings = [
]
user_path = "./users/"
user_name = "Owner"
saved_encodings = os.listdir(user_path + user_name + "/")
for file in saved_encodings:
file = user_path + user_name + "/" + file
known_face_encodings.append(np.load(file))
font = cv2.FONT_HERSHEY_DUPLEX  # define once, so frames with no detected face can still draw text
while True:
known_here = False
ret, frame = video_capture.read()
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame, model="cnn")
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
face_distances = list(face_distances <= 0.3)
if True in face_distances:
name = user_name
if (not known_here) and (name == user_name):
known_here = True
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
if known_here:
cv2.putText(frame, "Safe", (20, 40), font, 0.6, (0, 255, 255), 2)
end_time = time.time()
else:
start_time = time.time()
cv2.putText(frame, "Warning", (20, 40), font, 0.6, (0, 255, 255), 2)
if end_time < 1:
end_time = time.time()
elif start_time-end_time > 10:
os.system("rundll32.exe user32.dll LockWorkStation")
end_time = time.time()
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
79411854c09b9c43283264b3341d7643c4673988 | 35,586 | py | Python | atoMEC/postprocess/conductivity.py | timcallow/atoMEC | BSD-3-Clause
The conductivity module handles routines used to model the electrical conductivity.
So far just the Kubo-Greenwood method is implemented.
Classes
-------
* :class:`KuboGreenwood` : Holds various routines needed to compute the Kubo-Greenwood \
conductivity, including its various components. Also contains\
various properties related to the KG conductivity.
* :class:`SphHamInts`: Holds the routines to construct angular integrals from the \
spherical harmonic functions
* :class:`RadialInts`: Holds the routines to construct radial integrals from the radial\
KS orbitals
"""
# standard packages
import sys
from math import factorial
import functools
# external packages
import numpy as np
from scipy.special import lpmv
from scipy.integrate import quad
# internal modules
from atoMEC import mathtools
class KuboGreenwood:
"""Class for Kubo-Greenwood conductivity and MIS via TRK sum rule."""
def __init__(self, orbitals, valence_orbs=[], nmax=0, lmax=0):
self._orbitals = orbitals
self._xgrid = orbitals._xgrid
self._eigfuncs = orbitals._eigfuncs
self._eigvals = orbitals.eigvals
self._occnums = orbitals.occnums
self._DOS_w = orbitals.DOS * orbitals.kpt_int_weight
nbands, self._spindims, lmax_default, nmax_default = np.shape(self._eigvals)
if self._spindims == 2:
sys.exit(
"Kubo-Greenwood is not yet set-up for spin-polarized calculations. \
Please run again with spin-unpolarized input."
)
if nmax == 0:
self._nmax = nmax_default
else:
self._nmax = nmax
if lmax == 0:
self._lmax = lmax_default
else:
self._lmax = lmax
self.valence_orbs = valence_orbs
@property
def all_orbs(self):
r"""List of tuples: all the possible orbital pairings."""
all_orbs_tmp = []
for l in range(self._lmax):
for n in range(self._nmax):
all_orbs_tmp.append((l, n))
self._all_orbs = all_orbs_tmp
return self._all_orbs
@property
def cond_orbs(self):
r"""List of tuples: all the conduction band orbital pairings."""
cond_orbs_tmp = self.all_orbs
for val_orbs in self.valence_orbs:
cond_orbs_tmp.remove(val_orbs)
self._cond_orbs = cond_orbs_tmp
return self._cond_orbs
@property
def sig_tot(self):
r"""ndarray: the integrated total conductivity."""
self._sig_tot = self.calc_sig(
self.R1_int_tt, self.R2_int_tt, self.all_orbs, self.all_orbs
)
return self._sig_tot
@property
def sig_cc(self):
r"""ndarray: the integrated cc conductivity component."""
self._sig_cc = self.calc_sig(
self.R1_int_cc, self.R2_int_cc, self.cond_orbs, self.cond_orbs
)
return self._sig_cc
@property
def sig_vv(self):
r"""ndarray: the integrated vv conductivity component."""
self._sig_vv = self.calc_sig(
self.R1_int_vv, self.R2_int_vv, self.valence_orbs, self.valence_orbs
)
return self._sig_vv
@property
def sig_cv(self):
r"""ndarray: the integrated cv conductivity component."""
self._sig_cv = self.calc_sig(
self.R1_int_cv, self.R2_int_cv, self.cond_orbs, self.valence_orbs
)
return self._sig_cv
@property
def N_tot(self):
r"""float: the total electron number from TRK sum-rule."""
self._N_tot = self.sig_tot * (2 * self.sph_vol / np.pi)
return self._N_tot
@property
def N_free(self):
r"""float: the free electron number from TRK sum-rule."""
self._N_free = self.sig_cc * (2 * self.sph_vol / np.pi)
return self._N_free
@property
def sph_vol(self):
r"""float: the volume of the sphere."""
rmax = np.exp(self._xgrid)[-1]
V = (4.0 / 3.0) * np.pi * rmax ** 3.0
return V
def cond_tot(self, component="tt", gamma=0.01, maxfreq=50, nfreq=500):
"""
Calculate the chosen component of dynamical electrical conductivity sig(w).
Parameters
----------
component : str, optional
the desired component of the conductivity, e.g. "cc", "tt", etc.
gamma : float, optional
smoothing factor
maxfreq : float, optional
maximum frequency to scan up to
nfreq : int, optional
number of points in the frequency grid
Returns
-------
cond_tot_ : ndarray
dynamical electrical conductivity
"""
if component == "tt":
R1_int = self.R1_int_tt
R2_int = self.R2_int_tt
orb_subset_1 = self.all_orbs
orb_subset_2 = self.all_orbs
elif component == "cc":
R1_int = self.R1_int_cc
R2_int = self.R2_int_cc
orb_subset_1 = self.cond_orbs
orb_subset_2 = self.cond_orbs
elif component == "cv":
R1_int = self.R1_int_cv
R2_int = self.R2_int_cv
orb_subset_1 = self.cond_orbs
orb_subset_2 = self.valence_orbs
elif component == "vv":
R1_int = self.R1_int_vv
R2_int = self.R2_int_vv
orb_subset_1 = self.valence_orbs
orb_subset_2 = self.valence_orbs
else:
sys.exit("Component of conducivity not recognised")
cond_tot_ = self.calc_sig_func(
R1_int, R2_int, orb_subset_1, orb_subset_2, maxfreq, nfreq, gamma
)
return cond_tot_
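# Usage sketch (hypothetical names; `orbitals` must come from a converged
# atoMEC SCF calculation, which is assumed rather than shown here):
#   kg = KuboGreenwood(orbitals)
#   sig_omega, n_ele = kg.cond_tot(component="tt", gamma=0.01)
#   # sig_omega[:, 0] is the frequency grid, sig_omega[:, 1] is sigma(omega)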
@property
@functools.lru_cache
def R1_int_tt(self):
"""Total-total component of the R1 radial integral."""
R1_int_tt_ = RadialInts.calc_R1_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.all_orbs,
self.all_orbs,
)
return R1_int_tt_
@property
@functools.lru_cache
def R1_int_cc(self):
"""Conducting-conducting component of the R1 radial integral."""
R1_int_cc_ = RadialInts.calc_R1_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.cond_orbs,
self.cond_orbs,
)
return R1_int_cc_
@property
@functools.lru_cache
def R1_int_cv(self):
"""Conducting-valence component of the R1 radial integral."""
R1_int_cv_ = RadialInts.calc_R1_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.cond_orbs,
self.valence_orbs,
)
return R1_int_cv_
@property
@functools.lru_cache
def R1_int_vv(self):
"""Valence-valence component of the R1 radial integral."""
R1_int_vv_ = RadialInts.calc_R1_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.valence_orbs,
self.valence_orbs,
)
return R1_int_vv_
@property
@functools.lru_cache
def R2_int_tt(self):
"""Total-total component of the R2 radial integral."""
R2_int_tt_ = RadialInts.calc_R2_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.all_orbs,
self.all_orbs,
)
return R2_int_tt_
@property
@functools.lru_cache
def R2_int_cc(self):
"""Conducting-conducting component of the R2 radial integral."""
R2_int_cc_ = RadialInts.calc_R2_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.cond_orbs,
self.cond_orbs,
)
return R2_int_cc_
@property
@functools.lru_cache
def R2_int_cv(self):
"""Conducting-valence component of the R2 radial integral."""
R2_int_cv_ = RadialInts.calc_R2_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.cond_orbs,
self.valence_orbs,
)
return R2_int_cv_
@property
@functools.lru_cache
def R2_int_vv(self):
"""Valence-valence component of the R2 radial integral."""
R2_int_vv_ = RadialInts.calc_R2_int_mat(
self._eigfuncs,
self._occnums,
self._xgrid,
self.valence_orbs,
self.valence_orbs,
)
return R2_int_vv_
def check_sum_rule(self, l, n, m):
r"""
Check that the sum rule (see notes) for an orbital :math:`\phi_{nlm}` is satisfied.
Parameters
----------
l : int
angular quantum number
n : int
principal quantum number
m : int
magnetic quantum number
Returns
-------
sum_mom : ndarray
the momentum sum rule (see notes)
Notes
-----
The expression for the momentum sum rule is given by
.. math::
S_{p} = \sum_{(n_1,l_1,m_1)\neq (n,l,m)}\
\frac{|\langle\phi_{nlm}|\nabla|\phi_{n_1 l_1 m_1}\rangle|^2} {\
\epsilon_{n_1,l_1,m_1}-\epsilon_{n,l,m}}
If the sum rule is satisfied, the summation above should equal 1/2.
See Eq. (38) of Ref. [7]_ for an explanation of this sum rule.
References
----------
.. [7] Calderin, L. et al, "Kubo--Greenwood electrical conductivity formulation
and implementation for projector augmented wave datasets", Comp. Phys. Comms.
221 (2017): 118-142.
`DOI:doi.org/10.1016/j.cpc.2017.08.008
<https://doi.org/10.1016/j.cpc.2017.08.008>`__.
"""
# set up the orbitals to sum over
new_orbs = self.all_orbs
new_orbs.remove((l, n))
# initialize sum_mom and various indices
nbands, nspin, lmax, nmax = np.shape(self._eigvals)
sum_mom = np.zeros((nbands))
# compute the sum rule
for k in range(nbands):
for l1, n1 in new_orbs:
# the eigenvalue difference
eig_diff = self._eigvals[k, 0, l1, n1] - self._eigvals[k, 0, l, n]
# only states with |l-l_1|=1 contribute
if abs(l1 - l) != 1:
continue
else:
# scale eigenfunctions by sqrt(4 pi) due to different normalization
orb_l1n1 = np.sqrt(4 * np.pi) * self._eigfuncs[k, 0, l1, n1]
orb_ln = np.sqrt(4 * np.pi) * self._eigfuncs[k, 0, l, n]
# compute the matrix element <\phi|\grad|\phi> and its complex conj
if abs(m) > l1:
mel_sq = 0
else:
mel = self.calc_mel_grad_int(
orb_ln, orb_l1n1, l, n, l1, n1, m, self._xgrid
)
mel_cc = self.calc_mel_grad_int(
orb_l1n1, orb_ln, l1, n1, l, n, m, self._xgrid
)
mel_sq = np.abs(mel_cc * mel)
sum_mom[k] += mel_sq / eig_diff
return sum_mom
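# Usage sketch: for a well-converged basis each entry of the returned array
# should be close to 0.5 (see Notes). Indices and tolerance are illustrative:
#   sum_mom = kg.check_sum_rule(l=0, n=0, m=0)  # kg as in the sketch above
#   assert np.allclose(sum_mom, 0.5, atol=1e-2)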
def calc_sig(self, R1_int, R2_int, orb_subset_1, orb_subset_2):
r"""
Compute the *integrated* dynamical conductivity for given subsets (see notes).
Parameters
----------
R1_int : ndarray
the 'R1' radial component of the integrand (see notes)
R2_int : ndarray
the 'R2' radial component of the integrand (see notes)
orb_subset_1 : list of tuples
the first subset of orbitals to sum over
orb_subset_2 : list of tuples
the second subset of orbitals to sum over
Returns
-------
sig : float
the integrated dynamical conductivity
Notes
-----
This function returns the integrated dynamical conductivity,
:math:`\bar{\sigma}=\int_0^\infty d\omega \sigma(\omega)`. The conductivity
:math:`\sigma(\omega)` is defined as
.. math::
\sigma_{S_1,S_2}(\omega) = \frac{2\pi}{3V\omega}
\sum_{i\in S_1}\sum_{j\in S_2} (f_i - f_j)\
|\langle\phi_{i}|\nabla|\phi_{j}\rangle|^2\delta(\epsilon_j-\epsilon_i-\omega),
where :math:`S_1,S_2` denote the subsets of orbitals specified in the function's
parameters, e.g. the conduction-conduction orbitals.
In practice, the integral in the above equation is given by a discrete sum due
to the presence of the Dirac delta function.
The parameters `R1_int` and `R2_int` refer to radial integral components in the
calculation of the matrix elements. See the supplementary information of
Ref. [8]_ for more information on these components, and the functions
:func:`calc_R1_int_mat` and :func:`calc_R2_int_mat` for their definitions.
References
----------
.. [8] Callow, T.J. et al., "Accurate and efficient computation of mean
ionization states with an average-atom Kubo-Greenwood approach."
arXiv preprint arXiv:2203.05863 (2022).
`<https://arxiv.org/abs/2203.05863>`__.
"""
# get matrix dimensions
nbands, nspin, lmax, nmax = np.shape(self._occnums)
# compute the angular integrals (see functions for defns)
P2_int = SphHamInts.P_mat_int(2, lmax)
P4_int = SphHamInts.P_mat_int(4, lmax)
# compute the products of the radial and angular integrals
tmp_mat_1 = np.einsum("kabcd,ace->kabcde", R1_int, P2_int)
tmp_mat_2 = np.einsum("kabcd,ace->kabcde", R2_int, P4_int)
tmp_mat_3 = np.einsum("kcdab,cae->kabcde", R1_int, P2_int)
tmp_mat_4 = np.einsum("kcdab,cae->kabcde", R2_int, P4_int)
# compute the sum over the matrix element |< phi_nlm | nabla | phi_pqm >|^2
mel_sq_mat = np.sum(
np.abs((tmp_mat_1 + tmp_mat_2) * (tmp_mat_3 + tmp_mat_4)),
axis=-1,
)
# compute the f_nl - f_pq matrix
occ_diff_mat = self.calc_occ_diff_mat(self._occnums, orb_subset_1, orb_subset_2)
# compute the (e_nl - e_pq)^-1 matrix
eig_diff_mat = self.calc_eig_diff_mat(self._eigvals, orb_subset_1, orb_subset_2)
# put it all together for the integrated conductivity
sig_bare = np.einsum(
"kln,klnpq->", self._DOS_w[:, 0], mel_sq_mat * occ_diff_mat / eig_diff_mat
)
# multiply by prefactor 2*pi/V
sig = 2 * np.pi * sig_bare / self.sph_vol
return sig
def calc_sig_func(
self, R1_int, R2_int, orb_subset_1, orb_subset_2, omega_max, n_freq, gamma
):
r"""
Compute the dynamical conductivity for given subsets (see notes).
Parameters
----------
R1_int : ndarray
the 'R1' radial component of the integrand (see notes)
R2_int : ndarray
the 'R2' radial component of the integrand (see notes)
orb_subset_1 : list of tuples
the first subset of orbitals to sum over
orb_subset_2 : list of tuples
the second subset of orbitals to sum over
omega_max : float
maximum value of the frequency grid
n_freq : int
number of points in the frequency grid
gamma : float
smoothing factor for the Lorentzian
Returns
-------
sig_omega, nele: tuple (ndarray, float)
* sig_omega: 2d array containing frequency grid and conductivity
:math:`\sigma(\omega)`
* n_ele: the number of electrons from integration of :math:`\sigma(\omega)`;
equivalent to N_ij (for orb subsets ij) in the limit :math:`\gamma\to 0`
Notes
-----
This function returns the dynamical conductivity, :math:`\sigma(\omega)`,
defined as
.. math::
\sigma_{S_1,S2}(\omega) &= \frac{2\pi}{3V\omega}
\sum_{i\in S_1}\sum_{j\in S_2} (f_i - f_j)\
|\langle\phi_{i}|\nabla|\phi_{j}\rangle|^2\
\mathcal{L}(\epsilon_i, \epsilon_j, \gamma, \omega) \\
\mathcal{L}(\epsilon_i, \epsilon_j, \gamma, \omega)&=\
\frac{\gamma}{\pi}\frac{1}{\gamma^2+(\omega+[\epsilon_i-\epsilon_j)])^2}
where :math:`S_1,S_2` denote the subsets of orbitals specified in the function's
parameters, e.g. the conduction-conduction orbitals.
As can be seen in the above equation, the Dirac delta function in the definition
of the KG conductivity (see `calc_sig` function) is represented by a Lorentzian
distribution :math:`\mathcal{L}` to obtain a smooth conductivity function. In
the limit :math:`\gamma\to 0`, the Lorentzian becomes a delta function.
The parameters `R1_int` and `R2_int` refer to radial integral components in the
calculation of the matrix elements. See the supplementary information of
Ref. [8]_ for more information on these components, and the functions
:func:`calc_R1_int_mat` and :func:`calc_R2_int_mat` for their definitions.
"""
# get the dimensions of the array
nbands, nspin, lmax, nmax = np.shape(self._occnums)
# compute the angular momenta integrals
P2_int = SphHamInts.P_mat_int(2, lmax)
P4_int = SphHamInts.P_mat_int(4, lmax)
# put the angular and radial integrals together
tmp_mat_1 = np.einsum("kabcd,ace->kabcde", R1_int, P2_int)
tmp_mat_2 = np.einsum("kabcd,ace->kabcde", R2_int, P4_int)
tmp_mat_3 = np.einsum("kcdab,cae->kabcde", R1_int, P2_int)
tmp_mat_4 = np.einsum("kcdab,cae->kabcde", R2_int, P4_int)
mel_sq_mat = np.sum(
np.abs((tmp_mat_1 + tmp_mat_2) * (tmp_mat_3 + tmp_mat_4)),
axis=-1,
)
# compute the occupation number and eigenvalue differences
occ_diff_mat = self.calc_occ_diff_mat(self._occnums, orb_subset_1, orb_subset_2)
eig_diff_mat = self.calc_eig_diff_mat(self._eigvals, orb_subset_1, orb_subset_2)
# set up the frequency array - must start a bit above zero
# sqrt spacing from origin gives faster convergence wrt nfreq
omega_arr = np.linspace(1e-5, np.sqrt(omega_max), n_freq) ** 2
# set up lorentzian: requires dummy array to get right shape
sig_omega = np.zeros((np.size(omega_arr), 2))
omega_dummy_mat = np.ones((nbands, lmax, nmax, lmax, nmax, n_freq))
eig_diff_omega_mat = np.einsum(
"nijkl,nijklm->nijklm", eig_diff_mat, omega_dummy_mat
)
eig_diff_lorentz_mat = mathtools.lorentzian(
omega_arr, eig_diff_omega_mat, gamma
)
# put everything together to get the conductivity
mat1 = np.einsum(
"kln,klnpq->klnpq", self._DOS_w[:, 0], mel_sq_mat * occ_diff_mat
)
mat2 = eig_diff_lorentz_mat / eig_diff_omega_mat
# assign sig(w) and w to sig_omega array dimensions
sig_omega[:, 1] = (
np.einsum("nijkl,nijklm->m", mat1, mat2) * 2 * np.pi / self.sph_vol
)
sig_omega[:, 0] = omega_arr
# integrate and convert to get electron number
N_ele = self.sig_to_N(np.trapz(sig_omega[:, 1], x=omega_arr), self.sph_vol)
return sig_omega, N_ele
@staticmethod
def calc_occ_diff_mat(occnums, orb_subset_1, orb_subset_2):
"""
Compute the matrix of occupation number diffs -(f_l1n1 - f_l2n2).
Parameters
----------
occnums : ndarray
the (unweighted FD) KS occupation numbers
orb_subset_1 : tuple
the first subset of orbitals (eg valence)
orb_subset_2 : tuple
the second subset of orbitals (eg conduction)
Returns
-------
occ_diff_mat : ndarray
the occupation number difference matrix
"""
nbands, nspin, lmax, nmax = np.shape(occnums)
occ_diff_mat = np.zeros((nbands, lmax, nmax, lmax, nmax), dtype=np.float32)
for l1, n1 in orb_subset_1:
for l2, n2 in orb_subset_2:
occ_diff = -(occnums[:, 0, l1, n1] - occnums[:, 0, l2, n2])
# integral is one-sided wrt energy differences
occ_diff = np.where(occ_diff > 0.0, occ_diff, 0.0)
# only terms with l1 = l2 +/- 1 will contribute to final answer
if abs(l1 - l2) != 1:
continue
else:
occ_diff_mat[:, l1, n1, l2, n2] = occ_diff
return occ_diff_mat
@staticmethod
def calc_eig_diff_mat(eigvals, orb_subset_1, orb_subset_2):
"""
Compute the matrix of eigenvalue differences e_l1n1 - e_l2n2.
Parameters
----------
eigvals : ndarray
the KS energy eigenvalues
orb_subset_1 : tuple
the first subset of orbitals (eg valence)
orb_subset_2 : tuple
the second subset of orbitals (eg conduction)
Returns
-------
eig_diff_mat : ndarray
the eigenvalue difference matrix
"""
nbands, nspin, lmax, nmax = np.shape(eigvals)
eig_diff_mat = np.zeros((nbands, lmax, nmax, lmax, nmax), dtype=np.float32)
eig_diff_mat += 1e-6
for l1, n1 in orb_subset_1:
for l2, n2 in orb_subset_2:
eig_diff = eigvals[:, 0, l1, n1] - eigvals[:, 0, l2, n2]
# integral is one-sided wrt energy differences
eig_diff = np.where(eig_diff > 0, eig_diff, 1e-6)
# only terms with l1 = l2 +/- 1 will contribute to final answer
if abs(l1 - l2) != 1:
continue
else:
eig_diff_mat[:, l1, n1, l2, n2] = eig_diff
return eig_diff_mat
@staticmethod
def calc_mel_grad_int(orb_l1n1, orb_l2n2, l1, n1, l2, n2, m, xgrid):
r"""
Calculate the matrix element :math:`|<\phi_{n1l1}|\nabla|\phi_{n2l2}>|^2`.
Parameters
----------
orb_l1n1 : ndarray
l1,n1 radial KS orbital
orb_l2n2 : ndarray
l2,n2 radial KS orbital
l1 : int
1st angular momentum quantum number
n1 : int
1st principal quantum number
l2 : int
2nd angular momentum quantum number
n2 : int
2nd principal quantum number
m : int
magnetic quantum number
xgrid : ndarray
log grid
Returns
-------
mel_grad_int : float
the matrix element :math:`|<\phi_{n1l1}|\nabla|\phi_{n2l2}>|^2`.
"""
R1_int = RadialInts.calc_R1_int(orb_l1n1, orb_l2n2, xgrid)
R2_int = RadialInts.calc_R2_int(orb_l1n1, orb_l2n2, xgrid)
mel_grad_int = R1_int * SphHamInts.P_int(
2, l1, l2, m
) + R2_int * SphHamInts.P_int(4, l1, l2, m)
return mel_grad_int
@staticmethod
def sig_to_N(sig, V):
"""
Map the integrated conducivity to electron number.
Parameters
----------
sig : float
integrated conducivity
V : float
volume of sphere
Returns
-------
N_ele : float
electron number
"""
N_ele = sig * (2 * V / np.pi)
return N_ele
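# Usage sketch: combined with a valence/conduction split given via
# `valence_orbs`, the TRK sum rule maps integrated conductivities to electron
# counts, so a mean ionization state can be estimated (names as defined above):
#   Z_bar = kg.N_free   # free-electron count from the cc component
#   N_tot = kg.N_tot    # should approach the total electron number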
class SphHamInts:
"""Contains the functions needed to compute various spherical harmonic integrals."""
@classmethod
def P_mat_int(cls, func_int, lmax):
"""
Compute the matrix of P function (angular) integrals (see notes).
Parameters
----------
func_int : int
the desired P integral (can be 2 or 4)
lmax : int
the maximum value of angular momentum
Returns
-------
P_mat : ndarray
matrix of P func integrals for chosen func_int
Notes
-----
See Refs. [7]_ and [8]_ (supplemental material) for the definitions of the
P2 and P4 functions, and the :func:`P2_func`, :func:`P4_func` and
:func:`P_int` functions.
"""
P_mat = np.zeros((lmax, lmax, 2 * lmax + 1))
for l1 in range(lmax):
for l2 in range(lmax):
# sum rules mean all terms with l1!=l2 are zero
if abs(l1 - l2) == 1:
# m cannot exceed either of l1 or l2
lsmall = min(l1, l2)
for m in range(-lsmall, lsmall + 1):
P_mat[l1, l2, lsmall + m] = cls.P_int(func_int, l1, l2, m)
else:
continue
return P_mat
@classmethod
def P_int(cls, func_int, l1, l2, m):
r"""
Integrate the P2 or P4 function (see notes).
Parameters
----------
func_int : int
the desired P integral (can be 2 or 4)
l1 : int
1st angular quantum number
l2 : int
2nd angular quantum number
m : int
magnetic quantum number
Returns
-------
P_int_ : float
the integrated P2 or P4 function
Notes
-----
The integrals are defined as
.. math::
\bar{P}^{(n)}_{ll'm} = 2\pi c_{lm}c_{l'm}\int_{-1}^1 dx \
f_p^{(n)}[l_1,l_2,m](x)
With the functions :math:`f_p^{(n)}(x)` defined below (:func:`P2_func`
and :func:`P4_func`).
"""
if func_int == 2:
integ = quad(cls.P2_func, -1, 1, args=(l1, l2, m))[0]
elif func_int == 4:
integ = quad(cls.P4_func, -1, 1, args=(l1, l2, m))[0]
else:
sys.exit("Error: func_int value not recognised, must be 2 or 4")
P_int_ = 2 * np.pi * cls.sph_ham_coeff(l1, m) * cls.sph_ham_coeff(l2, m) * integ
return P_int_
@staticmethod
def P2_func(x, l1, l2, m):
r"""
Calculate the 'P2' function (see notes).
Parameters
----------
x : float
input for Legendre polynomial
l1 : int
1st angular quantum number
l2 : int
2nd angular quantum number
m : int
magnetic quantum number
Returns
-------
P2_func_ : float
the P2 function
Notes
-----
The P2 function is defined as (see also Refs. [7]_ and [8]_)
.. math::
f_p^{(2)}[l_1,l_2,m](x) = x P_{l_1}^m (x) P_{l_2}^m (x)
where :math:`P_{l}^m(x)` are Legendre polynomial functions.
"""
P2_func_ = x * lpmv(m, l1, x) * lpmv(m, l2, x)
return P2_func_
@staticmethod
def P4_func(x, l1, l2, m):
r"""
Calculate the 'P4' function (see notes).
Parameters
----------
x : float
input for Legendre polynomial
l1 : int
1st angular quantum number
l2 : int
2nd angular quantum number
m : int
magnetic quantum number
Returns
-------
P4_func_ : float
the P4 function
Notes
-----
The P4 function is defined as (see also Refs. [7]_ and [8]_)
.. math::
f_p^{(4)}[l_1,l_2,m](x)&=-(1-x)^2 P^m_{l_1}(x) \frac{dP_{l_2}^m(x)}{dx}\\
&= P^m_{l_1}(x) [(l_2+m)P_{l_2-1}^m(x)-xl_2\
P_{l_2}^m(x)]
where :math:`P_{l}^m(x)` are Legendre polynomial functions.
"""
if (l2 + m) != 0:
factor = (l2 + m) * lpmv(m, l2 - 1, x) - l2 * x * lpmv(m, l2, x)
else:
factor = -l2 * x * lpmv(m, l2, x)
return lpmv(m, l1, x) * factor
@staticmethod
def sph_ham_coeff(l, m):
r"""
Compute coefficients of spherical harmonic functions.
Parameters
----------
l : int
angular quantum number
m : int
magnetic quantum number
Returns
-------
c_lm : float
coefficient for spherical harmonic function (l,m) (see notes)
Notes
-----
The spherical harmonic functions with coefficients :math:`c_{lm}` are defined as
.. math::
Y_m^l(\theta,\phi) &= c_{lm} P_l^m (\cos\theta) e^{im\phi}\\
c_{lm} &= \sqrt{\frac{(2l+1)(l-m)!}{4\pi(l+m)!}}
"""
c_lm = np.sqrt((2 * l + 1) * factorial(l - m) / (factorial(l + m) * 4 * np.pi))
return c_lm
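# Numerical cross-check (sketch): c_lm times the associated Legendre
# polynomial should match the magnitude of scipy's spherical harmonic;
# scipy.special.sph_harm and its (m, l, azimuth, polar) signature are assumed:
#   from scipy.special import sph_harm
#   c = SphHamInts.sph_ham_coeff(2, 1)
#   abs(c * lpmv(1, 2, np.cos(0.3)))  # ~ abs(sph_harm(1, 2, 0.0, 0.3))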
class RadialInts:
"""Contains functions required to compute various integrals of the radial KS fns."""
@classmethod
def calc_R1_int_mat(cls, eigfuncs, occnums, xgrid, orb_subset_1, orb_subset_2):
r"""
Compute the 'R1' integral matrix (see notes).
Parameters
----------
eigfuncs : ndarray
the KS eigenfunctions
occnums : ndarray
the KS occupation numbers
xgrid : ndarray
the log grid
orb_subset_1 : tuple
the first subset of orbitals (eg valence)
orb_subset_2 : tuple
the second subset of orbitals (eg conduction)
Returns
-------
R1_mat : ndarray
the R1 integral matrix (see notes)
Notes
-----
The definition of the R1 integral is (see Ref. [7]_ and supplementary of [8]_)
.. math::
R^{(1)}=4\pi\int_0^R dr r^2 X_{n_1 l_1}(r) \frac{dX_{n_2 l_2}(r)}{dr},
where :math:`X_{nl}(r)` are the radial KS functions.
"""
# compute the gradient dP/dx of the eigenfunctions on the log grid
deriv_orb2 = np.gradient(eigfuncs, xgrid, axis=-1, edge_order=2)
# chain rule to convert from dP_dx to dX_dr
grad_orb2 = np.exp(-1.5 * xgrid) * (deriv_orb2 - 0.5 * eigfuncs)
# initialize the matrix
nbands, nspin, lmax, nmax = np.shape(occnums)
R1_mat = np.zeros((nbands, lmax, nmax, lmax, nmax), dtype=np.float32)
# integrate over the sphere
for l1, n1 in orb_subset_1:
for l2, n2 in orb_subset_2:
# only l1 = l2 +/- 1 terms are non-zero
if abs(l1 - l2) != 1:
continue
else:
R1_mat[:, l1, n1, l2, n2] = cls.R1_int_term(
eigfuncs[:, 0, l1, n1], grad_orb2[:, 0, l2, n2], xgrid
)
# non-symmetric term
if orb_subset_1 != orb_subset_2:
R1_mat[:, l2, n2, l1, n1] = cls.R1_int_term(
eigfuncs[:, 0, l2, n2], grad_orb2[:, 0, l1, n1], xgrid
)
return R1_mat
@staticmethod
def R1_int_term(eigfunc, grad_orb2, xgrid):
"""
Input function to the :func:`calc_R1_int_mat` function.
Parameters
----------
eigfunc : ndarray
KS orbital l1,n1
grad_orb2 : ndarray
derivative of KS orbital l2,n2
xgrid : ndarray
log grid
Returns
-------
R1_int : float
the matrix element for the R1_int_mat function
"""
func_int = eigfunc * np.exp(-xgrid / 2.0) * grad_orb2
R1_int = 4 * np.pi * np.trapz(np.exp(3.0 * xgrid) * func_int, xgrid)
return R1_int
@classmethod
def calc_R2_int_mat(cls, eigfuncs, occnums, xgrid, orb_subset_1, orb_subset_2):
r"""
Compute the 'R2' integral matrix (see notes).
Parameters
----------
eigfuncs : ndarray
the KS eigenfunctions
occnums : ndarray
the KS occupation numbers
xgrid : ndarray
the log grid
orb_subset_1 : tuple
the first subset of orbitals (eg valence)
orb_subset_2 : tuple
the second subset of orbitals (eg conduction)
Returns
-------
R2_mat : ndarray
the R2 integral matrix (see notes)
Notes
-----
The definition of the R2 integral is (see Ref. [7]_ and supplementary of [8]_)
.. math::
R^{(2)}=4\pi\int_0^R dr r X_{n_1 l_1}(r) X_{n_2 l_2}(r),
where :math:`X_{nl}(r)` are the radial KS functions.
"""
# initialize the matrix
nbands, nspin, lmax, nmax = np.shape(occnums)
R2_mat = np.zeros((nbands, lmax, nmax, lmax, nmax), dtype=np.float32)
# integrate over the sphere
for l1, n1 in orb_subset_1:
for l2, n2 in orb_subset_2:
if abs(l1 - l2) != 1:
continue
else:
R2_mat[:, l1, n1, l2, n2] = cls.R2_int_term(
eigfuncs[:, 0, l1, n1], eigfuncs[:, 0, l2, n2], xgrid
)
if orb_subset_1 != orb_subset_2:
R2_mat[:, l2, n2, l1, n1] = cls.R2_int_term(
eigfuncs[:, 0, l2, n2], eigfuncs[:, 0, l1, n1], xgrid
)
return R2_mat
@staticmethod
def R2_int_term(eigfunc_1, eigfunc_2, xgrid):
"""
Input function to the :func:`calc_R2_int_mat` function.
Parameters
----------
eigfunc_1 : ndarray
KS orbital l1,n1
eigfunc_2 : ndarray
KS orbital l2,n2
xgrid : ndarray
log grid
Returns
-------
R2_int : float
the matrix element for the R2_int_mat function
"""
R2_int = 4 * np.pi * np.trapz(np.exp(xgrid) * eigfunc_1 * eigfunc_2, xgrid)
return R2_int
@staticmethod
def calc_R1_int(orb1, orb2, xgrid):
r"""
Compute the R1 integral between two orbitals orb1 and orb2 (see notes).
Parameters
----------
orb1 : ndarray
the first radial orbital
orb2 : ndarray
the second radial orbital
Returns
-------
R1_int : ndarray
the R1 integral
Notes
-----
See :func:`calc_R1_int_mat` for definition of the integral
"""
# compute the gradient dP/dx of orb2 on the log grid
deriv_orb2 = np.gradient(orb2, xgrid, axis=-1, edge_order=2)
# chain rule to convert from dP_dx to dX_dr
grad_orb2 = np.exp(-1.5 * xgrid) * (deriv_orb2 - 0.5 * orb2)
# integrate over the sphere
func_int = orb1 * np.exp(-xgrid / 2.0) * grad_orb2
R1_int = np.trapz(np.exp(3.0 * xgrid) * func_int, xgrid)
return R1_int
@staticmethod
def calc_R2_int(orb1, orb2, xgrid):
r"""
Compute the R2 integral between two orbitals orb1 and orb2 (see notes).
Parameters
----------
orb1 : ndarray
the first radial orbital
orb2 : ndarray
the second radial orbital
Returns
-------
R2_int : ndarray
the R2 integral
Notes
-----
See :func:`calc_R2_int_mat` for definition of the integral
"""
func_int = np.exp(xgrid) * orb1 * orb2
R2_int = np.trapz(func_int, xgrid)
return R2_int
| 32.767956 | 91 | 0.552268 |
7941186b6329ee619ffa553b68ea6005c5082a73 | 1,244 | py | Python | test.py | jannctu/TIN | bb1a49a16e4f206b114aa3abf3dda3726a8a6b45 | [
"MIT"
] | 35 | 2020-05-23T09:20:52.000Z | 2022-03-25T02:09:04.000Z | test.py | jannctu/TIN | bb1a49a16e4f206b114aa3abf3dda3726a8a6b45 | [
"MIT"
] | 7 | 2020-05-29T08:41:51.000Z | 2021-05-28T08:24:55.000Z | test.py | jannctu/TIN | bb1a49a16e4f206b114aa3abf3dda3726a8a6b45 | [
"MIT"
] | 11 | 2020-05-29T06:57:38.000Z | 2022-03-06T08:11:59.000Z | import torch
from model import TIN
import os
from os.path import join
import numpy as np
from PIL import Image
import scipy.io as io
import cv2
import time
test_img = 'img/mri_brain.jpg'
## READ IMAGE
im = np.array(cv2.imread(test_img), dtype=np.float32)
## Multiscale
scales = [0.5,1.0,1.5]
images = []
for scl in scales:
img_scale = cv2.resize(im, None, fx=scl, fy=scl, interpolation=cv2.INTER_LINEAR)
images.append(img_scale.transpose(2, 0, 1)) # (H x W x C) to (C x H x W)
## CREATE MODEL
weight_file = 'weights/TIN2.pth'
model = TIN(False,2)
model.cuda()
model.eval()
#load weight
checkpoint = torch.load(weight_file)
model.load_state_dict(checkpoint)
## FEED FORWARD
h, w, _ = im.shape
ms_fuse = np.zeros((h, w))
with torch.no_grad():
for img in images:
img = img[np.newaxis, :, :, :]
img = torch.from_numpy(img)
img = img.cuda()
out = model(img)
fuse = out[-1].squeeze().detach().cpu().numpy()
fuse = cv2.resize(fuse, (w, h), interpolation=cv2.INTER_LINEAR)
ms_fuse += fuse
ms_fuse /= len(scales)
filename = 'mri_brain'
result = Image.fromarray(255-(ms_fuse * 255).astype(np.uint8))
result.save( "img/result_%s.png" % filename)
print('finished.')
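# Optional sketch: scipy.io is imported above but unused; the raw fused edge
# map could also be exported for MATLAB-style benchmarks, e.g.:
#   io.savemat("img/result_%s.mat" % filename, {"result": ms_fuse})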
| 24.88 | 84 | 0.659164 |
7941186f68e1801a850754ef50254b529df91fbd | 1,279 | py | Python | youtv/utils/random_data.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | null | null | null | youtv/utils/random_data.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | null | null | null | youtv/utils/random_data.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | null | null | null | import random
import string
def letters_lower_case(length=5, chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for i in range(length))
def random_lower_string_with_numbers(length):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(length))
def letters_upper_case(length, chars=string.ascii_uppercase):
return ''.join(random.choice(chars) for i in range(length))
def random_phone(length):
return ''.join(random.choice(string.digits) for i in range(length))
def random_number(length):
return ''.join(random.choice(string.digits) for i in range(length))
def random_password(length):
return ''.join(random.choice(string.ascii_letters + string.digits + string.punctuation) for i in range(length))
def random_email_address():
return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(15)) + "@youtv-automation.dk"
def random_name():
return ''.join(random.choice(string.ascii_lowercase) for i in range(4)) + " " + ''.join(
random.choice(string.ascii_lowercase) for i in range(4))
def random_address():
return ''.join(random.choice(string.ascii_lowercase) for i in range(4)) + " " + ''.join(
random.choice(string.digits) for i in range(4))
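# Illustrative outputs (values are random; shown only to document the shapes):
#   letters_lower_case() -> 'qhzkt'
#   random_phone(8) -> '58210437'
#   random_name() -> 'abcd wxyz'
#   random_email_address() -> 'k3j9x0q2m1z8a7b@youtv-automation.dk'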
| 31.195122 | 117 | 0.724003 |
7941189f2dcd54afa035720814d2d2bcb6069229 | 2,919 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/get_metadata_activity.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/get_metadata_activity.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/get_metadata_activity.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .execution_activity import ExecutionActivity
class GetMetadataActivity(ExecutionActivity):
"""Activity to get metadata of dataset.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param name: Required. Activity name.
:type name: str
:param description: Activity description.
:type description: str
:param depends_on: Activity depends on condition.
:type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:param user_properties: Activity user properties.
:type user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:param type: Required. Constant filled by server.
:type type: str
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param policy: Activity policy.
:type policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:param dataset: Required. GetMetadata activity dataset reference.
:type dataset: ~azure.mgmt.datafactory.models.DatasetReference
:param field_list: Fields of metadata to get from dataset.
:type field_list: list[object]
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'dataset': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
'user_properties': {'key': 'userProperties', 'type': '[UserProperty]'},
'type': {'key': 'type', 'type': 'str'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'dataset': {'key': 'typeProperties.dataset', 'type': 'DatasetReference'},
'field_list': {'key': 'typeProperties.fieldList', 'type': '[object]'},
}
def __init__(self, **kwargs):
super(GetMetadataActivity, self).__init__(**kwargs)
self.dataset = kwargs.get('dataset', None)
self.field_list = kwargs.get('field_list', None)
self.type = 'GetMetadata'
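# Minimal usage sketch (names and metadata fields are illustrative, not taken
# from SDK documentation); DatasetReference comes from the same models package:
#   activity = GetMetadataActivity(
#       name='GetBlobMetadata',
#       dataset=DatasetReference(reference_name='MyDataset'),
#       field_list=['itemName', 'lastModified'])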
| 42.926471 | 94 | 0.643714 |
794118a571e90ba74c7040c331046ba9013e7609 | 680 | py | Python | sa/migrations/0143_managed_object_profile_report_attempts.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/migrations/0143_managed_object_profile_report_attempts.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/migrations/0143_managed_object_profile_report_attempts.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# managedobjectprofile report_ping_attempts
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
self.db.add_column(
"sa_managedobjectprofile",
"report_ping_attempts",
models.BooleanField("Report RTT", default=False),
)
| 30.909091 | 72 | 0.476471 |
794118ccf0e6ddae74a2941442b8f828952047e3 | 5,770 | py | Python | tests/model/test_base_component.py | taiga4112/pDESy | ff2a77a3f7ad1714960dfabfcb977279a0b53d09 | [
"MIT"
] | null | null | null | tests/model/test_base_component.py | taiga4112/pDESy | ff2a77a3f7ad1714960dfabfcb977279a0b53d09 | [
"MIT"
] | 2 | 2020-09-20T10:23:28.000Z | 2020-09-23T02:34:13.000Z | tests/model/test_base_component.py | taiga4112/pDESy | ff2a77a3f7ad1714960dfabfcb977279a0b53d09 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pDESy.model.base_component import BaseComponent
from pDESy.model.base_task import BaseTask, BaseTaskState
from pDESy.model.base_factory import BaseFactory
import datetime
def test_init():
c1 = BaseComponent("c1")
assert c1.name == "c1"
assert len(c1.ID) > 0
c2 = BaseComponent("c2")
task = BaseTask("task")
c = BaseComponent(
"c",
ID="xx88xx",
child_component_list=[c1],
parent_component_list=[c2],
targeted_task_list=[task],
space_size=2.0,
placed_factory=BaseFactory("t"),
placed_factory_id_record=["fff"],
)
assert c.name == "c"
assert c.ID == "xx88xx"
assert c.child_component_list == [c1]
assert c.parent_component_list == [c2]
assert c.targeted_task_list == [task]
assert c.space_size == 2.0
assert c.placed_factory.name == "t"
assert c.placed_factory_id_record == ["fff"]
def test_extend_child_component_list():
c = BaseComponent("c")
assert c.parent_component_list == []
c1 = BaseComponent("c1")
c2 = BaseComponent("c2")
c.extend_child_component_list([c1, c2])
assert c.child_component_list == [c1, c2]
assert c1.parent_component_list == [c]
assert c2.parent_component_list == [c]
def test_append_child_component():
c = BaseComponent("c")
assert c.parent_component_list == []
c1 = BaseComponent("c1")
c2 = BaseComponent("c2")
c.append_child_component(c1)
c1.append_child_component(c2)
assert c.child_component_list == [c1]
assert c1.child_component_list == [c2]
assert c2.parent_component_list == [c1]
assert c1.parent_component_list == [c]
def test_set_placed_factory():
c = BaseComponent("c")
c1 = BaseComponent("c1")
c2 = BaseComponent("c2")
c.append_child_component(c1)
c1.append_child_component(c2)
factory = BaseFactory("factory")
c.set_placed_factory(factory, set_to_all_children=False)
assert c.placed_factory == factory
assert c1.placed_factory is None
assert c2.placed_factory is None
c.set_placed_factory(factory, set_to_all_children=True)
assert c.placed_factory == factory
assert c1.placed_factory == factory
assert c2.placed_factory == factory
def test_is_ready():
c = BaseComponent("c")
task1 = BaseTask("task1")
task2 = BaseTask("task2")
c.extend_targeted_task_list([task1, task2])
assert c.is_ready() is False
# case 1
task1.state = BaseTaskState.READY
assert c.is_ready() is True
# case 2
task2.state = BaseTaskState.WORKING
assert c.is_ready() is False
# case 3
task2.state = BaseTaskState.FINISHED
assert c.is_ready() is True
# case 4
task1.state = BaseTaskState.FINISHED
task2.state = BaseTaskState.FINISHED
assert c.is_ready() is False
def test_extend_targeted_task_list():
c = BaseComponent("c")
assert c.parent_component_list == []
task1 = BaseTask("task1")
task2 = BaseTask("task2")
c.extend_targeted_task_list([task1, task2])
assert c.targeted_task_list == [task1, task2]
assert task1.target_component == c
assert task2.target_component == c
def test_append_targeted_task():
c = BaseComponent("c")
assert c.parent_component_list == []
task = BaseTask("task1")
assert task.target_component is None
c.append_targeted_task(task)
assert c.targeted_task_list == [task]
assert task.target_component == c
def test_initialize():
pass
def test_str():
print(BaseComponent("c1"))
def test_create_data_for_gantt_plotly():
c = BaseComponent("c")
task1 = BaseTask("task1")
task2 = BaseTask("task2")
c.extend_targeted_task_list([task1, task2])
# Set test case (start time = 0, finish time = 5)
task1.start_time_list = [0, 2]
task1.ready_time_list = [0, 2]
task1.finish_time_list = [3, 5]
task2.start_time_list = [1]
task2.ready_time_list = [2]
task2.finish_time_list = [5]
init_datetime = datetime.datetime(2020, 4, 1, 8, 0, 0)
# timedelta = 1 day
timedelta = datetime.timedelta(days=1)
df = c.create_data_for_gantt_plotly(init_datetime, timedelta)
assert df[0]["Start"] == (init_datetime + 0 * timedelta).strftime(
"%Y-%m-%d %H:%M:%S"
)
assert df[0]["Finish"] == (init_datetime + (5 + 1.0) * timedelta).strftime(
"%Y-%m-%d %H:%M:%S"
)
assert df[0]["Type"] == "Component"
# def test_get_state_record_list():
# c = BaseComponent("c")
# task1 = BaseTask("task1")
# task2 = BaseTask("task2")
# c.extend_targeted_task_list([task1, task2])
# # Set test case
# task1.ready_time_list = [0, 4]
# task1.start_time_list = [1, 5]
# task1.finish_time_list = [2, 6]
# task2.ready_time_list = [1]
# task2.start_time_list = [2]
# task2.finish_time_list = [3]
# assert c.get_state_record_list(auto_task=False) == [
# BaseTaskState.READY,
# BaseTaskState.WORKING,
# BaseTaskState.WORKING,
# BaseTaskState.FINISHED,
# BaseTaskState.READY,
# BaseTaskState.WORKING,
# BaseTaskState.FINISHED,
# ]
# def test_get_ready_start_finish_time_list():
# c = BaseComponent("c")
# task1 = BaseTask("task1")
# task2 = BaseTask("task2")
# c.extend_targeted_task_list([task1, task2])
# # Set test case
# task1.ready_time_list = [0, 4]
# task1.start_time_list = [1, 5]
# task1.finish_time_list = [2, 6]
# task2.ready_time_list = [1]
# task2.start_time_list = [2]
# task2.finish_time_list = [3]
# rlist, slist, flist = c.get_ready_start_finish_time_list(auto_task=True)
# assert rlist == [0, 4]
# assert slist == [1, 5]
# assert flist == [3, 6]
| 28.009709 | 79 | 0.654766 |
79411919189c8dfdd78707a956f145c2a62b2e0c | 3,352 | py | Python | cleff/profiles/urls.py | lancekrogers/music-network | e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097 | [
"Apache-2.0"
] | null | null | null | cleff/profiles/urls.py | lancekrogers/music-network | e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097 | [
"Apache-2.0"
] | null | null | null | cleff/profiles/urls.py | lancekrogers/music-network | e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097 | [
"Apache-2.0"
] | 1 | 2015-08-12T20:51:04.000Z | 2015-08-12T20:51:04.000Z | from django.conf.urls import include, url
from django.contrib.auth.views import login, logout
from django.http import Http404
from .views import musician_registration, non_musician_registration, choose, musician_profile, \
non_musician_profile, update_musician_profile, musician_add_time_frame, musician_update_time_frame, add_genre,\
update_genres, add_instrument, update_instruments, musician_add_location, update_musician_location, \
youtube_url_decoder_view, update_video, update_friends, add_profile_image, add_profile_image_non_musician, \
update_non_musician_profile, update_watched_musicians, LocationCreateView, change_search_area_range, \
non_musician_change_search_area_range, MusicianPublicProfile
urlpatterns = [
url(r'^register-musician/$',
musician_registration,
name='register_musician'),
url(r'^register-non-musician/$',
non_musician_registration,
name='register_non_musician'),
url(r'^login/',
login,
name='Login'),
url(r'^logout/',
logout, {'next_page': 'main:home'},
name='Logout'),
url(r'^choose/$',
choose, name='choose'),
url(r'^musician/$',
musician_profile,
name='musician_profile'),
url(r'^non-musician/$',
non_musician_profile,
name='non_musician_profile'),
url(r'^musician/update/$',
update_musician_profile,
name='musician_update'),
url(r'^add-availability/',
musician_add_time_frame,
name='add_availability'),
url(r'^update-availability/',
musician_update_time_frame,
name='update_availability'),
url(r'^add-genre/',
add_genre,
name='add_genre'),
url(r'^update-genre/',
update_genres,
name='update_genres'),
url(r'^add-family/',
add_instrument,
name='add_instrument'),
url(r'^update-family/',
update_instruments,
name='update_instruments'),
url(r'^musisian-add-location/',
LocationCreateView.as_view(),
name='add_musician_location'),
url(r'^musician-update-location/',
update_musician_location,
name='update_musician_location'),
url(r'^add-video/',
youtube_url_decoder_view,
name='add_youtube_url'),
url(r'^update-video/',
update_video,
name='update_video'),
url(r'^update-friends/',
update_friends,
name='update_friends'),
url(r'^musician-profile-image/',
add_profile_image,
name='musician_profile_image'),
url(r'^non-musician-profile-image/',
add_profile_image_non_musician,
name='non_musician_profile_image'),
url(r'^non-musician/update/$',
update_non_musician_profile,
name='non_musician_update'),
url(r'^update-watched-musicians/',
update_watched_musicians,
name='update_watched_musicians'),
url(r'^musician-public-profile/(?P<pk>\d+)/',
Http404,
name='public_profile'),
url(r'^update-range-musician/',
change_search_area_range,
name='update_area_range'),
url(r'update-range-nonmusician/',
non_musician_change_search_area_range,
name='update_non_area_range'),
url(r'^musician-public/(?P<pk>\d+)/$',
MusicianPublicProfile.as_view(),
name='m_public_profile'),
]
| 34.556701 | 115 | 0.663783 |
79411924d4ef7a9cb7ca61d98db9018f3ea695ec | 1,054 | py | Python | kubernetes/test/test_v1_persistent_volume_claim_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/test/test_v1_persistent_volume_claim_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_persistent_volume_claim_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
class TestV1PersistentVolumeClaimList(unittest.TestCase):
""" V1PersistentVolumeClaimList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1PersistentVolumeClaimList(self):
"""
Test V1PersistentVolumeClaimList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_persistent_volume_claim_list.V1PersistentVolumeClaimList()
pass
if __name__ == '__main__':
unittest.main()
| 23.422222 | 105 | 0.734345 |
79411963c55e8016bb5d9e7440580663617c8785 | 5,493 | py | Python | hw/ip/otbn/dv/otbnsim/test/stats_test.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | [
"Apache-2.0"
] | 1 | 2020-05-11T05:18:20.000Z | 2020-05-11T05:18:20.000Z | hw/ip/otbn/dv/otbnsim/test/stats_test.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | [
"Apache-2.0"
] | 1 | 2022-02-15T22:20:51.000Z | 2022-02-15T22:20:51.000Z | hw/ip/otbn/dv/otbnsim/test/stats_test.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | [
"Apache-2.0"
] | 1 | 2021-12-04T06:08:11.000Z | 2021-12-04T06:08:11.000Z | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import py
import os
from sim.sim import OTBNSim
from sim.stats import ExecutionStats
import testutil
def _run_sim_for_stats(sim: OTBNSim) -> ExecutionStats:
sim.run(verbose=False, collect_stats=True)
# Ensure that the execution was successful.
assert sim.state.ext_regs.read('ERR_BITS', False) == 0
assert sim.stats
return sim.stats
def _simulate_asm_file(asm_file: str, tmpdir: py.path.local) -> ExecutionStats:
'''Run the OTBN simulator, collect statistics, and return them.'''
sim = testutil.prepare_sim_for_asm_file(asm_file, tmpdir, start_addr=0)
return _run_sim_for_stats(sim)
def _simulate_asm_str(assembly: str, tmpdir: py.path.local) -> ExecutionStats:
sim = testutil.prepare_sim_for_asm_str(assembly, tmpdir, start_addr=0)
return _run_sim_for_stats(sim)
def test_basic_block_stats(tmpdir: py.path.local) -> None:
'''Check if statistics for basic blocks are calculated correctly.'''
asm = """
jump:
/* A basic block of 4 instructions, ending with a jump. */
addi x7, x7, 1
addi x7, x7, 2
addi x7, x7, 3
jal x0, branch1
nop /* Should never be executed. */
branch1:
/* A basic block of 3 instructions, ending with a branch. */
addi x7, x7, 4
addi x7, x7, -10
beq x7, x0, branch2
branch2:
/* A basic block of 3 instructions, ending with a branch. */
addi x7, x7, 4
addi x7, x7, -4
beq x7, x0, exit
nop /* Should never be executed. */
exit:
ecall
"""
stats = _simulate_asm_str(asm, tmpdir)
assert stats.get_insn_count() == 11
assert sorted(stats.basic_block_histo) == sorted({
4: 1, # 1 basic block with 4 instructions (jump)
3: 2, # 2 basic blocks with 3 instructions (branch1 and branch2)
1: 1 # 1 basic block with 1 instruction (exit)
})
assert sorted(stats.ext_basic_block_histo) == sorted({
7: 1, # 1 ext. basic block with 7 instructions (jump + branch1)
3: 2, # 1 ext. basic block with 3 instructions (branch2)
1: 1 # 1 ext. basic block with 1 instruction (exit)
})
def test_basic_block_stats_loop(tmpdir: py.path.local) -> None:
'''Check if statistics for basic blocks in LOOPs are calculated properly.'''
asm = """
/* Loop x7 == 3 times over a body of 2 instructions. */
addi x7, x0, 3
loop x7, 2
nop
nop
ecall
"""
stats = _simulate_asm_str(asm, tmpdir)
assert stats.get_insn_count() == 9
assert sorted(stats.basic_block_histo) == sorted({
4: 1, # 1 basic block with 4 instructions (addi + loop + 2x nop)
2: 1, # 1 basic block with 2 instructions (loop body on second iter.)
1: 1 # 1 basic block with 1 instruction (ecall)
})
assert sorted(stats.ext_basic_block_histo) == sorted(stats.basic_block_histo)
def test_basic_block_stats_loopi(tmpdir: py.path.local) -> None:
'''Check if statistics for basic blocks in LOOPIs are calculated properly'''
asm = """
/* Loop 3 times over a body of 2 instructions. */
loopi 3, 2
nop
nop
ecall
"""
stats = _simulate_asm_str(asm, tmpdir)
assert stats.get_insn_count() == 8
assert sorted(stats.basic_block_histo) == sorted({
3: 1, # 1 basic block with 3 instructions (loopi + 2x nop)
2: 1, # 1 basic block with 2 instructions (loop body on second iter.)
1: 1 # 1 basic block with 1 instruction (ecall)
})
assert sorted(stats.ext_basic_block_histo) == sorted({
8: 1 # All instructions are statically determined.
})
def test_general_and_loop(tmpdir: py.path.local) -> None:
'''Test the collection of general statistics as well as loop stats.'''
asm_file = os.path.join(os.path.dirname(__file__),
'simple', 'loops', 'loops.s')
stats = _simulate_asm_file(asm_file, tmpdir)
# General statistics
assert stats.stall_count == 2
assert stats.get_insn_count() == 28
assert stats.insn_histo == {'addi': 22, 'loop': 4, 'loopi': 1, 'ecall': 1}
assert stats.func_calls == []
# Loop statistics.
exp = [
# Outer LOOPI
{'iterations': 4, 'loop_addr': 8, 'loop_len': 4},
# Inner LOOP
{'iterations': 3, 'loop_addr': 16, 'loop_len': 1},
{'iterations': 3, 'loop_addr': 16, 'loop_len': 1},
{'iterations': 3, 'loop_addr': 16, 'loop_len': 1},
{'iterations': 3, 'loop_addr': 16, 'loop_len': 1}
]
assert stats.loops == exp
def test_func_call_direct(tmpdir: py.path.local) -> None:
'''Test the collection of statistics related to loops.'''
asm_file = os.path.join(os.path.dirname(__file__),
'simple', 'subroutines', 'direct-call.s')
stats = _simulate_asm_file(asm_file, tmpdir)
exp = [{'call_site': 4, 'callee_func': 12, 'caller_func': 0}]
assert stats.func_calls == exp
def test_func_call_indirect(tmpdir: py.path.local) -> None:
'''Test the collection of statistics related to loops.'''
asm_file = os.path.join(os.path.dirname(__file__),
'simple', 'subroutines', 'indirect-call.s')
stats = _simulate_asm_file(asm_file, tmpdir)
exp = [{'call_site': 8, 'callee_func': 16, 'caller_func': 0}]
assert stats.func_calls == exp
| 30.859551 | 81 | 0.636264 |
79411965511090477d1ffd366bcf57553a64b2b7 | 1,625 | py | Python | ermaket/api/system/hierarchy_manager.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | ermaket/api/system/hierarchy_manager.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | ermaket/api/system/hierarchy_manager.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | import atexit
import logging
import os
from ermaket.api.config import Config
from ermaket.api.system.hierarchy import Hierachy
from ermaket.utils import Singleton
__all__ = ['HierachyManager']
_hierarchy = None
class HierachyManager(metaclass=Singleton):
def __init__(self, reload=False, save=True, path=None):
self._config = Config()
if path is not None:
self._path = path
else:
self._path = self._config.XML['hierarchyPath']
self.read(reload)
if save:
atexit.register(lambda manager: manager.save(), self)
@property
def h(self):
return self.hierarchy
def set_path(self, path):
self._path = path
def read(self, reload):
global _hierarchy
if _hierarchy is None or reload:
if os.path.exists(self._path):
with open(self._path) as f:
_hierarchy = Hierachy.from_xml(f.read())
logging.info(
f'Read hierarchy. Elements number: {len(_hierarchy)}'
)
else:
_hierarchy = Hierachy()
logging.info('Created new hierarchy')
self.hierarchy = _hierarchy
def save(self):
with open(self._path, 'w') as f:
f.write(self.hierarchy.pretty_xml())
try:
logging.info(
f'Saved hierarchy. Elements number: {len(self.hierarchy)}'
)
except ValueError:
pass
def drop(self):
global _hierarchy
_hierarchy = Hierachy()
self.hierarchy = _hierarchy
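# Usage sketch (singleton semantics follow from the metaclass; names as
# defined above):
#   mgr = HierachyManager()          # parses XML at the configured path
#   assert mgr is HierachyManager()  # same instance on repeated calls
#   root = mgr.h                     # shorthand for mgr.hierarchy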
| 26.639344 | 74 | 0.576615 |
79411af7e97cff105b96ea41c2581aeaf7bc651b | 15,666 | py | Python | lib/sqlalchemy/testing/assertions.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | lib/sqlalchemy/testing/assertions.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | lib/sqlalchemy/testing/assertions.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # testing/assertions.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from . import util as testutil
from sqlalchemy import pool, orm, util
from sqlalchemy.engine import default, create_engine, url
from sqlalchemy import exc as sa_exc
from sqlalchemy.util import decorator
from sqlalchemy import types as sqltypes, schema
import warnings
import re
from .warnings import resetwarnings
from .exclusions import db_spec, _is_excluded
from . import assertsql
from . import config
import itertools
from .util import fail
import contextlib
def emits_warning(*messages):
"""Mark a test as emitting a warning.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
"""
# TODO: it would be nice to assert that a named warning was
# emitted. should work with some monkeypatching of warnings,
# and may work on non-CPython if they keep to the spirit of
# warnings.showwarning's docstring.
# - update: jython looks ok, it uses cpython's module
@decorator
def decorate(fn, *args, **kw):
# todo: should probably be strict about this, too
filters = [dict(action='ignore',
category=sa_exc.SAPendingDeprecationWarning)]
if not messages:
filters.append(dict(action='ignore',
category=sa_exc.SAWarning))
else:
filters.extend(dict(action='ignore',
message=message,
category=sa_exc.SAWarning)
for message in messages)
for f in filters:
warnings.filterwarnings(**f)
try:
return fn(*args, **kw)
finally:
resetwarnings()
return decorate
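# Usage sketch (the warning fragment is illustrative):
#   @emits_warning("Did not recognize type")
#   def test_reflect_unknown_type(self):
#       ...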
def emits_warning_on(db, *warnings):
"""Mark a test as emitting a warning on a specific dialect.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
"""
spec = db_spec(db)
@decorator
def decorate(fn, *args, **kw):
if isinstance(db, util.string_types):
if not spec(config._current):
return fn(*args, **kw)
else:
wrapped = emits_warning(*warnings)(fn)
return wrapped(*args, **kw)
else:
if not _is_excluded(*db):
return fn(*args, **kw)
else:
wrapped = emits_warning(*warnings)(fn)
return wrapped(*args, **kw)
return decorate
def uses_deprecated(*messages):
"""Mark a test as immune from fatal deprecation warnings.
With no arguments, squelches all SADeprecationWarning failures.
Or pass one or more strings; these will be matched to the root
of the warning description by warnings.filterwarnings().
As a special case, you may pass a function name prefixed with //
and it will be re-written as needed to match the standard warning
verbiage emitted by the sqlalchemy.util.deprecated decorator.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_deprecated(*messages):
return fn(*args, **kw)
return decorate
@contextlib.contextmanager
def expect_deprecated(*messages):
# todo: should probably be strict about this, too
filters = [dict(action='ignore',
category=sa_exc.SAPendingDeprecationWarning)]
if not messages:
filters.append(dict(action='ignore',
category=sa_exc.SADeprecationWarning))
else:
filters.extend(
[dict(action='ignore',
message=message,
category=sa_exc.SADeprecationWarning)
for message in
[(m.startswith('//') and
('Call to deprecated function ' + m[2:]) or m)
for m in messages]])
for f in filters:
warnings.filterwarnings(**f)
try:
yield
finally:
resetwarnings()
def global_cleanup_assertions():
"""Check things that have to be finalized at the end of a test suite.
Hardcoded at the moment, a modular system can be built here
to support things like PG prepared transactions, tables all
dropped, etc.
"""
_assert_no_stray_pool_connections()
_STRAY_CONNECTION_FAILURES = 0
def _assert_no_stray_pool_connections():
global _STRAY_CONNECTION_FAILURES
# lazy gc on cPython means "do nothing." pool connections
# shouldn't be in cycles, should go away.
testutil.lazy_gc()
# however, once in awhile, on an EC2 machine usually,
# there's a ref in there. usually just one.
if pool._refs:
# OK, let's be somewhat forgiving.
_STRAY_CONNECTION_FAILURES += 1
print("Encountered a stray connection in test cleanup: %s"
% str(pool._refs))
# then do a real GC sweep. We shouldn't even be here
# so a single sweep should really be doing it, otherwise
# there's probably a real unreachable cycle somewhere.
testutil.gc_collect()
# if we've already had two of these occurrences, or
# after a hard gc sweep we still have pool._refs?!
# now we have to raise.
if pool._refs:
err = str(pool._refs)
# but clean out the pool refs collection directly,
# reset the counter,
# so the error doesn't at least keep happening.
pool._refs.clear()
_STRAY_CONNECTION_FAILURES = 0
assert False, "Stray connection refused to leave "\
"after gc.collect(): %s" % err
elif _STRAY_CONNECTION_FAILURES > 10:
assert False, "Encountered more than 10 stray connections"
_STRAY_CONNECTION_FAILURES = 0
def eq_(a, b, msg=None):
"""Assert a == b, with repr messaging on failure."""
assert a == b, msg or "%r != %r" % (a, b)
def ne_(a, b, msg=None):
"""Assert a != b, with repr messaging on failure."""
assert a != b, msg or "%r == %r" % (a, b)
def is_(a, b, msg=None):
"""Assert a is b, with repr messaging on failure."""
assert a is b, msg or "%r is not %r" % (a, b)
def is_not_(a, b, msg=None):
"""Assert a is not b, with repr messaging on failure."""
assert a is not b, msg or "%r is %r" % (a, b)
def startswith_(a, fragment, msg=None):
"""Assert a.startswith(fragment), with repr messaging on failure."""
assert a.startswith(fragment), msg or "%r does not start with %r" % (
a, fragment)
def assert_raises(except_cls, callable_, *args, **kw):
try:
callable_(*args, **kw)
success = False
except except_cls:
success = True
# assert outside the block so it works for AssertionError too !
assert success, "Callable did not raise an exception"
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
try:
callable_(*args, **kwargs)
assert False, "Callable did not raise an exception"
except except_cls as e:
assert re.search(
msg, util.text_type(e), re.UNICODE), "%r !~ %s" % (msg, e)
print(util.text_type(e).encode('utf-8'))
class AssertsCompiledSQL(object):
def assert_compile(self, clause, result, params=None,
checkparams=None, dialect=None,
checkpositional=None,
use_default_dialect=False,
allow_dialect_select=False,
literal_binds=False):
if use_default_dialect:
dialect = default.DefaultDialect()
elif allow_dialect_select:
dialect = None
else:
if dialect is None:
dialect = getattr(self, '__dialect__', None)
if dialect is None:
dialect = config.db.dialect
elif dialect == 'default':
dialect = default.DefaultDialect()
elif isinstance(dialect, util.string_types):
dialect = url.URL(dialect).get_dialect()()
kw = {}
compile_kwargs = {}
if params is not None:
kw['column_keys'] = list(params)
if literal_binds:
compile_kwargs['literal_binds'] = True
if isinstance(clause, orm.Query):
context = clause._compile_context()
context.statement.use_labels = True
clause = context.statement
if compile_kwargs:
kw['compile_kwargs'] = compile_kwargs
c = clause.compile(dialect=dialect, **kw)
param_str = repr(getattr(c, 'params', {}))
if util.py3k:
param_str = param_str.encode('utf-8').decode('ascii', 'ignore')
print(
("\nSQL String:\n" +
util.text_type(c) +
param_str).encode('utf-8'))
else:
print(
"\nSQL String:\n" +
util.text_type(c).encode('utf-8') +
param_str)
cc = re.sub(r'[\n\t]', '', util.text_type(c))
eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
if checkparams is not None:
eq_(c.construct_params(params), checkparams)
if checkpositional is not None:
p = c.construct_params(params)
eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
class ComparesTables(object):
def assert_tables_equal(self, table, reflected_table, strict_types=False):
assert len(table.c) == len(reflected_table.c)
for c, reflected_c in zip(table.c, reflected_table.c):
eq_(c.name, reflected_c.name)
assert reflected_c is reflected_table.c[c.name]
eq_(c.primary_key, reflected_c.primary_key)
eq_(c.nullable, reflected_c.nullable)
if strict_types:
msg = "Type '%s' doesn't correspond to type '%s'"
assert isinstance(reflected_c.type, type(c.type)), \
msg % (reflected_c.type, c.type)
else:
self.assert_types_base(reflected_c, c)
if isinstance(c.type, sqltypes.String):
eq_(c.type.length, reflected_c.type.length)
eq_(
set([f.column.name for f in c.foreign_keys]),
set([f.column.name for f in reflected_c.foreign_keys])
)
if c.server_default:
assert isinstance(reflected_c.server_default,
schema.FetchedValue)
assert len(table.primary_key) == len(reflected_table.primary_key)
for c in table.primary_key:
assert reflected_table.primary_key.columns[c.name] is not None
def assert_types_base(self, c1, c2):
assert c1.type._compare_type_affinity(c2.type),\
"On column %r, type '%s' doesn't correspond to type '%s'" % \
(c1.name, c1.type, c2.type)
class AssertsExecutionResults(object):
def assert_result(self, result, class_, *objects):
result = list(result)
print(repr(result))
self.assert_list(result, class_, objects)
def assert_list(self, result, class_, list):
self.assert_(len(result) == len(list),
"result list is not the same size as test list, " +
"for class " + class_.__name__)
for i in range(0, len(list)):
self.assert_row(class_, result[i], list[i])
def assert_row(self, class_, rowobj, desc):
self.assert_(rowobj.__class__ is class_,
"item class is not " + repr(class_))
for key, value in desc.items():
if isinstance(value, tuple):
if isinstance(value[1], list):
self.assert_list(getattr(rowobj, key), value[0], value[1])
else:
self.assert_row(value[0], getattr(rowobj, key), value[1])
else:
self.assert_(getattr(rowobj, key) == value,
"attribute %s value %s does not match %s" % (
key, getattr(rowobj, key), value))
def assert_unordered_result(self, result, cls, *expected):
"""As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
"""
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = set([immutabledict(e) for e in expected])
for wrong in util.itertools_filterfalse(lambda o:
isinstance(o, cls), found):
fail('Unexpected type "%s", expected "%s"' % (
type(wrong).__name__, cls.__name__))
if len(found) != len(expected):
fail('Unexpected object count "%s", expected "%s"' % (
len(found), len(expected)))
NOVALUE = object()
def _compare_item(obj, spec):
for key, value in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(
getattr(obj, key), value[0], *value[1])
except AssertionError:
return False
else:
if getattr(obj, key, NOVALUE) != value:
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(
"Expected %s instance with attributes %s not found." % (
cls.__name__, repr(expected_item)))
return True
def assert_sql_execution(self, db, callable_, *rules):
assertsql.asserter.add_rules(rules)
try:
callable_()
assertsql.asserter.statement_complete()
finally:
assertsql.asserter.clear_rules()
def assert_sql(self, db, callable_, list_, with_sequences=None):
if (with_sequences is not None and
config.db.dialect.supports_sequences):
rules = with_sequences
else:
rules = list_
newrules = []
for rule in rules:
if isinstance(rule, dict):
newrule = assertsql.AllOf(*[
assertsql.ExactSQL(k, v) for k, v in rule.items()
])
else:
newrule = assertsql.ExactSQL(*rule)
newrules.append(newrule)
self.assert_sql_execution(db, callable_, *newrules)
def assert_sql_count(self, db, callable_, count):
self.assert_sql_execution(
db, callable_, assertsql.CountStatements(count))
@contextlib.contextmanager
def assert_execution(self, *rules):
assertsql.asserter.add_rules(rules)
try:
yield
assertsql.asserter.statement_complete()
finally:
assertsql.asserter.clear_rules()
def assert_statement_count(self, count):
return self.assert_execution(assertsql.CountStatements(count))
| 34.506608 | 78 | 0.588217 |
79411b7e3a0100048f0c9d786f2e9b11509cf93e | 18,321 | py | Python | calendarapp/accounts/migrations/0007_auto_20201117_1616.py | rozumalex/calendar | ef58c4b221845a32cb4398f2838b94f03cd25d1e | [
"MIT"
] | null | null | null | calendarapp/accounts/migrations/0007_auto_20201117_1616.py | rozumalex/calendar | ef58c4b221845a32cb4398f2838b94f03cd25d1e | [
"MIT"
] | null | null | null | calendarapp/accounts/migrations/0007_auto_20201117_1616.py | rozumalex/calendar | ef58c4b221845a32cb4398f2838b94f03cd25d1e | [
"MIT"
] | 1 | 2021-05-22T21:29:40.000Z | 2021-05-22T21:29:40.000Z | # Generated by Django 3.1.3 on 2020-11-17 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20201117_0819'),
]
operations = [
migrations.AlterField(
model_name='user',
name='timezone',
field=models.CharField(choices=[('Africa/Abidjan', 'Africa/Abidjan'), ('Africa/Accra', 'Africa/Accra'), ('Africa/Addis_Ababa', 'Africa/Addis_Ababa'), ('Africa/Algiers', 'Africa/Algiers'), ('Africa/Asmara', 'Africa/Asmara'), ('Africa/Bamako', 'Africa/Bamako'), ('Africa/Bangui', 'Africa/Bangui'), ('Africa/Banjul', 'Africa/Banjul'), ('Africa/Bissau', 'Africa/Bissau'), ('Africa/Blantyre', 'Africa/Blantyre'), ('Africa/Brazzaville', 'Africa/Brazzaville'), ('Africa/Bujumbura', 'Africa/Bujumbura'), ('Africa/Cairo', 'Africa/Cairo'), ('Africa/Casablanca', 'Africa/Casablanca'), ('Africa/Ceuta', 'Africa/Ceuta'), ('Africa/Conakry', 'Africa/Conakry'), ('Africa/Dakar', 'Africa/Dakar'), ('Africa/Dar_es_Salaam', 'Africa/Dar_es_Salaam'), ('Africa/Djibouti', 'Africa/Djibouti'), ('Africa/Douala', 'Africa/Douala'), ('Africa/El_Aaiun', 'Africa/El_Aaiun'), ('Africa/Freetown', 'Africa/Freetown'), ('Africa/Gaborone', 'Africa/Gaborone'), ('Africa/Harare', 'Africa/Harare'), ('Africa/Johannesburg', 'Africa/Johannesburg'), ('Africa/Juba', 'Africa/Juba'), ('Africa/Kampala', 'Africa/Kampala'), ('Africa/Khartoum', 'Africa/Khartoum'), ('Africa/Kigali', 'Africa/Kigali'), ('Africa/Kinshasa', 'Africa/Kinshasa'), ('Africa/Lagos', 'Africa/Lagos'), ('Africa/Libreville', 'Africa/Libreville'), ('Africa/Lome', 'Africa/Lome'), ('Africa/Luanda', 'Africa/Luanda'), ('Africa/Lubumbashi', 'Africa/Lubumbashi'), ('Africa/Lusaka', 'Africa/Lusaka'), ('Africa/Malabo', 'Africa/Malabo'), ('Africa/Maputo', 'Africa/Maputo'), ('Africa/Maseru', 'Africa/Maseru'), ('Africa/Mbabane', 'Africa/Mbabane'), ('Africa/Mogadishu', 'Africa/Mogadishu'), ('Africa/Monrovia', 'Africa/Monrovia'), ('Africa/Nairobi', 'Africa/Nairobi'), ('Africa/Ndjamena', 'Africa/Ndjamena'), ('Africa/Niamey', 'Africa/Niamey'), ('Africa/Nouakchott', 'Africa/Nouakchott'), ('Africa/Ouagadougou', 'Africa/Ouagadougou'), ('Africa/Porto-Novo', 'Africa/Porto-Novo'), ('Africa/Sao_Tome', 'Africa/Sao_Tome'), ('Africa/Tripoli', 'Africa/Tripoli'), ('Africa/Tunis', 'Africa/Tunis'), ('Africa/Windhoek', 'Africa/Windhoek'), ('America/Adak', 'America/Adak'), ('America/Anchorage', 'America/Anchorage'), ('America/Anguilla', 'America/Anguilla'), ('America/Antigua', 'America/Antigua'), ('America/Araguaina', 'America/Araguaina'), ('America/Argentina/Buenos_Aires', 'America/Argentina/Buenos_Aires'), ('America/Argentina/Catamarca', 'America/Argentina/Catamarca'), ('America/Argentina/Cordoba', 'America/Argentina/Cordoba'), ('America/Argentina/Jujuy', 'America/Argentina/Jujuy'), ('America/Argentina/La_Rioja', 'America/Argentina/La_Rioja'), ('America/Argentina/Mendoza', 'America/Argentina/Mendoza'), ('America/Argentina/Rio_Gallegos', 'America/Argentina/Rio_Gallegos'), ('America/Argentina/Salta', 'America/Argentina/Salta'), ('America/Argentina/San_Juan', 'America/Argentina/San_Juan'), ('America/Argentina/San_Luis', 'America/Argentina/San_Luis'), ('America/Argentina/Tucuman', 'America/Argentina/Tucuman'), ('America/Argentina/Ushuaia', 'America/Argentina/Ushuaia'), ('America/Aruba', 'America/Aruba'), ('America/Asuncion', 'America/Asuncion'), ('America/Atikokan', 'America/Atikokan'), ('America/Bahia', 'America/Bahia'), ('America/Bahia_Banderas', 'America/Bahia_Banderas'), ('America/Barbados', 'America/Barbados'), ('America/Belem', 'America/Belem'), ('America/Belize', 'America/Belize'), ('America/Blanc-Sablon', 'America/Blanc-Sablon'), ('America/Boa_Vista', 'America/Boa_Vista'), ('America/Bogota', 'America/Bogota'), ('America/Boise', 'America/Boise'), ('America/Cambridge_Bay', 'America/Cambridge_Bay'), 
('America/Campo_Grande', 'America/Campo_Grande'), ('America/Cancun', 'America/Cancun'), ('America/Caracas', 'America/Caracas'), ('America/Cayenne', 'America/Cayenne'), ('America/Cayman', 'America/Cayman'), ('America/Chicago', 'America/Chicago'), ('America/Chihuahua', 'America/Chihuahua'), ('America/Costa_Rica', 'America/Costa_Rica'), ('America/Creston', 'America/Creston'), ('America/Cuiaba', 'America/Cuiaba'), ('America/Curacao', 'America/Curacao'), ('America/Danmarkshavn', 'America/Danmarkshavn'), ('America/Dawson', 'America/Dawson'), ('America/Dawson_Creek', 'America/Dawson_Creek'), ('America/Denver', 'America/Denver'), ('America/Detroit', 'America/Detroit'), ('America/Dominica', 'America/Dominica'), ('America/Edmonton', 'America/Edmonton'), ('America/Eirunepe', 'America/Eirunepe'), ('America/El_Salvador', 'America/El_Salvador'), ('America/Fort_Nelson', 'America/Fort_Nelson'), ('America/Fortaleza', 'America/Fortaleza'), ('America/Glace_Bay', 'America/Glace_Bay'), ('America/Goose_Bay', 'America/Goose_Bay'), ('America/Grand_Turk', 'America/Grand_Turk'), ('America/Grenada', 'America/Grenada'), ('America/Guadeloupe', 'America/Guadeloupe'), ('America/Guatemala', 'America/Guatemala'), ('America/Guayaquil', 'America/Guayaquil'), ('America/Guyana', 'America/Guyana'), ('America/Halifax', 'America/Halifax'), ('America/Havana', 'America/Havana'), ('America/Hermosillo', 'America/Hermosillo'), ('America/Indiana/Indianapolis', 'America/Indiana/Indianapolis'), ('America/Indiana/Knox', 'America/Indiana/Knox'), ('America/Indiana/Marengo', 'America/Indiana/Marengo'), ('America/Indiana/Petersburg', 'America/Indiana/Petersburg'), ('America/Indiana/Tell_City', 'America/Indiana/Tell_City'), ('America/Indiana/Vevay', 'America/Indiana/Vevay'), ('America/Indiana/Vincennes', 'America/Indiana/Vincennes'), ('America/Indiana/Winamac', 'America/Indiana/Winamac'), ('America/Inuvik', 'America/Inuvik'), ('America/Iqaluit', 'America/Iqaluit'), ('America/Jamaica', 'America/Jamaica'), ('America/Juneau', 'America/Juneau'), ('America/Kentucky/Louisville', 'America/Kentucky/Louisville'), ('America/Kentucky/Monticello', 'America/Kentucky/Monticello'), ('America/Kralendijk', 'America/Kralendijk'), ('America/La_Paz', 'America/La_Paz'), ('America/Lima', 'America/Lima'), ('America/Los_Angeles', 'America/Los_Angeles'), ('America/Lower_Princes', 'America/Lower_Princes'), ('America/Maceio', 'America/Maceio'), ('America/Managua', 'America/Managua'), ('America/Manaus', 'America/Manaus'), ('America/Marigot', 'America/Marigot'), ('America/Martinique', 'America/Martinique'), ('America/Matamoros', 'America/Matamoros'), ('America/Mazatlan', 'America/Mazatlan'), ('America/Menominee', 'America/Menominee'), ('America/Merida', 'America/Merida'), ('America/Metlakatla', 'America/Metlakatla'), ('America/Mexico_City', 'America/Mexico_City'), ('America/Miquelon', 'America/Miquelon'), ('America/Moncton', 'America/Moncton'), ('America/Monterrey', 'America/Monterrey'), ('America/Montevideo', 'America/Montevideo'), ('America/Montserrat', 'America/Montserrat'), ('America/Nassau', 'America/Nassau'), ('America/New_York', 'America/New_York'), ('America/Nipigon', 'America/Nipigon'), ('America/Nome', 'America/Nome'), ('America/Noronha', 'America/Noronha'), ('America/North_Dakota/Beulah', 'America/North_Dakota/Beulah'), ('America/North_Dakota/Center', 'America/North_Dakota/Center'), ('America/North_Dakota/New_Salem', 'America/North_Dakota/New_Salem'), ('America/Nuuk', 'America/Nuuk'), ('America/Ojinaga', 'America/Ojinaga'), ('America/Panama', 
'America/Panama'), ('America/Pangnirtung', 'America/Pangnirtung'), ('America/Paramaribo', 'America/Paramaribo'), ('America/Phoenix', 'America/Phoenix'), ('America/Port-au-Prince', 'America/Port-au-Prince'), ('America/Port_of_Spain', 'America/Port_of_Spain'), ('America/Porto_Velho', 'America/Porto_Velho'), ('America/Puerto_Rico', 'America/Puerto_Rico'), ('America/Punta_Arenas', 'America/Punta_Arenas'), ('America/Rainy_River', 'America/Rainy_River'), ('America/Rankin_Inlet', 'America/Rankin_Inlet'), ('America/Recife', 'America/Recife'), ('America/Regina', 'America/Regina'), ('America/Resolute', 'America/Resolute'), ('America/Rio_Branco', 'America/Rio_Branco'), ('America/Santarem', 'America/Santarem'), ('America/Santiago', 'America/Santiago'), ('America/Santo_Domingo', 'America/Santo_Domingo'), ('America/Sao_Paulo', 'America/Sao_Paulo'), ('America/Scoresbysund', 'America/Scoresbysund'), ('America/Sitka', 'America/Sitka'), ('America/St_Barthelemy', 'America/St_Barthelemy'), ('America/St_Johns', 'America/St_Johns'), ('America/St_Kitts', 'America/St_Kitts'), ('America/St_Lucia', 'America/St_Lucia'), ('America/St_Thomas', 'America/St_Thomas'), ('America/St_Vincent', 'America/St_Vincent'), ('America/Swift_Current', 'America/Swift_Current'), ('America/Tegucigalpa', 'America/Tegucigalpa'), ('America/Thule', 'America/Thule'), ('America/Thunder_Bay', 'America/Thunder_Bay'), ('America/Tijuana', 'America/Tijuana'), ('America/Toronto', 'America/Toronto'), ('America/Tortola', 'America/Tortola'), ('America/Vancouver', 'America/Vancouver'), ('America/Whitehorse', 'America/Whitehorse'), ('America/Winnipeg', 'America/Winnipeg'), ('America/Yakutat', 'America/Yakutat'), ('America/Yellowknife', 'America/Yellowknife'), ('Antarctica/Casey', 'Antarctica/Casey'), ('Antarctica/Davis', 'Antarctica/Davis'), ('Antarctica/DumontDUrville', 'Antarctica/DumontDUrville'), ('Antarctica/Macquarie', 'Antarctica/Macquarie'), ('Antarctica/Mawson', 'Antarctica/Mawson'), ('Antarctica/McMurdo', 'Antarctica/McMurdo'), ('Antarctica/Palmer', 'Antarctica/Palmer'), ('Antarctica/Rothera', 'Antarctica/Rothera'), ('Antarctica/Syowa', 'Antarctica/Syowa'), ('Antarctica/Troll', 'Antarctica/Troll'), ('Antarctica/Vostok', 'Antarctica/Vostok'), ('Arctic/Longyearbyen', 'Arctic/Longyearbyen'), ('Asia/Aden', 'Asia/Aden'), ('Asia/Almaty', 'Asia/Almaty'), ('Asia/Amman', 'Asia/Amman'), ('Asia/Anadyr', 'Asia/Anadyr'), ('Asia/Aqtau', 'Asia/Aqtau'), ('Asia/Aqtobe', 'Asia/Aqtobe'), ('Asia/Ashgabat', 'Asia/Ashgabat'), ('Asia/Atyrau', 'Asia/Atyrau'), ('Asia/Baghdad', 'Asia/Baghdad'), ('Asia/Bahrain', 'Asia/Bahrain'), ('Asia/Baku', 'Asia/Baku'), ('Asia/Bangkok', 'Asia/Bangkok'), ('Asia/Barnaul', 'Asia/Barnaul'), ('Asia/Beirut', 'Asia/Beirut'), ('Asia/Bishkek', 'Asia/Bishkek'), ('Asia/Brunei', 'Asia/Brunei'), ('Asia/Chita', 'Asia/Chita'), ('Asia/Choibalsan', 'Asia/Choibalsan'), ('Asia/Colombo', 'Asia/Colombo'), ('Asia/Damascus', 'Asia/Damascus'), ('Asia/Dhaka', 'Asia/Dhaka'), ('Asia/Dili', 'Asia/Dili'), ('Asia/Dubai', 'Asia/Dubai'), ('Asia/Dushanbe', 'Asia/Dushanbe'), ('Asia/Famagusta', 'Asia/Famagusta'), ('Asia/Gaza', 'Asia/Gaza'), ('Asia/Hebron', 'Asia/Hebron'), ('Asia/Ho_Chi_Minh', 'Asia/Ho_Chi_Minh'), ('Asia/Hong_Kong', 'Asia/Hong_Kong'), ('Asia/Hovd', 'Asia/Hovd'), ('Asia/Irkutsk', 'Asia/Irkutsk'), ('Asia/Jakarta', 'Asia/Jakarta'), ('Asia/Jayapura', 'Asia/Jayapura'), ('Asia/Jerusalem', 'Asia/Jerusalem'), ('Asia/Kabul', 'Asia/Kabul'), ('Asia/Kamchatka', 'Asia/Kamchatka'), ('Asia/Karachi', 'Asia/Karachi'), ('Asia/Kathmandu', 'Asia/Kathmandu'), 
('Asia/Khandyga', 'Asia/Khandyga'), ('Asia/Kolkata', 'Asia/Kolkata'), ('Asia/Krasnoyarsk', 'Asia/Krasnoyarsk'), ('Asia/Kuala_Lumpur', 'Asia/Kuala_Lumpur'), ('Asia/Kuching', 'Asia/Kuching'), ('Asia/Kuwait', 'Asia/Kuwait'), ('Asia/Macau', 'Asia/Macau'), ('Asia/Magadan', 'Asia/Magadan'), ('Asia/Makassar', 'Asia/Makassar'), ('Asia/Manila', 'Asia/Manila'), ('Asia/Muscat', 'Asia/Muscat'), ('Asia/Nicosia', 'Asia/Nicosia'), ('Asia/Novokuznetsk', 'Asia/Novokuznetsk'), ('Asia/Novosibirsk', 'Asia/Novosibirsk'), ('Asia/Omsk', 'Asia/Omsk'), ('Asia/Oral', 'Asia/Oral'), ('Asia/Phnom_Penh', 'Asia/Phnom_Penh'), ('Asia/Pontianak', 'Asia/Pontianak'), ('Asia/Pyongyang', 'Asia/Pyongyang'), ('Asia/Qatar', 'Asia/Qatar'), ('Asia/Qostanay', 'Asia/Qostanay'), ('Asia/Qyzylorda', 'Asia/Qyzylorda'), ('Asia/Riyadh', 'Asia/Riyadh'), ('Asia/Sakhalin', 'Asia/Sakhalin'), ('Asia/Samarkand', 'Asia/Samarkand'), ('Asia/Seoul', 'Asia/Seoul'), ('Asia/Shanghai', 'Asia/Shanghai'), ('Asia/Singapore', 'Asia/Singapore'), ('Asia/Srednekolymsk', 'Asia/Srednekolymsk'), ('Asia/Taipei', 'Asia/Taipei'), ('Asia/Tashkent', 'Asia/Tashkent'), ('Asia/Tbilisi', 'Asia/Tbilisi'), ('Asia/Tehran', 'Asia/Tehran'), ('Asia/Thimphu', 'Asia/Thimphu'), ('Asia/Tokyo', 'Asia/Tokyo'), ('Asia/Tomsk', 'Asia/Tomsk'), ('Asia/Ulaanbaatar', 'Asia/Ulaanbaatar'), ('Asia/Urumqi', 'Asia/Urumqi'), ('Asia/Ust-Nera', 'Asia/Ust-Nera'), ('Asia/Vientiane', 'Asia/Vientiane'), ('Asia/Vladivostok', 'Asia/Vladivostok'), ('Asia/Yakutsk', 'Asia/Yakutsk'), ('Asia/Yangon', 'Asia/Yangon'), ('Asia/Yekaterinburg', 'Asia/Yekaterinburg'), ('Asia/Yerevan', 'Asia/Yerevan'), ('Atlantic/Azores', 'Atlantic/Azores'), ('Atlantic/Bermuda', 'Atlantic/Bermuda'), ('Atlantic/Canary', 'Atlantic/Canary'), ('Atlantic/Cape_Verde', 'Atlantic/Cape_Verde'), ('Atlantic/Faroe', 'Atlantic/Faroe'), ('Atlantic/Madeira', 'Atlantic/Madeira'), ('Atlantic/Reykjavik', 'Atlantic/Reykjavik'), ('Atlantic/South_Georgia', 'Atlantic/South_Georgia'), ('Atlantic/St_Helena', 'Atlantic/St_Helena'), ('Atlantic/Stanley', 'Atlantic/Stanley'), ('Australia/Adelaide', 'Australia/Adelaide'), ('Australia/Brisbane', 'Australia/Brisbane'), ('Australia/Broken_Hill', 'Australia/Broken_Hill'), ('Australia/Currie', 'Australia/Currie'), ('Australia/Darwin', 'Australia/Darwin'), ('Australia/Eucla', 'Australia/Eucla'), ('Australia/Hobart', 'Australia/Hobart'), ('Australia/Lindeman', 'Australia/Lindeman'), ('Australia/Lord_Howe', 'Australia/Lord_Howe'), ('Australia/Melbourne', 'Australia/Melbourne'), ('Australia/Perth', 'Australia/Perth'), ('Australia/Sydney', 'Australia/Sydney'), ('Canada/Atlantic', 'Canada/Atlantic'), ('Canada/Central', 'Canada/Central'), ('Canada/Eastern', 'Canada/Eastern'), ('Canada/Mountain', 'Canada/Mountain'), ('Canada/Newfoundland', 'Canada/Newfoundland'), ('Canada/Pacific', 'Canada/Pacific'), ('Europe/Amsterdam', 'Europe/Amsterdam'), ('Europe/Andorra', 'Europe/Andorra'), ('Europe/Astrakhan', 'Europe/Astrakhan'), ('Europe/Athens', 'Europe/Athens'), ('Europe/Belgrade', 'Europe/Belgrade'), ('Europe/Berlin', 'Europe/Berlin'), ('Europe/Bratislava', 'Europe/Bratislava'), ('Europe/Brussels', 'Europe/Brussels'), ('Europe/Bucharest', 'Europe/Bucharest'), ('Europe/Budapest', 'Europe/Budapest'), ('Europe/Busingen', 'Europe/Busingen'), ('Europe/Chisinau', 'Europe/Chisinau'), ('Europe/Copenhagen', 'Europe/Copenhagen'), ('Europe/Dublin', 'Europe/Dublin'), ('Europe/Gibraltar', 'Europe/Gibraltar'), ('Europe/Guernsey', 'Europe/Guernsey'), ('Europe/Helsinki', 'Europe/Helsinki'), ('Europe/Isle_of_Man', 'Europe/Isle_of_Man'), 
('Europe/Istanbul', 'Europe/Istanbul'), ('Europe/Jersey', 'Europe/Jersey'), ('Europe/Kaliningrad', 'Europe/Kaliningrad'), ('Europe/Kiev', 'Europe/Kiev'), ('Europe/Kirov', 'Europe/Kirov'), ('Europe/Lisbon', 'Europe/Lisbon'), ('Europe/Ljubljana', 'Europe/Ljubljana'), ('Europe/London', 'Europe/London'), ('Europe/Luxembourg', 'Europe/Luxembourg'), ('Europe/Madrid', 'Europe/Madrid'), ('Europe/Malta', 'Europe/Malta'), ('Europe/Mariehamn', 'Europe/Mariehamn'), ('Europe/Minsk', 'Europe/Minsk'), ('Europe/Monaco', 'Europe/Monaco'), ('Europe/Moscow', 'Europe/Moscow'), ('Europe/Oslo', 'Europe/Oslo'), ('Europe/Paris', 'Europe/Paris'), ('Europe/Podgorica', 'Europe/Podgorica'), ('Europe/Prague', 'Europe/Prague'), ('Europe/Riga', 'Europe/Riga'), ('Europe/Rome', 'Europe/Rome'), ('Europe/Samara', 'Europe/Samara'), ('Europe/San_Marino', 'Europe/San_Marino'), ('Europe/Sarajevo', 'Europe/Sarajevo'), ('Europe/Saratov', 'Europe/Saratov'), ('Europe/Simferopol', 'Europe/Simferopol'), ('Europe/Skopje', 'Europe/Skopje'), ('Europe/Sofia', 'Europe/Sofia'), ('Europe/Stockholm', 'Europe/Stockholm'), ('Europe/Tallinn', 'Europe/Tallinn'), ('Europe/Tirane', 'Europe/Tirane'), ('Europe/Ulyanovsk', 'Europe/Ulyanovsk'), ('Europe/Uzhgorod', 'Europe/Uzhgorod'), ('Europe/Vaduz', 'Europe/Vaduz'), ('Europe/Vatican', 'Europe/Vatican'), ('Europe/Vienna', 'Europe/Vienna'), ('Europe/Vilnius', 'Europe/Vilnius'), ('Europe/Volgograd', 'Europe/Volgograd'), ('Europe/Warsaw', 'Europe/Warsaw'), ('Europe/Zagreb', 'Europe/Zagreb'), ('Europe/Zaporozhye', 'Europe/Zaporozhye'), ('Europe/Zurich', 'Europe/Zurich'), ('GMT', 'GMT'), ('Indian/Antananarivo', 'Indian/Antananarivo'), ('Indian/Chagos', 'Indian/Chagos'), ('Indian/Christmas', 'Indian/Christmas'), ('Indian/Cocos', 'Indian/Cocos'), ('Indian/Comoro', 'Indian/Comoro'), ('Indian/Kerguelen', 'Indian/Kerguelen'), ('Indian/Mahe', 'Indian/Mahe'), ('Indian/Maldives', 'Indian/Maldives'), ('Indian/Mauritius', 'Indian/Mauritius'), ('Indian/Mayotte', 'Indian/Mayotte'), ('Indian/Reunion', 'Indian/Reunion'), ('Pacific/Apia', 'Pacific/Apia'), ('Pacific/Auckland', 'Pacific/Auckland'), ('Pacific/Bougainville', 'Pacific/Bougainville'), ('Pacific/Chatham', 'Pacific/Chatham'), ('Pacific/Chuuk', 'Pacific/Chuuk'), ('Pacific/Easter', 'Pacific/Easter'), ('Pacific/Efate', 'Pacific/Efate'), ('Pacific/Enderbury', 'Pacific/Enderbury'), ('Pacific/Fakaofo', 'Pacific/Fakaofo'), ('Pacific/Fiji', 'Pacific/Fiji'), ('Pacific/Funafuti', 'Pacific/Funafuti'), ('Pacific/Galapagos', 'Pacific/Galapagos'), ('Pacific/Gambier', 'Pacific/Gambier'), ('Pacific/Guadalcanal', 'Pacific/Guadalcanal'), ('Pacific/Guam', 'Pacific/Guam'), ('Pacific/Honolulu', 'Pacific/Honolulu'), ('Pacific/Kiritimati', 'Pacific/Kiritimati'), ('Pacific/Kosrae', 'Pacific/Kosrae'), ('Pacific/Kwajalein', 'Pacific/Kwajalein'), ('Pacific/Majuro', 'Pacific/Majuro'), ('Pacific/Marquesas', 'Pacific/Marquesas'), ('Pacific/Midway', 'Pacific/Midway'), ('Pacific/Nauru', 'Pacific/Nauru'), ('Pacific/Niue', 'Pacific/Niue'), ('Pacific/Norfolk', 'Pacific/Norfolk'), ('Pacific/Noumea', 'Pacific/Noumea'), ('Pacific/Pago_Pago', 'Pacific/Pago_Pago'), ('Pacific/Palau', 'Pacific/Palau'), ('Pacific/Pitcairn', 'Pacific/Pitcairn'), ('Pacific/Pohnpei', 'Pacific/Pohnpei'), ('Pacific/Port_Moresby', 'Pacific/Port_Moresby'), ('Pacific/Rarotonga', 'Pacific/Rarotonga'), ('Pacific/Saipan', 'Pacific/Saipan'), ('Pacific/Tahiti', 'Pacific/Tahiti'), ('Pacific/Tarawa', 'Pacific/Tarawa'), ('Pacific/Tongatapu', 'Pacific/Tongatapu'), ('Pacific/Wake', 'Pacific/Wake'), ('Pacific/Wallis', 'Pacific/Wallis'), 
('US/Alaska', 'US/Alaska'), ('US/Arizona', 'US/Arizona'), ('US/Central', 'US/Central'), ('US/Eastern', 'US/Eastern'), ('US/Hawaii', 'US/Hawaii'), ('US/Mountain', 'US/Mountain'), ('US/Pacific', 'US/Pacific'), ('UTC', 'UTC')], default='UTC', max_length=255),
),
]
| 964.263158 | 17,984 | 0.699853 |
79411bd05d6ab83862becdc9e029de46302ef4ab | 1,883 | py | Python | Crawler/canadagames_schedule/schedule.py | AlbatrossBill/COSC4P02Project | c48682c014ab9de4847d46cffc710d386db93c0f | [
"MIT"
] | 4 | 2022-01-15T22:04:06.000Z | 2022-01-24T01:46:46.000Z | Crawler/canadagames_schedule/schedule.py | AlbatrossBill/COSC4P02Project | c48682c014ab9de4847d46cffc710d386db93c0f | [
"MIT"
] | null | null | null | Crawler/canadagames_schedule/schedule.py | AlbatrossBill/COSC4P02Project | c48682c014ab9de4847d46cffc710d386db93c0f | [
"MIT"
] | 1 | 2022-01-24T01:31:57.000Z | 2022-01-24T01:31:57.000Z | import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup
import json
import codecs
import os
import sys
from datetime import datetime
import base64
# Config
api_url = "https://cg2022.gems.pro/Result/Sport_List.aspx?SetLanguage=en-CA&Gems_ScreenWidth=1512&Gems_ScreenHeight=982&Gems_ScreenAvailWidth=1471&Gems_ScreenAvailHeight=944";
time_list = ["Sport","2022/08/06","2022/08/07","2022/08/08","2022/08/09","2022/08/10","2022/08/11","2022/08/12","2022/08/13","2022/08/14","2022/08/15","2022/08/16","2022/08/17","2022/08/18","2022/08/19","2022/08/20","2022/08/21"]
def request_brock_api():
url = api_url
r = requests.get(url, verify=False)
return r.text
def main():
    print("> Fetching...")
    bs_table = BeautifulSoup(request_brock_api(), features="html.parser")
    rows = bs_table.select('.SportMatrix tr')
    cg_schedule = []
    for tr in rows:
        ifHead = False
        first = True
        count = 0
        cg_schedule_temp = []
        title = ""
        # Table Head: header rows only contain <th> cells, so skip them
        for th in tr.select('th'):
            # time_list.append(th.text)
            ifHead = True
        if ifHead:
            continue
        # Timetable: the first cell holds the sport title, the rest map to dates
        for td in tr.select('td'):
            if first:
                title = td.select('a')[0].text
                first = False
                count += 1
                continue
            if not str(td.decode_contents()).replace(" ", "").replace(" ", "") == "":
                url = "https://cg2022.gems.pro/Result/" + td.select('a')[0].attrs['href']
                cg_schedule_temp.append([time_list[count], url])
            count += 1
        entry = {
            "title": title,
            "time": cg_schedule_temp
        }
        cg_schedule.append(entry)
    print(cg_schedule)
    print("> Saving Data to files...")
    json_temp = json.dumps(cg_schedule)
    f = codecs.open(os.path.join(sys.path[0], "cg_schedule.json"), 'w', 'utf-8')
    f.write(json_temp)
    f.close()
if __name__ == "__main__":
main() | 27.289855 | 229 | 0.690919 |
79411c84970652816635737bfdfff38c3ce9ed71 | 1,128 | py | Python | xlsxwriter/test/comparison/test_header_image15.py | dthadi3/XlsxWriter | f1801e82240aa9c746ce14948ef95990b83162cf | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-07-01T07:24:37.000Z | 2020-07-01T07:24:37.000Z | xlsxwriter/test/comparison/test_header_image15.py | dthadi3/XlsxWriter | f1801e82240aa9c746ce14948ef95990b83162cf | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_header_image15.py | dthadi3/XlsxWriter | f1801e82240aa9c746ce14948ef95990b83162cf | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('header_image15.xlsx')
self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup'],
'xl/worksheets/sheet2.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.set_header('&L&G', {'image_left': self.image_dir + 'red.jpg'})
worksheet2.set_header('&L&G', {'image_left': self.image_dir + 'red.jpg'})
workbook.close()
self.assertExcelEqual()
| 29.684211 | 91 | 0.606383 |
79411d9576cb624b264f8bf245c3900665df5faf | 1,118 | py | Python | car/servers/imu_server_2.py | mwmajew/Tonic | f09b9f95ac5281e9299638e4bd513d31ef703bc9 | [
"MIT"
] | 91 | 2019-03-21T22:27:34.000Z | 2022-02-17T09:19:09.000Z | car/servers/imu_server_2.py | mwmajew/Tonic | f09b9f95ac5281e9299638e4bd513d31ef703bc9 | [
"MIT"
] | 19 | 2018-11-30T16:53:15.000Z | 2021-01-04T20:32:51.000Z | car/servers/imu_server_2.py | mwmajew/Tonic | f09b9f95ac5281e9299638e4bd513d31ef703bc9 | [
"MIT"
] | 11 | 2019-07-02T13:07:43.000Z | 2021-12-17T04:43:00.000Z | 1#!/usr/bin/env python
''' Async TCP server to make first tests of newly received GPS trackers '''
import asyncore
import socket
import logging
import json
from server_management import BaseServer, BaseClientHandler
from imu_interceptor import ImuEuler
class ImuClientHandler(BaseClientHandler):
def __init__(self, sock, address, jsonify=True):
BaseClientHandler.__init__(self, sock, address, jsonify=jsonify)
self.imu = ImuEuler()
self.imu.connect()
def readout(self, input_data):
imu_data = self.imu.read()
if 'data' in imu_data:
return imu_data['data']
elif 'info' in imu_data:
self.logger.info(' ==========[{}]==========='.format(imu_data['info']))
class ImuServer(BaseServer):
def __init__(self, address):
BaseServer.__init__(self, address)
def create_client(self, *args, **kwargs):
return ImuClientHandler(*args, **kwargs)
def main():
logging.basicConfig(level=logging.DEBUG, format='%(name)s:[%(levelname)s]: %(message)s')
HOST = ''
PORT = 2204
s = ImuServer((HOST, PORT))
asyncore.loop()
if __name__ == '__main__':
main() | 24.844444 | 90 | 0.684258 |
79411dfcac9dbbcb441fe0c77648bc714582535a | 1,638 | py | Python | test/base/workflows_format_2/main.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 1 | 2020-01-06T21:04:22.000Z | 2020-01-06T21:04:22.000Z | test/base/workflows_format_2/main.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 7 | 2019-04-26T12:29:58.000Z | 2022-03-02T04:33:12.000Z | test/base/workflows_format_2/main.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 7 | 2016-11-03T19:11:01.000Z | 2020-05-11T14:23:52.000Z | """Module containing :func:`convert_and_import_workflow`."""
import os
import yaml
from .converter import python_to_workflow, yaml_to_workflow
from .interface import BioBlendImporterGalaxyInterface
def convert_and_import_workflow(has_workflow, **kwds):
"""Function is main entry for conversion and import of Format 2 workflows."""
galaxy_interface = kwds.get("galaxy_interface", None)
if galaxy_interface is None:
galaxy_interface = BioBlendImporterGalaxyInterface(**kwds)
source_type = kwds.get("source_type", None)
workflow_directory = kwds.get("workflow_directory", None)
if source_type == "path":
workflow_path = has_workflow
if workflow_directory is None:
workflow_directory = os.path.dirname(has_workflow)
with open(workflow_path, "r") as f:
has_workflow = yaml.safe_load(f)
if workflow_directory is not None:
workflow_directory = os.path.abspath(workflow_directory)
if isinstance(has_workflow, dict):
workflow = python_to_workflow(has_workflow, galaxy_interface, workflow_directory)
else:
workflow = yaml_to_workflow(has_workflow, galaxy_interface, workflow_directory)
name = kwds.get("name", None)
if name is not None:
workflow["name"] = name
publish = kwds.get("publish", False)
exact_tools = kwds.get("exact_tools", False)
import_kwds = {}
if publish:
import_kwds["publish"] = True
if exact_tools:
import_kwds["exact_tools"] = True
return galaxy_interface.import_workflow(workflow, **import_kwds)
__all__ = (
'convert_and_import_workflow',
)
| 33.428571 | 89 | 0.711844 |
79411e53298519e13262625917c9dc5d9c261af8 | 29 | py | Python | version.py | Darkunov/OoT-Randomizer | 23cef8939bd68c12b76bd6e39e07af9a2e6bd9e2 | [
"MIT"
] | null | null | null | version.py | Darkunov/OoT-Randomizer | 23cef8939bd68c12b76bd6e39e07af9a2e6bd9e2 | [
"MIT"
] | null | null | null | version.py | Darkunov/OoT-Randomizer | 23cef8939bd68c12b76bd6e39e07af9a2e6bd9e2 | [
"MIT"
] | null | null | null | __version__ = '5.2.12 f.LUM'
| 14.5 | 28 | 0.655172 |
79411e566da6f517535dfdc3b4fa57bc6706d1a8 | 5,670 | py | Python | launch/nav2_bringup_launch.py | skylerpan/nav2_step | 54f25ff640549234ab5eb33736c6267cfdddf3b4 | [
"Apache-2.0"
] | 1 | 2021-07-03T17:47:04.000Z | 2021-07-03T17:47:04.000Z | launch/nav2_bringup_launch.py | skylerpan/nav2_step | 54f25ff640549234ab5eb33736c6267cfdddf3b4 | [
"Apache-2.0"
] | null | null | null | launch/nav2_bringup_launch.py | skylerpan/nav2_step | 54f25ff640549234ab5eb33736c6267cfdddf3b4 | [
"Apache-2.0"
] | 1 | 2020-04-15T10:13:53.000Z | 2020-04-15T10:13:53.000Z | # Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_prefix
from ament_index_python.packages import get_package_share_directory
from launch.conditions import IfCondition
import launch.actions
import launch_ros.actions
from nav2_common.launch import RewrittenYaml
def generate_launch_description():
# Get the launch directory
launch_dir = os.path.join(get_package_share_directory('omnibot_bringup'), 'launch')
# Create the launch configuration variables
map_yaml_file = launch.substitutions.LaunchConfiguration('map')
use_sim_time = launch.substitutions.LaunchConfiguration('use_sim_time')
params_file = launch.substitutions.LaunchConfiguration('params')
bt_xml_file = launch.substitutions.LaunchConfiguration('bt_xml_file')
autostart = launch.substitutions.LaunchConfiguration('autostart')
stdout_linebuf_envvar = launch.actions.SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
# Create our own temporary YAML files that include substitutions
param_substitutions = {
'use_sim_time': use_sim_time,
'yaml_filename': map_yaml_file,
'bt_xml_filename': bt_xml_file,
'autostart': autostart
}
configured_params = RewrittenYaml(
source_file=params_file, rewrites=param_substitutions,
convert_types=True)
# Declare the launch arguments
declare_map_yaml_cmd = launch.actions.DeclareLaunchArgument(
'map',
default_value=[launch.substitutions.ThisLaunchFileDir(), '/../maps/map_lab.yaml'],
description='Full path to map file to load')
declare_use_sim_time_cmd = launch.actions.DeclareLaunchArgument(
'use_sim_time',
default_value='false',
description='Use simulation (Gazebo) clock if true')
declare_params_file_cmd = launch.actions.DeclareLaunchArgument(
'params',
default_value=[launch.substitutions.ThisLaunchFileDir(), '/../params/nav2_params.yaml'],
description='Full path to the ROS2 parameters file to use for all launched nodes')
declare_autostart_cmd = launch.actions.DeclareLaunchArgument(
'autostart', default_value='true',
description='Automatically startup the nav2 stack')
declare_bt_xml_cmd = launch.actions.DeclareLaunchArgument(
'bt_xml_file',
default_value=os.path.join(get_package_prefix('nav2_bt_navigator'),
'behavior_trees', 'navigate_w_replanning_and_recovery.xml'),
description='Full path to the behavior tree xml file to use')
start_map_server_cmd = launch_ros.actions.Node(
package='nav2_map_server',
node_executable='map_server',
node_name='map_server',
output='screen',
parameters=[configured_params])
start_localizer_cmd = launch_ros.actions.Node(
package='nav2_amcl',
node_executable='amcl',
node_name='amcl',
output='screen',
parameters=[configured_params])
start_world_model_cmd = launch_ros.actions.Node(
package='nav2_world_model',
node_executable='world_model',
output='screen',
parameters=[configured_params])
start_dwb_cmd = launch_ros.actions.Node(
package='dwb_controller',
node_executable='dwb_controller',
output='screen',
parameters=[configured_params])
start_planner_cmd = launch_ros.actions.Node(
package='nav2_navfn_planner',
node_executable='navfn_planner',
node_name='navfn_planner',
output='screen',
parameters=[configured_params])
start_recovery_cmd = launch_ros.actions.Node(
package='nav2_recoveries',
node_executable='recoveries_node',
node_name='recoveries',
output='screen',
parameters=[{'use_sim_time': use_sim_time}])
start_navigator_cmd = launch_ros.actions.Node(
package='nav2_bt_navigator',
node_executable='bt_navigator',
node_name='bt_navigator',
output='screen',
parameters=[configured_params])
start_lifecycle_manager_cmd = launch_ros.actions.Node(
package='nav2_lifecycle_manager',
node_executable='lifecycle_manager',
node_name='lifecycle_manager',
output='screen',
parameters=[configured_params])
# Create the launch description and populate
ld = launch.LaunchDescription()
# Set environment variables
ld.add_action(stdout_linebuf_envvar)
# Declare the launch options
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_use_sim_time_cmd)
ld.add_action(declare_params_file_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_bt_xml_cmd)
# Add the actions to launch all of the navigation nodes
ld.add_action(start_lifecycle_manager_cmd)
ld.add_action(start_map_server_cmd)
ld.add_action(start_localizer_cmd)
ld.add_action(start_world_model_cmd)
ld.add_action(start_dwb_cmd)
ld.add_action(start_planner_cmd)
ld.add_action(start_navigator_cmd)
ld.add_action(start_recovery_cmd)
return ld
| 36.11465 | 96 | 0.724691 |
79411ecc58b5b561457ad79a93bc9a682a4ef59b | 1,670 | py | Python | Settings/set5-osm-model-variable-widths-depths/set5_w64_depth3_d1.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | Settings/set5-osm-model-variable-widths-depths/set5_w64_depth3_d1.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | Settings/set5-osm-model-variable-widths-depths/set5_w64_depth3_d1.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | def Setup(Settings, DefaultModel):
# set5-osm-model-variable-widths-depths/set5_w64_depth3_d1.py
Settings["experiment_name"] = "set5_w64_depth3_d1"
Settings["graph_histories"] = [] # ['all','together',[],[1,0],[0,0,0],[]]
n = 0
#d1 5556x_markable_640x640 SegmentsData_marked_R100_4Tables
#d2 5556x_markable_640x640_2x_expanded SegmentsData_marked_R100_4Tables_expanded.dump
#d3 5556x_minlen30_640px SegmentsData_marked_R100_4Tables.dump
#d4 5556x_minlen30_640px_2x_expanded SegmentsData_marked_R100_4Tables_expanded.dump
#d5 5556x_minlen10_640px SegmentsData_marked_R100_4Tables.dump
#d6 5556x_minlen20_640px SegmentsData_marked_R100_4Tables.dump
#d7 5556x_mark_res_299x299 SegmentsData_marked_R100_4Tables.dump
Settings["models"][n]["dataset_name"] = "5556x_markable_640x640"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["model_type"] = 'osm_only' # osm_only simple_cnn_with_top img_osm_mix
Settings["models"][n]["unique_id"] = 'osm'
# Depth
Settings["models"][n]["top_repeat_FC_block"] = 3
# try 1
# try 2 =def
# try 3
# try 4
# Width
Settings["models"][n]["osm_manual_width"] = 64
# try 32
# try 64
# try 128
# try 256 def
# dont try 512
Settings["models"][n]["epochs"] = 1000
Settings["models"][n]["k_fold_crossvalidation"] = True
Settings["models"][n]["crossvalidation_k"] = 10
Settings["graph_histories"] = []
return Settings
| 35.531915 | 95 | 0.671257 |
794120372331b4568c449dfd508089619021c8ec | 11,900 | py | Python | src/python/pants/backend/project_info/source_file_validator.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/source_file_validator.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/source_file_validator.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import textwrap
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Set, Tuple, cast
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.fs import Digest, DigestContents, SourcesSnapshot
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import Get, collect_rules, goal_rule
from pants.option.subsystem import Subsystem
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized_method
class DetailLevel(Enum):
"""How much detail about validation to emit to the console.
none: Emit nothing.
summary: Emit a summary only.
nonmatching: Emit details for files that failed to match at least one pattern.
    names: Emit just the paths of files that failed to match at least one pattern.
all: Emit details for all files.
"""
none = "none"
summary = "summary"
nonmatching = "nonmatching"
names = "names"
all = "all"
class ValidateSubsystem(GoalSubsystem):
"""Validate sources against regexes."""
name = "validate"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--detail-level",
type=DetailLevel,
default=DetailLevel.nonmatching,
help="How much detail to emit to the console.",
)
@property
def detail_level(self) -> DetailLevel:
return cast(DetailLevel, self.options.detail_level)
class Validate(Goal):
subsystem_cls = ValidateSubsystem
@dataclass(frozen=True)
class PathPattern:
name: str
pattern: str
inverted: bool = False
content_encoding: str = "utf8"
@dataclass(frozen=True)
class ContentPattern:
name: str
pattern: str
inverted: bool = False
@dataclass(frozen=True)
class ValidationConfig:
path_patterns: Tuple[PathPattern, ...]
content_patterns: Tuple[ContentPattern, ...]
required_matches: FrozenDict[str, Tuple[str]] # path pattern name -> content pattern names.
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "ValidationConfig":
return cls(
path_patterns=tuple(PathPattern(**kwargs) for kwargs in d["path_patterns"]),
content_patterns=tuple(ContentPattern(**kwargs) for kwargs in d["content_patterns"]),
required_matches=FrozenDict({k: tuple(v) for k, v in d["required_matches"].items()}),
)
class SourceFileValidation(Subsystem):
"""Configuration for source file validation."""
options_scope = "sourcefile-validation"
@classmethod
def register_options(cls, register):
schema_help = textwrap.dedent(
"""
Config schema is as follows:
{
'path_patterns': [
{
'name': path_pattern1',
'pattern': <path regex pattern>,
'inverted': True|False (defaults to False),
'content_encoding': <encoding> (defaults to utf8)
},
...
],
'content_patterns': [
{
'name': 'content_pattern1',
'pattern': <content regex pattern>,
'inverted': True|False (defaults to False)
}
...
],
'required_matches': {
'path_pattern1': [content_pattern1, content_pattern2],
'path_pattern2': [content_pattern1, content_pattern3],
...
}
}
Meaning: if a file matches some path pattern, its content must match all
the corresponding content patterns.
"""
)
super().register_options(register)
register("--config", type=dict, fromfile=True, help=schema_help)
@memoized_method
def get_multi_matcher(self):
return MultiMatcher(ValidationConfig.from_dict(self.options.config))
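# A minimal example of the dict expected by --config (an illustrative
# sketch; the pattern names and regexes here are made up):
#
#   {
#       'path_patterns': [{'name': 'python_src', 'pattern': r'\.py$'}],
#       'content_patterns': [{'name': 'copyright',
#                             'pattern': r'(?m)^# Copyright'}],
#       'required_matches': {'python_src': ['copyright']},
#   }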
@dataclass(frozen=True)
class RegexMatchResult:
"""The result of running regex matches on a source file."""
path: str
matching: Tuple
nonmatching: Tuple
class RegexMatchResults(Collection[RegexMatchResult]):
pass
class Matcher:
"""Class to match a single (possibly inverted) regex.
Matches are allowed anywhere in the string (so really a "search" in the Python regex parlance).
To anchor a match at the beginning of a string, use the ^ anchor. To anchor at the beginning of
any line, use the ^ anchor along with the MULTILINE directive (?m). See test for examples.
"""
def __init__(self, pattern, inverted=False):
self.compiled_regex = re.compile(pattern)
self.inverted = inverted
def matches(self, s):
"""Whether the pattern matches anywhere in the string s."""
regex_matches = self.compiled_regex.search(s) is not None
return not regex_matches if self.inverted else regex_matches
class PathMatcher(Matcher):
"""A matcher for matching file paths."""
def __init__(self, path_pattern: PathPattern):
super().__init__(path_pattern.pattern, path_pattern.inverted)
# The expected encoding of the content of files whose paths match this pattern.
self.content_encoding = path_pattern.content_encoding
class ContentMatcher(Matcher):
"""A matcher for matching file content."""
def __init__(self, content_pattern: ContentPattern):
super().__init__(content_pattern.pattern, content_pattern.inverted)
class MultiMatcher:
def __init__(self, config: ValidationConfig):
"""Class to check multiple regex matching on files.
:param dict config: Regex matching config (see above).
"""
# Validate the pattern names mentioned in required_matches.
path_patterns_used: Set[str] = set()
content_patterns_used: Set[str] = set()
for k, v in config.required_matches.items():
path_patterns_used.add(k)
if not isinstance(v, (tuple, list)):
raise ValueError(
"Value for path pattern {} in required_matches must be tuple of "
"content pattern names, but was {}".format(k, v)
)
content_patterns_used.update(v)
unknown_path_patterns = path_patterns_used.difference(
pp.name for pp in config.path_patterns
)
if unknown_path_patterns:
raise ValueError(
"required_matches uses unknown path pattern names: "
"{}".format(", ".join(sorted(unknown_path_patterns)))
)
unknown_content_patterns = content_patterns_used.difference(
cp.name for cp in config.content_patterns
)
if unknown_content_patterns:
raise ValueError(
"required_matches uses unknown content pattern names: "
"{}".format(", ".join(sorted(unknown_content_patterns)))
)
self._path_matchers = {pp.name: PathMatcher(pp) for pp in config.path_patterns}
self._content_matchers = {cp.name: ContentMatcher(cp) for cp in config.content_patterns}
self._required_matches = config.required_matches
def check_source_file(self, path, content):
content_pattern_names, encoding = self.get_applicable_content_pattern_names(path)
matching, nonmatching = self.check_content(content_pattern_names, content, encoding)
return RegexMatchResult(path, matching, nonmatching)
def check_content(self, content_pattern_names, content, encoding):
"""Check which of the named patterns matches the given content.
Returns a pair (matching, nonmatching), in which each element is a tuple of pattern names.
:param iterable content_pattern_names: names of content patterns to check.
:param bytes content: the content to check.
:param str encoding: the expected encoding of content.
"""
if not content_pattern_names or not encoding:
return (), ()
matching = []
nonmatching = []
for content_pattern_name in content_pattern_names:
if self._content_matchers[content_pattern_name].matches(content.decode(encoding)):
matching.append(content_pattern_name)
else:
nonmatching.append(content_pattern_name)
return tuple(matching), tuple(nonmatching)
def get_applicable_content_pattern_names(self, path):
"""Return the content patterns applicable to a given path.
Returns a tuple (applicable_content_pattern_names, content_encoding).
If path matches no path patterns, the returned content_encoding will be None (and
applicable_content_pattern_names will be empty).
"""
encodings = set()
applicable_content_pattern_names = set()
for path_pattern_name, content_pattern_names in self._required_matches.items():
m = self._path_matchers[path_pattern_name]
if m.matches(path):
encodings.add(m.content_encoding)
applicable_content_pattern_names.update(content_pattern_names)
if len(encodings) > 1:
raise ValueError(
"Path matched patterns with multiple content encodings ({}): {}".format(
", ".join(sorted(encodings)), path
)
)
content_encoding = next(iter(encodings)) if encodings else None
return applicable_content_pattern_names, content_encoding
# TODO: Consider switching this to `lint`. The main downside is that we would no longer be able to
# run on files with no owning targets, such as running on BUILD files.
@goal_rule
async def validate(
console: Console,
sources_snapshot: SourcesSnapshot,
validate_subsystem: ValidateSubsystem,
source_file_validation: SourceFileValidation,
) -> Validate:
multi_matcher = source_file_validation.get_multi_matcher()
digest_contents = await Get(DigestContents, Digest, sources_snapshot.snapshot.digest)
regex_match_results = RegexMatchResults(
multi_matcher.check_source_file(file_content.path, file_content.content)
for file_content in sorted(digest_contents, key=lambda fc: fc.path)
)
detail_level = validate_subsystem.detail_level
num_matched_all = 0
num_nonmatched_some = 0
for rmr in regex_match_results:
if not rmr.matching and not rmr.nonmatching:
continue
if detail_level == DetailLevel.names:
if rmr.nonmatching:
console.print_stdout(rmr.path)
continue
if rmr.nonmatching:
icon = "X"
num_nonmatched_some += 1
else:
icon = "V"
num_matched_all += 1
matched_msg = " Matched: {}".format(",".join(rmr.matching)) if rmr.matching else ""
nonmatched_msg = (
" Didn't match: {}".format(",".join(rmr.nonmatching)) if rmr.nonmatching else ""
)
if detail_level == DetailLevel.all or (
detail_level == DetailLevel.nonmatching and nonmatched_msg
):
console.print_stdout("{} {}:{}{}".format(icon, rmr.path, matched_msg, nonmatched_msg))
if detail_level not in (DetailLevel.none, DetailLevel.names):
console.print_stdout("\n{} files matched all required patterns.".format(num_matched_all))
console.print_stdout(
"{} files failed to match at least one required pattern.".format(num_nonmatched_some)
)
if num_nonmatched_some:
exit_code = PANTS_FAILED_EXIT_CODE
else:
exit_code = PANTS_SUCCEEDED_EXIT_CODE
return Validate(exit_code)
def rules():
return collect_rules()
| 35.522388 | 99 | 0.661176 |
7941209b8313fd0bf5c8cbc9bfdf9f50bfd98e85 | 4,692 | py | Python | test/integrationtests/voight_kampff/tools.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 1 | 2021-01-25T01:06:23.000Z | 2021-01-25T01:06:23.000Z | test/integrationtests/voight_kampff/tools.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | null | null | null | test/integrationtests/voight_kampff/tools.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 2 | 2020-09-28T01:38:34.000Z | 2020-12-03T03:14:32.000Z | # Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common tools to use when creating step files for behave tests."""
import time
from mycroft.messagebus import Message
TIMEOUT = 10
def then_wait(msg_type, criteria_func, context, timeout=None):
"""Wait for a specified time for criteria to be fulfilled.
Arguments:
msg_type: message type to watch
criteria_func: Function to determine if a message fulfilling the
test case has been found.
context: behave context
timeout: Time allowance for a message fulfilling the criteria, if
            provided will override the normal step timeout.
Returns:
tuple (bool, str) test status and debug output
"""
timeout = timeout or context.step_timeout
start_time = time.monotonic()
debug = ''
while time.monotonic() < start_time + timeout:
for message in context.bus.get_messages(msg_type):
status, test_dbg = criteria_func(message)
debug += test_dbg
if status:
context.matched_message = message
context.bus.remove_message(message)
return True, debug
context.bus.new_message_available.wait(0.5)
# Timed out return debug from test
return False, debug
def then_wait_fail(msg_type, criteria_func, context, timeout=None):
"""Wait for a specified time, failing if criteria is fulfilled.
Arguments:
msg_type: message type to watch
criteria_func: Function to determine if a message fulfilling the
test case has been found.
context: behave context
timeout: Time allowance for a message fulfilling the criteria
Returns:
tuple (bool, str) test status and debug output
"""
status, debug = then_wait(msg_type, criteria_func, context, timeout)
return (not status, debug)
def mycroft_responses(context):
"""Collect and format mycroft responses from context.
Arguments:
context: behave context to extract messages from.
Returns: (str) Mycroft responses including skill and dialog file
"""
responses = ''
messages = context.bus.get_messages('speak')
if len(messages) > 0:
responses = 'Mycroft responded with:\n'
for m in messages:
responses += 'Mycroft: '
if 'dialog' in m.data['meta']:
responses += '{}.dialog'.format(m.data['meta']['dialog'])
responses += '({})\n'.format(m.data['meta'].get('skill'))
responses += '"{}"\n'.format(m.data['utterance'])
return responses
def print_mycroft_responses(context):
print(mycroft_responses(context))
def emit_utterance(bus, utt):
"""Emit an utterance on the bus.
Arguments:
        bus (InterceptAllBusClient): Bus instance to emit the utterance on
        utt (str): utterance text to emit
"""
bus.emit(Message('recognizer_loop:utterance',
data={'utterances': [utt],
'lang': 'en-us',
'session': '',
'ident': time.time()},
context={'client_name': 'mycroft_listener'}))
def wait_for_dialog(bus, dialogs, context=None, timeout=None):
"""Wait for one of the dialogs given as argument.
Arguments:
bus (InterceptAllBusClient): Bus instance to listen on
dialogs (list): list of acceptable dialogs
context (behave Context): optional context providing scenario timeout
timeout (int): how long to wait for the message, defaults to timeout
provided by context or 10 seconds
"""
if context:
timeout = timeout or context.step_timeout
else:
timeout = timeout or TIMEOUT
start_time = time.monotonic()
while time.monotonic() < start_time + timeout:
for message in bus.get_messages('speak'):
dialog = message.data.get('meta', {}).get('dialog')
if dialog in dialogs:
bus.clear_messages()
return
bus.new_message_available.wait(0.5)
bus.clear_messages()
| 34.5 | 77 | 0.639812 |
794121ef5de5814cb43a36c8f5e3c5b4aaae90cc | 2,749 | py | Python | study-drills/sdex33.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | study-drills/sdex33.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | study-drills/sdex33.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | # Study Drills 33
# 1. Convert this while-loop to a function that you can call, and replace 6 in the test (i < 6) with a variable.
# 2. Use this function to rewrite the script to try different numbers.
# 3. Add another variable to the function arguments that you can pass in that lets you change the
# + 1 on line 8 so you can change how much it increments by.
# 4. Rewrite the script again to use this function to see what effect that has.
# 5. Write it to use for-loops and range. Do you need the incrementor in the middle anymore?
# What happens if you do not get rid of it?
# NOTES:
# A while-loop tests the condition and runs the code block until the expression is False.
# Unlike an if-statement which runs the code block once, it jumps back to the top of "while" and repeats.
# While-loops do not stop on their own, so we must make sure they end at some point.
# Some rules:
# 1. Use while-loops sparingly. A for-loop is preferred.
# 2. Make sure that the Boolean test will become False at some point.
# 3. When in doubt, print out your test variable at the top and bottom of the while-loop.
# Difference between for-loop and while-loop:
# A for-loop can only iterate (loop) over collections of things.
# A while-loop can do any kind of iteration (looping).
# The while-loops are harder to get right.
i = 0
numbers = []
while i < 6:
print(f"At the top i is {i}")
numbers.append(i)
i = i + 1
print("Numbers now: ", numbers)
print(f"At the bottom i is {i}")
print("The numbers: ")
for num in numbers:
print(num)
# Convert the while-loop to a function:
numbers1 = []
def function1(j):
print(f"At the top j is {j}")
numbers1.append(j)
j += 1
print("Numbers now: ", numbers1)
print(f"At the bottom j is {j}")
# if j != 6:
# if j < 6:
if j in numbers:
function1(j)
function1(0)
print("The numbers: ")
for num1 in numbers1:
print(num1)
# Add increment variable t:
numbers2 = []
def function2(r, s, t):
print(f"At the top r is {r}")
numbers2.append(r)
r += t
print("Numbers now: ", numbers2)
print(f"At the bottom r is {r}")
if r < s:
function2(r, s, t)
function2(0, 6, 2)
print("The numbers: ")
for num2 in numbers2:
print(num2)
# Use for-loops and range():
numbers3 = []
def function3(r, s, t):
for i in range(r, s, t):
print(f"At the top r is {i}")
numbers3.append(i)
i += t
# i += t will only affect the variable i below this line within the current iteration.
# The next iteration i will still be (r+t)
print("Numbers now: ", numbers3)
print(f"At the bottom r is {i}")
function3(0, 20, 3)
print("The numbers: ")
for num3 in numbers3:
print(num3) | 27.217822 | 112 | 0.650782 |
79412238168f44be8732d91cc8c6d8d669348edd | 2,955 | py | Python | python-packages/mne-python-0.10/mne/io/kit/constants.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2017-08-13T14:09:32.000Z | 2018-07-16T23:39:00.000Z | python-packages/mne-python-0.10/mne/io/kit/constants.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | null | null | null | python-packages/mne-python-0.10/mne/io/kit/constants.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2018-04-02T06:45:11.000Z | 2018-07-16T23:39:02.000Z | """KIT constants"""
# Author: Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from ..constants import Bunch
KIT = Bunch()
# byte values
KIT.SHORT = 2
KIT.INT = 4
KIT.DOUBLE = 8
KIT.STRING = 128
# pointer locations
KIT.AMPLIFIER_INFO = 112
KIT.BASIC_INFO = 16
KIT.CHAN_SENS = 80
KIT.RAW_OFFSET = 144
KIT.AVE_OFFSET = 160
KIT.SAMPLE_INFO = 128
KIT.MRK_INFO = 192
KIT.CHAN_LOC_OFFSET = 64
# parameters
KIT.VOLTAGE_RANGE = 5.
KIT.CALIB_FACTOR = 1.0 # mne_manual p.272
KIT.RANGE = 1. # mne_manual p.272
KIT.UNIT_MUL = 0 # default is 0 mne_manual p.273
# gain: 0:x1, 1:x2, 2:x5, 3:x10, 4:x20, 5:x50, 6:x100, 7:x200
KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
# BEF options: 0:THRU, 1:50Hz, 2:60Hz, 3:50Hz
KIT.BEFS = [0, 50, 60, 50]
# coreg constants
KIT.DIG_POINTS = 10000
# create system specific dicts
KIT_NY = Bunch(**KIT)
KIT_AD = Bunch(**KIT)
# NYU-system channel information
KIT_NY.NCHAN = 192
KIT_NY.NMEGCHAN = 157
KIT_NY.NREFCHAN = 3
KIT_NY.NMISCCHAN = 32
KIT_NY.N_SENS = KIT_NY.NMEGCHAN + KIT_NY.NREFCHAN
# 12-bit A-to-D converter, one bit for signed integer. range +/- 2048
KIT_NY.DYNAMIC_RANGE = 2 ** 12 / 2
# amplifier information
KIT_NY.GAIN1_BIT = 11 # stored in Bit 11-12
KIT_NY.GAIN1_MASK = 2 ** 11 + 2 ** 12
KIT_NY.GAIN2_BIT = 0 # stored in Bit 0-2
KIT_NY.GAIN2_MASK = 2 ** 0 + 2 ** 1 + 2 ** 2 # (0x0007)
KIT_NY.GAIN3_BIT = None
KIT_NY.GAIN3_MASK = None
KIT_NY.HPF_BIT = 4 # stored in Bit 4-5
KIT_NY.HPF_MASK = 2 ** 4 + 2 ** 5
KIT_NY.LPF_BIT = 8 # stored in Bit 8-10
KIT_NY.LPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
KIT_NY.BEF_BIT = 14 # stored in Bit 14-15
KIT_NY.BEF_MASK = 2 ** 14 + 2 ** 15
# HPF options: 0:0, 1:1, 2:3
KIT_NY.HPFS = [0, 1, 3]
# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
# 6:1,000Hz, 7:2,000Hz
KIT_NY.LPFS = [10, 20, 50, 100, 200, 500, 1000, 2000]
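# Illustrative sketch (not part of the original module): decoding settings
# from a raw amplifier-info word with the NY-system bit masks above; the
# value of `_example_amp_info` is made up for demonstration.
_example_amp_info = 0b0001100000010000
_example_gain1 = KIT_NY.GAINS[
    (_example_amp_info & KIT_NY.GAIN1_MASK) >> KIT_NY.GAIN1_BIT]  # -> x10
_example_hpf = KIT_NY.HPFS[
    (_example_amp_info & KIT_NY.HPF_MASK) >> KIT_NY.HPF_BIT]  # -> 1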
# AD-system channel information
KIT_AD.NCHAN = 256
KIT_AD.NMEGCHAN = 208
KIT_AD.NREFCHAN = 16
KIT_AD.NMISCCHAN = 32
KIT_AD.N_SENS = KIT_AD.NMEGCHAN + KIT_AD.NREFCHAN
# 16-bit A-to-D converter, one bit for signed integer. range +/- 32768
KIT_AD.DYNAMIC_RANGE = 2 ** 16 / 2
# amplifier information
KIT_AD.GAIN1_BIT = 12 # stored in Bit 12-14
KIT_AD.GAIN1_MASK = 2 ** 12 + 2 ** 13 + 2 ** 14
KIT_AD.GAIN2_BIT = 28 # stored in Bit 28-30
KIT_AD.GAIN2_MASK = 2 ** 28 + 2 ** 29 + 2 ** 30
KIT_AD.GAIN3_BIT = 24 # stored in Bit 24-26
KIT_AD.GAIN3_MASK = 2 ** 24 + 2 ** 25 + 2 ** 26
KIT_AD.HPF_BIT = 8 # stored in Bit 8-10
KIT_AD.HPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
KIT_AD.LPF_BIT = 16 # stored in Bit 16-18
KIT_AD.LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
KIT_AD.BEF_BIT = 0 # stored in Bit 0-1
KIT_AD.BEF_MASK = 2 ** 0 + 2 ** 1
# HPF options: 0:0Hz, 1:0.03Hz, 2:0.1Hz, 3:0.3Hz, 4:1Hz, 5:3Hz, 6:10Hz, 7:30Hz
KIT_AD.HPFS = [0, 0.03, 0.1, 0.3, 1, 3, 10, 30]
# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
# 6:1,000Hz, 7:10,000Hz
KIT_AD.LPFS = [10, 20, 50, 100, 200, 500, 1000, 10000]
| 29.55 | 78 | 0.654822 |
7941226d6b559991bb5620aaf3133ac31e2672d2 | 4,478 | py | Python | oncopolicy/utils/learn.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 6 | 2022-01-15T11:57:19.000Z | 2022-02-13T21:15:22.000Z | oncopolicy/utils/learn.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | null | null | null | oncopolicy/utils/learn.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 2 | 2022-02-02T13:09:29.000Z | 2022-02-18T07:06:19.000Z | import pickle
import json
import warnings
import torch
import numpy as np
from torch.utils import data
import sklearn.metrics
from collections import defaultdict, OrderedDict, Counter
from oncopolicy.metrics.factory import get_metrics_with_cis
def init_metrics_dictionary():
'''
Return empty metrics dict
'''
stats_dict = defaultdict(list)
stats_dict['best_epoch'] = 0
return stats_dict
def get_train_and_dev_dataset_loaders(args, train_data, dev_data, batch_size):
'''
Given arg configuration, return appropriate torch.DataLoader
for train_data and dev_data
returns:
train_data_loader: iterator that returns batches
dev_data_loader: iterator that returns batches
'''
if args.class_bal:
sampler = torch.utils.data.sampler.WeightedRandomSampler(
weights=train_data.weights,
num_samples=len(train_data),
replacement=True)
train_data_loader = torch.utils.data.DataLoader(
train_data,
num_workers=args.num_workers,
sampler=sampler,
pin_memory=True,
batch_size=batch_size,
collate_fn=ignore_None_collate)
else:
train_data_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=ignore_None_collate,
pin_memory=True,
drop_last=True)
dev_data_loader = torch.utils.data.DataLoader(
dev_data,
batch_size=batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=ignore_None_collate,
pin_memory=True,
drop_last=False)
return train_data_loader, dev_data_loader
def collate_eval_metrics(args, loss, preds, ssns, exams, metrics, stats_dict, key_prefix):
stats_dict['{}_loss'.format(key_prefix)].append(loss)
stats_dict['preds'] = preds
stats_dict['exams'] = exams
stats_dict['ssns'] = ssns
log_statement = '--\nLoss: {:.6f} '.format(loss)
for key in metrics:
if 'list' in key:
stats_dict['{}_{}'.format(key_prefix, key)] = metrics[key]
for key in ['total_reward'] + get_metrics_with_cis():
if key in metrics:
stat_name = "{}_{}".format(key_prefix, key)
stats_dict[stat_name].append(metrics[key])
log_statement += "--{} {:.6f} ".format(stat_name, metrics[key])
if args.task == 'screening':
actions = []
for pred_arr in preds:
            action_arr = [pred_arr[i + 1] - pred_arr[i]
                          for i in range(len(pred_arr) - 1)
                          if pred_arr[i + 1] != pred_arr[i]]
actions.append(action_arr)
stats_dict['actions'] = actions
all_actions = []
for action_arr in actions:
all_actions.extend(action_arr)
histogram = Counter(all_actions)
stats_dict['action_histogram'] = histogram
log_statement += '--action_historgram {}'.format(histogram)
stats_dict["{}_efficiency".format(key_prefix)] = -stats_dict['{}_mo_to_cancer'.format(key_prefix)][-1] / stats_dict['{}_annualized_mammography_cost'.format(key_prefix)][-1]
log_statement += '--efficiency {}'.format(stats_dict["{}_efficiency".format(key_prefix)])
if args.get_conf_intervals:
stats_dict["{}_efficiency_lower_95".format(key_prefix)] = -stats_dict['{}_mo_to_cancer_lower_95'.format(key_prefix)][-1] / stats_dict['{}_annualized_mammography_cost_lower_95'.format(key_prefix)][-1]
stats_dict["{}_efficiency_upper_95".format(key_prefix)] = -stats_dict['{}_mo_to_cancer_upper_95'.format(key_prefix)][-1] / stats_dict['{}_annualized_mammography_cost_upper_95'.format(key_prefix)][-1]
log_statement += ' ({} , {})'.format(stats_dict["{}_efficiency_lower_95".format(key_prefix)], stats_dict["{}_efficiency_upper_95".format(key_prefix)])
return log_statement, stats_dict
def ignore_None_collate(batch):
'''
dataloader.default_collate wrapper that creates batches only of not None values.
Useful for cases when the dataset.__getitem__ can return None because of some
exception and then we will want to exclude that sample from the batch.
'''
batch = [x for x in batch if x is not None]
if len(batch) == 0:
return None
return data.dataloader.default_collate(batch)
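# Hedged usage sketch (illustration only; `my_dataset` is an assumed torch
# Dataset whose __getitem__ may return None for bad samples):
# loader = torch.utils.data.DataLoader(
#     my_dataset, batch_size=32, collate_fn=ignore_None_collate)
# for batch in loader:
#     if batch is None:  # every sample in this batch was filtered out
#         continue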
| 40.709091 | 211 | 0.663243 |
7941240daad3b90b75a20512e3197256dc1e8148 | 10,127 | py | Python | src/policy/nn_q_table.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 70 | 2016-02-20T02:59:14.000Z | 2021-12-30T04:19:09.000Z | src/policy/nn_q_table.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 1 | 2016-05-03T15:57:58.000Z | 2016-05-04T13:55:53.000Z | src/policy/nn_q_table.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 17 | 2016-02-20T03:53:46.000Z | 2021-03-17T07:38:18.000Z | from collections import Counter
import numpy as np
import random
import rospy
import states
import tensorflow as tf
import util as u
def flatten(state):
return np.asarray(state).reshape(1, -1)
# build a (grouped) copy op that copies the values of all variables between two namespaces.
# use an affine_coefficient to denote the amount copied.
# target = affine_coefficient * src + (1.0-affine_coefficient * target)
# affine_coefficient = 0.0 => noop
# affine_coefficient = 0.5 => average
# affine_coefficient = 1.0 => totally clobber 'target' with 'src'
def copy_all_vars(from_namespace, to_namespace, affine_coefficient=1.0):
assert affine_coefficient >= 0.0 and affine_coefficient <= 1.0
copy_ops = []
with tf.variable_scope("", reuse=True): # for grabbing the targets by full namespace
for src_var in tf.all_variables():
# ignore any variable not in src namespace
if not src_var.name.startswith(from_namespace):
continue
# fetch reference to target variable with the same name as the src variable
assert src_var.name.endswith(":0")
target_var_name = src_var.name.replace(from_namespace, to_namespace).replace(":0", "")
target_var = tf.get_variable(target_var_name, src_var.get_shape())
# create a copy op to clobber target with src
# target = alpha * src + (1.0-alpha) * target
copy_ops.append(target_var.assign_sub(affine_coefficient * (target_var - src_var)))
single_copy_op = tf.group(*copy_ops)
return single_copy_op
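# Hedged usage sketch (illustration only; `sess` is an assumed tf.Session):
# a soft target-network update that moves the target 1% of the way towards
# the core weights each time it is run.
# soft_update = copy_all_vars("core", "target", affine_coefficient=0.01)
# sess.run(soft_update)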
def mlp_layer(namespace, input, input_size, output_size, include_non_linearity=False):
with tf.variable_scope(namespace):
projection = tf.get_variable("projection", [input_size, output_size])
bias = tf.get_variable("bias", [1, output_size], initializer=tf.constant_initializer(0.0))
output = tf.matmul(input, projection) + bias
return tf.nn.sigmoid(output) if include_non_linearity else output
def build_model(namespace, state_size, num_actions, hidden_layer_size):
# input is a sequence of 5 * A readings; 5 for last 5 in history, A for readings (F, L, R, B, (whatever?))
    # (i.e. they are just concatenated for this version as opposed to treated as a sequence)
input_state = tf.placeholder(dtype = tf.float32, shape = [None, state_size], name="input_state")
with tf.variable_scope(namespace):
hidden = mlp_layer("h1", input_state, state_size, hidden_layer_size, include_non_linearity=True)
model = mlp_layer("out", hidden, hidden_layer_size, num_actions, include_non_linearity=False)
return input_state, model
# simple single hidden layer neural net for regressing q value for actions
class NNQTablePolicy(object):
def __init__(self, state_size, num_actions, hidden_layer_size, gradient_clip, target_network_update_coeff, summary_file):
self.refreshable_params_inited = False
self.refresh_params()
self.state_size = state_size
self.num_actions = num_actions
self.gradient_clip = gradient_clip
self.target_network_update_coeff = target_network_update_coeff
with tf.device("/cpu:0"):
self.setup_models(hidden_layer_size, summary_file)
self.stats = Counter()
self.calls_to_train = 0
self.one_hot = np.eye(num_actions)
def refresh_params(self):
params = rospy.get_param("q_table_policy")
print "REFRESH_PARAM\t%s" % params
self.discount = params['discount']
self.learning_rate = params['learning_rate']
self.state_normalisation_squash = params['state_normalisation_squash']
self.summary_log_freq = params['summary_log_freq']
self.target_network_update_freq = params['target_network_update_freq']
def setup_models(self, hidden_layer_size, summary_file):
# setup the seperate core and target networks
self.core_state, self.core_q_values = build_model("core", self.state_size, self.num_actions, hidden_layer_size)
self.target_state, self.target_q_values = build_model("target", self.state_size, self.num_actions, hidden_layer_size)
# build the global copy op that will copy core network onto target
self.clobber_target_net_op = copy_all_vars(from_namespace="core", to_namespace="target",
affine_coefficient=self.target_network_update_coeff)
# left hand side of the bellman update; Q(s1, a)
self.core_action_mask = tf.placeholder(dtype=tf.float32, shape=[None, self.num_actions],
name="core_action_mask")
self.core_q_value_for_action = tf.reduce_sum(self.core_q_values * self.core_action_mask)
# right hand side of bellman update; reward + max_a Q(s2, a')
self.reward = tf.placeholder(dtype=tf.float32, name="reward")
self.discount_p = tf.placeholder(dtype=tf.float32, name="discount")
self.max_target_q_value_plus_reward = self.reward + (self.discount_p * tf.stop_gradient(tf.reduce_max(self.target_q_values)))
# for loss just use squared loss on the difference
self.temporal_difference_loss = tf.reduce_mean(tf.pow(self.max_target_q_value_plus_reward - self.core_q_value_for_action, 2))
self.learning_rate_p = tf.placeholder(dtype=tf.float32, name="learning_rate")
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate_p)
#optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, decay=0.9)
gradients = optimizer.compute_gradients(self.temporal_difference_loss)
for i, (gradient, variable) in enumerate(gradients):
if gradient is None: # eg stop gradient cases
continue
gradients[i] = (tf.clip_by_norm(gradient, self.gradient_clip), variable)
tf.histogram_summary(variable.name, variable)
tf.histogram_summary(variable.name + '/gradients', gradient)
tf.scalar_summary("temporal_difference_loss", self.temporal_difference_loss)
self.train_op = optimizer.apply_gradients(gradients)
# build session
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.summaries = tf.merge_all_summaries()
self.summary_writer = tf.train.SummaryWriter(summary_file, self.sess.graph_def)
def action_given_state(self, state):
state = flatten(state)
q_values = self.sess.run(self.core_q_values, feed_dict={self.core_state: state})
normed = u.normalised(u.raised(q_values[0], self.state_normalisation_squash))
action = u.weighted_choice(normed)
if random.random() <= 0.05:
q_values_str = " ".join(map(str, ["%0.2f" % v for v in q_values[0]]))
normed_str = " ".join(map(str, ["%0.2f" % v for v in normed]))
print ">action_given_state state %s q_values %s normed %s action %s" % (state, q_values_str, normed_str, action)
return action
def train(self, state_1, action, reward, state_2):
self.stats['>train'] += 1
self.calls_to_train += 1
self.stats["train a %s r %s" % (action, reward)] += 1
state_1 = flatten(state_1)
state_2 = flatten(state_2)
# >>> DEBUG
debug = False
if debug:
print ">>>>DEBUG learning_rate", self.learning_rate, "discount", self.discount
print "state1 %s action %s reward %s state2 %s" % (state_1, action, reward, state_2)
print "core_q_values BEFORE", self.sess.run(self.core_q_values, feed_dict={self.core_state: state_1})
print "target_q_values", self.sess.run(self.target_q_values, feed_dict={self.target_state: state_2})
print "max_target_q_value_plus_reward", self.sess.run(self.max_target_q_value_plus_reward,
feed_dict={self.reward: reward,
self.discount_p: self.discount,
self.target_state: state_2})
print "temporal_difference_loss", self.sess.run(self.temporal_difference_loss,
feed_dict={self.core_action_mask: [self.one_hot[action]],
self.core_state: state_1,
self.reward: reward,
self.discount_p: self.discount,
self.target_state: state_2})
# <<< DEBUG
# train against temporal difference. write summaries every 100th call
training_feed_dict = {self.core_state: state_1,
self.core_action_mask: [self.one_hot[action]],
self.reward: reward,
self.discount_p: self.discount,
self.target_state: state_2,
self.learning_rate_p: self.learning_rate}
if self.calls_to_train % self.summary_log_freq == 0:
_opt, summaries = self.sess.run([self.train_op, self.summaries], feed_dict=training_feed_dict)
self.summary_writer.add_summary(summaries, self.calls_to_train)
else:
_opt = self.sess.run(self.train_op, feed_dict=training_feed_dict)
if debug:
print "core_q_values AFTER", self.sess.run(self.core_q_values, feed_dict={self.core_state: state_1})
# copy across target network from time to time
if self.calls_to_train % self.target_network_update_freq == 0:
self.sess.run(self.clobber_target_net_op)
# occasionally dump debug
if self.calls_to_train % self.summary_log_freq == 0:
self.refresh_params()
print "STATS", self.stats
self.stats = Counter()
| 54.740541 | 133 | 0.643132 |
794124161b981f5a84341d2dd395cd293561d08c | 4,195 | py | Python | setup.py | Ery4z/PyClip | 259c9e255aef818a7ed5cf6fe5ebff8215c93c6d | [
"X11"
] | 1 | 2021-11-23T14:11:33.000Z | 2021-11-23T14:11:33.000Z | setup.py | Ery4z/PyClip | 259c9e255aef818a7ed5cf6fe5ebff8215c93c6d | [
"X11"
] | null | null | null | setup.py | Ery4z/PyClip | 259c9e255aef818a7ed5cf6fe5ebff8215c93c6d | [
"X11"
] | null | null | null | import os
import tkinter
from tkinter import ttk
import json
import sounddevice as sd
def Settings():
    config = load_settings()
devices_selected = []
for entry in config["entries"]:
devices_selected.append([entry["name"], entry["label"]])
list_devices = sd.query_devices()
real_list = []
for device in list_devices:
if device["hostapi"] == 0 and device["max_input_channels"] > 0:
real_list.append(device)
name_list = [device["name"] for device in real_list]
window = tkinter.Tk()
window.title("PyClip Settings")
# window.geometry("500x")
label1 = tkinter.Label(window, text="In")
label1.grid(column=0, columnspan=1, row=1)
labelPeriph = tkinter.Label(window, text="Periph")
labelPeriph.grid(column=1, columnspan=5, row=0)
labelLabel = tkinter.Label(window, text="Label")
labelLabel.grid(column=6, columnspan=5, row=0)
entry1 = tkinter.Entry(window)
entry1.grid(column=6, columnspan=5, row=1)
combo_1 = ttk.Combobox(window)
combo_1["values"] = name_list
combo_1.current(1) # set the selected item
combo_1.grid(column=1, columnspan=5, row=1)
b_add = tkinter.Button(
window,
text="Add",
command=lambda: add_entry(
combo_1.get(),
entry1.get(),
list_box_devices,
config,
),
)
b_add.grid(column=11, columnspan=1, row=1)
list_box_devices = tkinter.Listbox(window, width=50, height=5)
list_box_devices.grid(column=1, columnspan=10, row=3)
for devices in devices_selected:
list_box_devices.insert("end", f"{devices[0]} | {devices[1]}")
b_remove = tkinter.Button(
window,
text="Remove entry",
command=lambda: remove_entry(
list_box_devices.get(list_box_devices.curselection()),
list_box_devices,
config,
),
)
b_remove.grid(column=1, columnspan=1, row=4)
if "startup" in config:
default_check = config["startup"]
else:
default_check = 0
check = tkinter.IntVar(value=default_check)
startupbutton = tkinter.Checkbutton(
window,
text="Start on startup",
variable=check,
onvalue=1,
offvalue=0,
command=lambda: startup(check, config),
)
startupbutton.grid(column=12, columnspan=1, row=3)
b_valid = tkinter.Button(
window,
text="Save Settings",
command=lambda: save_settings(config, window=window),
)
b_valid.grid(column=1, columnspan=1, row=5)
window.mainloop()
def add_entry(entry, label, list_box_devices, config):
list_box_devices.insert("end", f"{entry} | {label}")
config["entries"].append({"name": entry, "label": label})
def remove_entry(entry, list_box_devices, config):
selected_checkboxs = list_box_devices.curselection()
for selected_checkbox in selected_checkboxs[::-1]:
list_box_devices.delete(selected_checkbox)
l = entry.split(" | ")
device = l[0]
label = l[1]
for entry in config["entries"]:
if entry["name"] == device:
config["entries"].remove(entry)
return 0
def startup(value, config):
config["startup"] = value.get()
def load_settings():
config_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "config.txt"
)
with open(config_file, "r") as f:
try:
config = json.loads(f.read())
except:
return {"entries": []}
return config
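# Hedged sketch of the expected config.txt payload (inferred from the reads
# and writes in this file; the device name and label are example values):
# {"entries": [{"name": "Microphone (USB Audio)", "label": "mic"}],
#  "startup": 1}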
def save_settings(config, window=None):
config_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "config.txt"
)
start_app_address = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "run.py"
)
startup_file = f"{os.path.dirname(os.path.realpath(__file__))}\\StartupPyClip.cmd"
    if config.get("startup"):
with open(startup_file, "w") as f:
f.write(
f"cd {os.path.dirname(os.path.realpath(__file__))}\npython run.py")
if window is not None:
window.destroy()
with open(config_file, "w") as f:
f.write(json.dumps(config))
Settings()
| 27.24026 | 86 | 0.622884 |
794124575e1316aed68ca7c718df5047682335ec | 5,104 | py | Python | src/transformers/models/data2vec/__init__.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/data2vec/__init__.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/data2vec/__init__.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
_import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
_import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
_import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
from .configuration_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecTextConfig,
Data2VecTextOnnxConfig,
)
from .configuration_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecVisionConfig,
Data2VecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_data2vec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
)
from .modeling_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .modeling_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecVisionForImageClassification,
Data2VecVisionForMaskedImageModeling,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_data2vec_vision import (
TFData2VecVisionForImageClassification,
TFData2VecVisionForSemanticSegmentation,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
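# Hedged note (illustration only): with the lazy module installed above, a
# user-level import such as the following resolves the submodule on first
# attribute access:
# from transformers.models.data2vec import Data2VecTextModel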
| 36.457143 | 113 | 0.717281 |
794124c921c54dd632151d767629cef0887cf2b8 | 15,661 | py | Python | desdeo_emo/EAs/IOPIS.py | giomara-larraga/desdeo-emo | d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53 | [
"MIT"
] | 3 | 2021-05-02T17:42:39.000Z | 2022-02-16T05:22:56.000Z | desdeo_emo/EAs/IOPIS.py | giomara-larraga/desdeo-emo | d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53 | [
"MIT"
] | 32 | 2019-10-30T08:33:13.000Z | 2022-03-12T00:54:02.000Z | desdeo_emo/EAs/IOPIS.py | giomara-larraga/desdeo-emo | d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53 | [
"MIT"
] | 12 | 2019-10-16T10:00:47.000Z | 2022-03-17T13:31:41.000Z | from typing import Dict, Union, List
from desdeo_emo.EAs.BaseEA import eaError
from desdeo_emo.EAs.BaseEA import BaseDecompositionEA, BaseEA
from desdeo_emo.EAs.RVEA import RVEA
from desdeo_emo.population.Population import Population
from desdeo_emo.selection.IOPIS_APD import IOPIS_APD_Select
from desdeo_emo.selection.IOPIS_NSGAIII import IOPIS_NSGAIII_select
from desdeo_problem import MOProblem
from desdeo_tools.scalarization import StomASF, PointMethodASF, AugmentedGuessASF
from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors
from desdeo_tools.interaction import (
ReferencePointPreference,
validate_ref_point_with_ideal_and_nadir,
)
import numpy as np
import pandas as pd
class BaseIOPISDecompositionEA(BaseDecompositionEA, BaseEA):
def __init__(
self,
problem: MOProblem,
population_size: int = None,
population_params: Dict = None,
initial_population: Population = None,
lattice_resolution: int = None,
n_iterations: int = 10,
n_gen_per_iter: int = 100,
total_function_evaluations: int = 0,
use_surrogates: bool = False,
):
a_priori: bool = True
interact: bool = True
if problem.ideal is None or problem.nadir is None:
msg = (
f"The problem instance should contain the information about ideal and "
f"nadir point."
)
raise eaError(msg)
BaseEA.__init__(
self=self,
a_priori=a_priori,
interact=interact,
n_iterations=n_iterations,
n_gen_per_iter=n_gen_per_iter,
total_function_evaluations=total_function_evaluations,
use_surrogates=use_surrogates,
)
scalarization_methods = [
StomASF(ideal=problem.ideal * problem._max_multiplier),
# PointMethodASF(
# nadir=problem.nadir * problem._max_multiplier,
# ideal=problem.ideal * problem._max_multiplier,
# ),
AugmentedGuessASF(
nadir=problem.nadir * problem._max_multiplier,
ideal=problem.ideal * problem._max_multiplier,
indx_to_exclude=[],
),
]
if lattice_resolution is None:
lattice_res_options = [49, 13, 7, 5, 4, 3, 3, 3, 3]
if len(scalarization_methods) < 11:
lattice_resolution = lattice_res_options[len(scalarization_methods) - 2]
else:
lattice_resolution = 3
reference_vectors = ReferenceVectors(
lattice_resolution=lattice_resolution,
number_of_objectives=len(scalarization_methods),
)
population_size = reference_vectors.number_of_vectors
population = Population(problem, population_size, population_params)
self.reference_vectors = reference_vectors
self.scalarization_methods = scalarization_methods
if initial_population is not None:
# Population should be compatible.
self.population = initial_population # TODO put checks here.
elif initial_population is None:
if population_size is None:
population_size = self.reference_vectors.number_of_vectors
self.population = Population(
problem, population_size, population_params, use_surrogates
)
self._function_evaluation_count += population_size
self._ref_vectors_are_focused: bool = False
def manage_preferences(self, preference=None):
"""Run the interruption phase of EA.
Use this phase to make changes to RVEA.params or other objects.
Updates Reference Vectors (adaptation), conducts interaction with the user.
"""
if preference is None:
msg = "Giving preferences is mandatory"
raise eaError(msg)
if not isinstance(preference, ReferencePointPreference):
msg = (
f"Wrong object sent as preference. Expected type = "
f"{type(ReferencePointPreference)} or None\n"
f"Recieved type = {type(preference)}"
)
raise eaError(msg)
if preference.request_id != self._interaction_request_id:
msg = (
f"Wrong preference object sent. Expected id = "
f"{self._interaction_request_id}.\n"
f"Recieved id = {preference.request_id}"
)
raise eaError(msg)
refpoint = preference.response.values * self.population.problem._max_multiplier
self._preference = refpoint
scalarized_space_fitness = np.asarray(
[
scalar(self.population.fitness, self._preference)
for scalar in self.scalarization_methods
]
).T
self.reference_vectors.adapt(scalarized_space_fitness)
self.reference_vectors.neighbouring_angles()
def request_preferences(self) -> Union[None, ReferencePointPreference]:
dimensions_data = pd.DataFrame(
index=["minimize", "ideal", "nadir"],
columns=self.population.problem.get_objective_names(),
)
dimensions_data.loc["minimize"] = self.population.problem._max_multiplier
dimensions_data.loc["ideal"] = self.population.ideal_objective_vector
dimensions_data.loc["nadir"] = self.population.nadir_objective_vector
message = (
f"Provide a reference point worse than to the ideal point and better than"
f" the nadir point.\n"
f"Ideal point: \n{dimensions_data.loc['ideal']}\n"
f"Nadir point: \n{dimensions_data.loc['nadir']}\n"
f"The reference point will be used to create scalarization functions in "
f"the preferred region.\n"
)
interaction_priority = "required"
self._interaction_request_id = np.random.randint(0, 1e7)
return ReferencePointPreference(
dimensions_data=dimensions_data,
message=message,
interaction_priority=interaction_priority,
preference_validator=validate_ref_point_with_ideal_and_nadir,
request_id=self._interaction_request_id,
)
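    # Hedged usage sketch (illustration only; the reference-point values and
    # `problem` are assumptions): a decision maker answers the request with a
    # DataFrame over the objective names, then hands it back to the EA.
    # pref = ea.request_preferences()
    # pref.response = pd.DataFrame([[0.5, 0.3]],
    #                              columns=problem.get_objective_names())
    # ea.manage_preferences(pref)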
def _select(self) -> List:
return self.selection_operator.do(
self.population, self.reference_vectors, self._preference
)
class IOPIS_RVEA(BaseIOPISDecompositionEA, RVEA):
"""The python version reference vector guided evolutionary algorithm.
Most of the relevant code is contained in the super class. This class just assigns
the APD selection operator to BaseDecompositionEA.
    NOTE: The APD function had to be slightly modified to accommodate the fact that
this version of the algorithm is interactive, and does not have a set termination
criteria. There is a time component in the APD penalty function formula of the type:
(t/t_max)^alpha. As there is no set t_max, the formula has been changed. See below,
the documentation for the argument: penalty_time_component
See the details of RVEA in the following paper
R. Cheng, Y. Jin, M. Olhofer and B. Sendhoff, A Reference Vector Guided
Evolutionary Algorithm for Many-objective Optimization, IEEE Transactions on
Evolutionary Computation, 2016
Parameters
----------
problem : MOProblem
The problem class object specifying the details of the problem.
population_size : int, optional
The desired population size, by default None, which sets up a default value
        of population size depending upon the dimensionality of the problem.
population_params : Dict, optional
The parameters for the population class, by default None. See
desdeo_emo.population.Population for more details.
initial_population : Population, optional
An initial population class, by default None. Use this if you want to set up
a specific starting population, such as when the output of one EA is to be
used as the input of another.
alpha : float, optional
The alpha parameter in the APD selection mechanism. Read paper for details.
lattice_resolution : int, optional
The number of divisions along individual axes in the objective space to be
used while creating the reference vector lattice by the simplex lattice
design. By default None
a_priori : bool, optional
A bool variable defining whether a priori preference is to be used or not.
By default False
interact : bool, optional
A bool variable defining whether interactive preference is to be used or
not. By default False
n_iterations : int, optional
The total number of iterations to be run, by default 10. This is not a hard
limit and is only used for an internal counter.
n_gen_per_iter : int, optional
The total number of generations in an iteration to be run, by default 100.
This is not a hard limit and is only used for an internal counter.
total_function_evaluations :int, optional
Set an upper limit to the total number of function evaluations. When set to
zero, this argument is ignored and other termination criteria are used.
    time_penalty_component: Union[str, float], optional
        The APD formula had to be slightly changed.
        If time_penalty_component is a float between [0, 1], (t/t_max) is replaced by
        that constant for the entire algorithm.
        If time_penalty_component is "original", the original intent of the paper is
        followed and (t/t_max) is calculated as
        (current generation count/total number of generations).
        If time_penalty_component is "function_count", (t/t_max) is calculated as
        (current function evaluation count/total number of function evaluations).
        If time_penalty_component is "interactive", (t/t_max) is calculated as
        (current gen count within an iteration/total gen count within an iteration).
        Hence, the time penalty is always zero at the beginning of each iteration,
        and one at the end of each iteration.
        Note: If the time penalty component ever exceeds one, the value one is used
        instead.
        If no value is provided, an appropriate default is selected.
        If `interact` is true, time_penalty_component is "interactive" by default.
        If `interact` is false, but `total_function_evaluations` is provided,
        time_penalty_component is "function_count" by default.
        If `interact` is false, but `total_function_evaluations` is not provided,
        time_penalty_component is "original" by default.
"""
def __init__(
self,
problem: MOProblem,
population_size: int = None,
population_params: Dict = None,
initial_population: Population = None,
alpha: float = None,
lattice_resolution: int = None,
n_iterations: int = 10,
n_gen_per_iter: int = 100,
total_function_evaluations: int = 0,
time_penalty_component: Union[str, float] = None,
use_surrogates: bool = False,
):
super().__init__(
problem=problem,
population_size=population_size,
population_params=population_params,
initial_population=initial_population,
lattice_resolution=lattice_resolution,
n_iterations=n_iterations,
n_gen_per_iter=n_gen_per_iter,
total_function_evaluations=total_function_evaluations,
use_surrogates=use_surrogates,
)
self.time_penalty_component = time_penalty_component
time_penalty_component_options = ["original", "function_count", "interactive"]
if time_penalty_component is None:
if self.interact is True:
time_penalty_component = "interactive"
elif total_function_evaluations > 0:
time_penalty_component = "function_count"
else:
time_penalty_component = "original"
        if not isinstance(time_penalty_component, (float, str)):
msg = (
f"type(time_penalty_component) should be float or str"
f"Provided type: {type(time_penalty_component)}"
)
            raise eaError(msg)
if type(time_penalty_component) is float:
if (time_penalty_component <= 0) or (time_penalty_component >= 1):
msg = (
f"time_penalty_component should either be a float in the range"
f"[0, 1], or one of {time_penalty_component_options}.\n"
f"Provided value = {time_penalty_component}"
)
                raise eaError(msg)
time_penalty_function = self._time_penalty_constant
if type(time_penalty_component) is str:
if time_penalty_component == "original":
time_penalty_function = self._time_penalty_original
elif time_penalty_component == "function_count":
time_penalty_function = self._time_penalty_function_count
elif time_penalty_component == "interactive":
time_penalty_function = self._time_penalty_interactive
else:
msg = (
f"time_penalty_component should either be a float in the range"
f"[0, 1], or one of {time_penalty_component_options}.\n"
f"Provided value = {time_penalty_component}"
)
                raise eaError(msg)
self.time_penalty_function = time_penalty_function
self.alpha = alpha
selection_operator = IOPIS_APD_Select(
self.time_penalty_function, self.scalarization_methods, self.alpha
)
self.selection_operator = selection_operator
def _time_penalty_constant(self):
"""Returns the constant time penalty value.
"""
return self.time_penalty_component
def _time_penalty_original(self):
"""Calculates the appropriate time penalty value, by the original formula.
"""
return self._current_gen_count / self.total_gen_count
def _time_penalty_interactive(self):
"""Calculates the appropriate time penalty value.
"""
return self._gen_count_in_curr_iteration / self.n_gen_per_iter
def _time_penalty_function_count(self):
"""Calculates the appropriate time penalty value.
"""
return self._function_evaluation_count / self.total_function_evaluations
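# Hedged usage sketch (illustration only; `problem` is an assumed MOProblem
# with ideal and nadir set): choosing the "function_count" time penalty ties
# the APD selection pressure to the evaluation budget.
# ea = IOPIS_RVEA(problem, time_penalty_component="function_count",
#                 total_function_evaluations=2000)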
class IOPIS_NSGAIII(BaseIOPISDecompositionEA):
def __init__(
self,
problem: MOProblem,
population_size: int = None,
population_params: Dict = None,
initial_population: Population = None,
lattice_resolution: int = None,
n_iterations: int = 10,
n_gen_per_iter: int = 100,
total_function_evaluations: int = 0,
use_surrogates: bool = False,
):
super().__init__(
problem=problem,
population_size=population_size,
population_params=population_params,
initial_population=initial_population,
lattice_resolution=lattice_resolution,
n_iterations=n_iterations,
n_gen_per_iter=n_gen_per_iter,
total_function_evaluations=total_function_evaluations,
use_surrogates=use_surrogates,
)
self.selection_operator = IOPIS_NSGAIII_select(
self.scalarization_methods, self.population
)
| 43.991573 | 88 | 0.661133 |
794124d695a4650cf4201b7ca0fe3002296ad401 | 279 | py | Python | models/sum.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | models/sum.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | models/sum.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | """A mock model for simulator server testing.
"""
from concurrent.futures import CancelledError
import dask
@dask.delayed
def task(spec, cancel):
if cancel.get():
raise CancelledError("Cancelled by request")
return {"sum": spec.inputs['x'] + spec.inputs['y']}
| 21.461538 | 55 | 0.691756 |
7941253d2ef3e790034259c5eebe7430bfaef1f2 | 61 | py | Python | 03_EstruturasRepeticao/09_impares_ate_150.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | 03_EstruturasRepeticao/09_impares_ate_150.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | 03_EstruturasRepeticao/09_impares_ate_150.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | for i in range(0, 150):
if (i % 2 != 0):
print i
| 15.25 | 23 | 0.42623 |
7941270513c577b1480f840375ae85678bf6d829 | 13,894 | py | Python | projeto/main/consumers.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
] | null | null | null | projeto/main/consumers.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
] | 1 | 2021-10-03T22:26:45.000Z | 2021-10-03T22:26:45.000Z | projeto/main/consumers.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
] | null | null | null |
# chat/consumers.py
from time import time
import json
from main.game.especialMoves import EnPassant
from main.game.game import selectPiece
from main.game.ConvertStringArray import arrayToStringallPieces, arrayTostring, stringToArray, arrayToHistory
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from main.game.verifyCheck import verificarMate
from .models import Room,GameHistory
class RoomConsumer(WebsocketConsumer):
def connect(self):
        # because allauth is already the default backend, self.scope returns the logged-in user
self.time = time
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
self.Room,created = Room.objects.get_or_create(roomCode=self.room_group_name)
if created:
self.Room.user1= str(self.scope['user'])
self.historico,created = GameHistory.objects.get_or_create(RoomName=str(self.room_name),
user1=self.Room.user1,
user2=self.Room.user2,
timer1=self.Room.timer1,
timer2=self.Room.timer2,
history='')
self.Room.save()
else:
if self.Room.user1 == str(self.scope['user']):
pass
else:
self.Room.user2 = str(self.scope['user'])
self.Room.save()
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
if self.Room.user1 != '' and self.Room.user2 != '':
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'start_game',
'data':{
'user1':self.Room.user1,
'user2':self.Room.user2
}
}
)
self.Room.save()
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from room group
def chat_message(self, event):
message = event['data']['message']
# Send message to WebSocket
self.send(text_data=json.dumps({
'message': message,
'usuario':event['usuario']
}))
def start_game(self,data):
self.Room,created = Room.objects.get_or_create(roomCode=self.room_group_name)
usuario1 = data['data']['user1']
usuario2 = data['data']['user2']
if usuario1 == str(self.scope['user']):
self.send(text_data=json.dumps({
'user1': usuario1,
'user2': usuario2,
'userColor':'w',
'message':'game has been started you are white pieces',
'startGame':self.Room.pieces
}))
elif usuario2 == str(self.scope['user']):
self.send(text_data=json.dumps({
'user1': usuario1,
'user2': usuario2,
'userColor':'b',
'message':'game has been started you are black pieces',
'startGame':self.Room.pieces
}))
    # helper methods
def timerHandler(self,who):
        # temporary timer
if self.Room.tempTimer == 0:
self.Room.tempTimer = int(self.time()%10000)
self.Room.save()
return
tempTimer = self.Room.tempTimer
if who == self.Room.user1:
newTempTimer = int(self.time()%10000)
self.Room.timer1 = self.Room.timer1 - (newTempTimer-tempTimer)
self.send(text_data=json.dumps({
                'message':'white moved, remaining time: {}'.format(self.Room.timer1)
}))
elif who == self.Room.user2:
newTempTimer = int(self.time()%10000)
self.Room.timer2 = self.Room.timer2 - (newTempTimer-tempTimer)
self.send(text_data=json.dumps({
                'message':'black moved, remaining time: {}'.format(self.Room.timer2)
}))
self.Room.save()
self.Room.tempTimer = int(self.time()%10000)
if self.Room.timer1 <= 0:
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':'w'
}
)
elif self.Room.timer2 <= 0:
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':'b'
}
)
def timer_end(self):
        if self.Room.user1 == str(self.scope['user']):
            self.timerHandler(self.Room.user1)
        elif self.Room.user2 == str(self.scope['user']):
            self.timerHandler(self.Room.user2)
def select_piece(self,data):
        # get the piece that was selected
allPieces = stringToArray(self.Room.pieces)
piece = data['data']['piece']
color = piece[1]
if color =='w' and self.Room.user1 == str(self.scope['user']) and self.Room.whoMove == True:
            # collect all the pieces on the backend
            # check whether the piece exists
for line in allPieces:
for pieceInBack in line:
if pieceInBack == piece:
                        # if the piece exists, return its possible moves if any; otherwise just return the piece
moves = selectPiece(allPieces,piece,self.Room)
if piece == moves.strip():
self.send(text_data=json.dumps({
                                'message':'no moves available',
'piece':piece
}))
else:
self.send(text_data=json.dumps({
'message':'moves',
'moves':moves.strip()
}))
elif color=='b' and self.Room.user2 == str(self.scope['user']) and self.Room.whoMove == False:
for line in allPieces:
for pieceInBack in line:
if pieceInBack == piece:
                        # if the piece exists, return its possible moves if any; otherwise just return the piece
moves = selectPiece(allPieces,piece,self.Room)
if piece == moves.strip():
self.send(text_data=json.dumps({
                                'message':'no moves available',
'piece':piece
}))
else:
self.send(text_data=json.dumps({
'message':'moves',
'moves':moves.strip()
}))
def actualizeWhoMove(self,data):
move = data['data']['data']['move']
move = move.split(' ')
color = move[0][1]
if color == 'w':
self.Room.whoMove = False
else:
self.Room.whoMove = True
    # execute the move for the selected piece
def move_piece(self,data):
self.timerHandler(data['usuario'])
EnPassant = False
#actualize move for all players
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'actualizeWhoMove',
'data':data
}
)
move = data['data']['move']
move = move.split(' ')
pieces = self.Room.pieces
piecesArray = stringToArray(pieces)
        # update the history for the players
if self.Room.history != '':
self.Room.history = self.Room.history + arrayToHistory(move) +','
else:
self.Room.history = arrayToHistory(move)+','
for line in piecesArray:
for piece in line:
if piece == move[0]:
                    # check whether it is a pawn
                    if move[0][0] == 'p':
                        # check for promotion
if move[1][2] == '7' and move[1][1] == 'w':
move[1] = 'q'+move[1][1]+move[1][2]+move[1][3]
elif move[1][2] == '0' and move[1][1] == 'b':
move[1] = 'q'+move[1][1]+move[1][2]+move[1][3]
                        # check whether it is an en passant move
if move[0][3] != move[1][3]:
if piecesArray[int(move[1][2])][int(move[1][3])] == '----':
                                # apply the en passant capture
piecesArray[int(piece[2])][int(piece[3])] = '----'
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
if move[0][1] == 'w':
move.append(piecesArray[int(move[1][2])-1][int(move[1][3])])
piecesArray[int(move[1][2])-1][int(move[1][3])] = '----'
else:
move.append(piecesArray[int(move[1][2])+1][int(move[1][3])])
piecesArray[int(move[1][2])+1][int(move[1][3])] = '----'
EnPassant = True
self.send(text_data=json.dumps({
'message':'moved',
'enPassant':move
}))
elif move[0][0] == 'k':
movimento = int(move[1][3])
if movimento == int(move[0][3])+2 or movimento == int(move[0][3])-2:
if movimento == int(move[0][3])+2:
move.append(piecesArray[int(move[0][2])][movimento+1])
move.append('r'+move[0][1]+move[0][2]+str(movimento-1))
piecesArray[int(move[0][2])][int(move[0][3])] = '----'
piecesArray[int(move[0][2])][movimento-1] = move[3]
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
elif movimento == int(move[0][3])-2:
move.append(piecesArray[int(move[0][2])][movimento-2])
move.append('r'+move[0][1]+move[0][2]+str(movimento+1))
piecesArray[int(move[0][2])][int(move[0][3])] = '----'
piecesArray[int(move[0][2])][movimento+1] = move[3]
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
self.send(text_data=json.dumps({
'message':'moved',
'castles':move
}))
EnPassant = True
if EnPassant == False:
piecesArray[int(piece[2])][int(piece[3])] = '----'
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
move_piece = move
self.send(text_data=json.dumps({
'message':'moved',
'movePiece':move_piece
}))
self.Room.pieces = arrayToStringallPieces(piecesArray)
if move[0][1] == 'w':
mate = verificarMate(piecesArray,'b')
else:
mate = verificarMate(piecesArray,'w')
if mate:
self.send(text_data=json.dumps({
                'gameEnd':'over',
'whoLost':mate
}))
if self.Room.user1 == str(self.scope['user']):
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':mate
}
)
def game_end(self,data):
loser = data['data']
winner = ''
if loser == 'w':
winner = 'b'
elif loser == 'b':
winner = 'w'
else:
winner = 'd'
if self.Room.user1 == str(self.scope['user']):
self.historico.result = winner
self.historico.user1 = self.Room.user1
self.historico.user2 = self.Room.user2
self.historico.history = self.Room.history
self.historico.timer1 = self.Room.timer1
self.historico.timer2 - self.Room.timer2
self.historico.save()
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
command = text_data_json['command']
usuario = str(self.scope['user'])
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':command,
'data':text_data_json,
'usuario':usuario
}
)
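    # Hedged sketch (illustration only) of a client payload `receive`
    # expects; the move string format is inferred from move_piece above and
    # the exact squares are example values:
    # {"command": "move_piece", "move": "pw13 pw33"}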
| 43.01548 | 121 | 0.456168 |
79412766f3a33106150c3d3c9f538d4d3e0179be | 2,521 | py | Python | experiments/YOLOv3/creating-files-data-and-name.py | moeraza/juice-box | 6bf9ae9bf95ebc68ef0620466467e97ff4260774 | [
"MIT"
] | 1 | 2021-02-18T02:16:17.000Z | 2021-02-18T02:16:17.000Z | experiments/YOLOv3/creating-files-data-and-name.py | moeraza/juice-box | 6bf9ae9bf95ebc68ef0620466467e97ff4260774 | [
"MIT"
] | null | null | null | experiments/YOLOv3/creating-files-data-and-name.py | moeraza/juice-box | 6bf9ae9bf95ebc68ef0620466467e97ff4260774 | [
"MIT"
] | null | null | null |
"""
Course: Training YOLO v3 for Objects Detection with Custom Data
Section-3
Labelling new Dataset in YOLO format
File: creating-files-data-and-name.py
"""
# Creating files labelled_data.data and classes.names
# for training in Darknet framework
#
# Algorithm:
# Setting up full paths --> Reading file classes.txt -->
# --> Creating file classes.names -->
# --> Creating file labelled_data.data
#
# Result:
# Files classes.names and labelled_data.data needed to train
# in Darknet framework
"""
Start of:
Setting up full path to directory with labelled images
"""
# Full or absolute path to the folder with images
# Find it with Py file getting-full-path.py
# Pay attention! If you're using Windows, your path might look like:
# r'C:\Users\my_name\Downloads\video-to-annotate'
# or:
# 'C:\\Users\\my_name\\Downloads\\video-to-annotate'
full_path_to_images = 'custom_data'
"""
End of:
Setting up full path to directory with labelled images
"""
"""
Start of:
Creating file classes.names
"""
# Defining counter for classes
c = 0
# Creating file classes.names from existing one classes.txt
# Pay attention! If you're using Windows, you might need to change
# this: + '/' +
# to this: + '\' +
# or to this: + '\\' +
with open(full_path_to_images + '/' + 'classes.names', 'w') as names, \
open(full_path_to_images + '/' + 'classes.txt', 'r') as txt:
# Going through all lines in txt file and writing them into names file
for line in txt:
names.write(line) # Copying all info from file txt to names
# Increasing counter
c += 1
"""
End of:
Creating file classes.names
"""
"""
Start of:
Creating file labelled_data.data
"""
# Creating file labelled_data.data
# Pay attention! If you're using Windows, you might need to change
# this: + '/' +
# to this: + '\' +
# or to this: + '\\' +
with open(full_path_to_images + '/' + 'labelled_data.data', 'w') as data:
# Writing needed 5 lines
# Number of classes
# By using '\n' we move to the next line
data.write('classes = ' + str(c) + '\n')
# Location of the train.txt file
data.write('train = ' + full_path_to_images + '/' + 'train.txt' + '\n')
# Location of the test.txt file
data.write('valid = ' + full_path_to_images + '/' + 'test.txt' + '\n')
# Location of the classes.names file
data.write('names = ' + full_path_to_images + '/' + 'classes.names' + '\n')
# Location where to save weights
data.write('backup = backup')
"""
End of:
Creating file labelled_data.data
"""
| 24.009524 | 79 | 0.668386 |
794128769b1695c2bb32c73d7c9b78f58f7108e4 | 6,286 | py | Python | octavia/tests/unit/common/test_base_taskflow.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 129 | 2015-06-23T08:06:23.000Z | 2022-03-31T12:38:20.000Z | octavia/tests/unit/common/test_base_taskflow.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 6 | 2016-05-20T11:05:27.000Z | 2021-03-23T06:05:52.000Z | octavia/tests/unit/common/test_base_taskflow.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 166 | 2015-07-15T16:24:05.000Z | 2022-03-02T20:54:36.000Z | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import concurrent.futures
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow import engines as tf_engines
from octavia.common import base_taskflow
import octavia.tests.unit.base as base
MAX_WORKERS = 1
ENGINE = 'parallel'
_engine_mock = mock.MagicMock()
class TestBaseTaskFlowEngine(base.TestCase):
def setUp(self):
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="task_flow", max_workers=MAX_WORKERS)
conf.config(group="task_flow", engine=ENGINE)
conf.config(group="task_flow", disable_revert=True)
super().setUp()
@mock.patch('concurrent.futures.ThreadPoolExecutor',
return_value='TESTEXECUTOR')
@mock.patch('taskflow.engines.load',
return_value=_engine_mock)
def test_taskflow_load(self,
mock_tf_engine_load,
mock_ThreadPoolExecutor):
# Test __init__
base_taskflow_engine = base_taskflow.BaseTaskFlowEngine()
concurrent.futures.ThreadPoolExecutor.assert_called_once_with(
max_workers=MAX_WORKERS)
# Test taskflow_load
base_taskflow_engine.taskflow_load('TEST')
tf_engines.load.assert_called_once_with(
'TEST',
engine=ENGINE,
executor='TESTEXECUTOR',
never_resolve=True)
_engine_mock.compile.assert_called_once_with()
_engine_mock.prepare.assert_called_once_with()
class TestTaskFlowServiceController(base.TestCase):
_mock_uuid = '9a2ebc48-cd3e-429e-aa04-e32f5fc5442a'
def setUp(self):
self.conf = oslo_fixture.Config(cfg.CONF)
self.conf.config(group="task_flow", engine='parallel')
self.conf.config(group="task_flow", max_workers=MAX_WORKERS)
self.driver_mock = mock.MagicMock()
self.persistence_mock = mock.MagicMock()
self.jobboard_mock = mock.MagicMock()
self.driver_mock.job_board.return_value = self.jobboard_mock
self.driver_mock.persistence_driver.get_persistence.return_value = (
self.persistence_mock)
self.service_controller = base_taskflow.TaskFlowServiceController(
self.driver_mock)
super().setUp()
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=_mock_uuid)
@mock.patch('taskflow.engines.save_factory_details')
def test_run_poster(self, mock_engines, mockuuid):
flow_factory = mock.MagicMock()
flow_factory.__name__ = 'testname'
job_name = 'testname-%s' % self._mock_uuid
job_details = {'store': 'test'}
with mock.patch.object(self.service_controller, '_wait_for_job'
) as wait:
uuid = self.service_controller.run_poster(flow_factory,
**job_details)
save_logbook = self.persistence_mock.__enter__().get_connection(
).save_logbook
save_logbook.assert_called()
self.assertEqual(job_name, save_logbook.call_args[0][0].name)
mock_engines.assert_called()
save_args = mock_engines.call_args
self.assertEqual(job_name, save_args[0][0].name)
self.assertEqual(self._mock_uuid, save_args[0][0].uuid)
self.assertEqual(flow_factory, save_args[0][1])
self.assertEqual(self.persistence_mock.__enter__(),
save_args[1]['backend'])
self.jobboard_mock.__enter__().post.assert_called()
post_args = self.jobboard_mock.__enter__().post.call_args
self.assertEqual(job_name, post_args[0][0])
self.assertEqual(job_details, post_args[1]['details'])
wait.assert_not_called()
self.assertEqual(self._mock_uuid, uuid)
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=_mock_uuid)
@mock.patch('taskflow.engines.save_factory_details')
def test_run_poster_wait(self, mock_engines, mockuuid):
flow_factory = mock.MagicMock()
flow_factory.__name__ = 'testname'
job_details = {'store': 'test'}
with mock.patch.object(self.service_controller, '_wait_for_job'
) as wait:
uuid = self.service_controller.run_poster(flow_factory, wait=True,
**job_details)
self.persistence_mock.__enter__().get_connection(
).save_logbook.assert_called()
mock_engines.assert_called()
self.jobboard_mock.__enter__().post.assert_called()
wait.assert_called_once_with(self.jobboard_mock.__enter__())
self.assertEqual(self._mock_uuid, uuid)
@mock.patch('octavia.common.base_taskflow.RedisDynamicLoggingConductor')
@mock.patch('octavia.common.base_taskflow.DynamicLoggingConductor')
def test_run_conductor(self, dynamiccond, rediscond):
self.service_controller.run_conductor("test")
rediscond.assert_called_once_with(
"test", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel',
engine_options={
'max_workers': MAX_WORKERS,
})
self.conf.config(group="task_flow",
jobboard_backend_driver='zookeeper_taskflow_driver')
self.service_controller.run_conductor("test2")
dynamiccond.assert_called_once_with(
"test2", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel')
| 40.294872 | 78 | 0.664333 |
79412905ff6bc3ca4afbf35ff3ff4fc4c7bdbb06 | 22,696 | py | Python | sidechainnet/utils/load.py | heiidii/sidechainnet | d0d04447af567d2ebf0f80427b0d9330d922de27 | [
"BSD-3-Clause"
] | 1 | 2022-03-09T01:56:37.000Z | 2022-03-09T01:56:37.000Z | sidechainnet/utils/load.py | heiidii/sidechainnet | d0d04447af567d2ebf0f80427b0d9330d922de27 | [
"BSD-3-Clause"
] | null | null | null | sidechainnet/utils/load.py | heiidii/sidechainnet | d0d04447af567d2ebf0f80427b0d9330d922de27 | [
"BSD-3-Clause"
] | null | null | null | """Implements SidechainNet loading functionality."""
import pickle
import os
from sidechainnet.dataloaders.SCNDataset import SCNDataset
import requests
import tqdm
import sidechainnet as scn
from sidechainnet.create import format_sidechainnet_path
from sidechainnet.dataloaders.collate import prepare_dataloaders
def _get_local_sidechainnet_path(casp_version, thinning, scn_dir):
"""Return local path to SidechainNet file iff it exists, else returns None."""
filepath = os.path.join(scn_dir, format_sidechainnet_path(casp_version, thinning))
if os.path.isfile(filepath):
return filepath
else:
return None
def _copyfileobj(fsrc, fdst, length=0, chunks=0.):
"""Copy data from file-like object fsrc to file-like object fdst.
Modified from shutil.copyfileobj to include a progress bar with tqdm.
"""
# Localize variable access to minimize overhead.
if not length:
length = 64 * 1024
fsrc_read = fsrc.read
fdst_write = fdst.write
if chunks:
pbar = tqdm.tqdm(total=int(chunks),
desc='Downloading file chunks (estimated)',
unit='chunk',
dynamic_ncols=True)
while True:
buf = fsrc_read(length)
if not buf:
break
fdst_write(buf)
if chunks:
pbar.update()
def _download(url, file_name):
"""Download a file at a given URL to a specified local file_name with shutil."""
# File length can only be approximated from the resulting GET, unfortunately
r = requests.get(url, stream=True)
if 'Content-Length' in r.headers:
file_len = int(r.headers['Content-Length'])
elif 'X-Original-Content-Length' in r.headers:
file_len = int(r.headers['X-Original-Content-Length'])
else:
file_len = 0
r.raw.decode_content = True
with open(file_name, 'wb') as f:
_copyfileobj(r.raw, f, chunks=(file_len / (64. * 1024)))
r.close()
return file_name
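# Minimal usage sketch (the URL and file name are placeholders, not real
# endpoints): stream a remote pickle to disk with the chunked progress bar.
#
#   _download("https://example.com/sidechainnet.pkl", "./sidechainnet.pkl")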
def _download_sidechainnet(casp_version, thinning, scn_dir):
"""Download the specified version of Sidechainnet."""
# Prepare destination paths for downloading
if format_sidechainnet_path(casp_version, thinning) not in BOXURLS:
raise FileNotFoundError(
"The requested file is currently unavailable. Please check back later.")
outfile_path = os.path.join(scn_dir, format_sidechainnet_path(casp_version, thinning))
os.makedirs(os.path.dirname(outfile_path), exist_ok=True)
print("Downloading from", BOXURLS[format_sidechainnet_path(casp_version, thinning)])
# Use a data-agnostic tool for downloading URL data from Box to a specified local file
_download(BOXURLS[format_sidechainnet_path(casp_version, thinning)], outfile_path)
print(f"Downloaded SidechainNet to {outfile_path}.")
return outfile_path
def _load_dict(local_path):
"""Load a pickled dictionary."""
with open(local_path, "rb") as f:
d = pickle.load(f)
print(f"SidechainNet was loaded from {local_path}.")
return d
def load(casp_version=12,
thinning=30,
scn_dir="./sidechainnet_data",
force_download=False,
with_pytorch=None,
aggregate_model_input=True,
collate_fn=None,
batch_size=32,
seq_as_onehot=None,
dynamic_batching=True,
num_workers=2,
optimize_for_cpu_parallelism=False,
train_eval_downsample=.2,
filter_by_resolution=False,
complete_structures_only=False,
local_scn_path=None,
scn_dataset=False):
"""Load and return the specified SidechainNet dataset as a dictionary or DataLoaders.
This function flexibly allows the user to load SidechainNet in a format that is most
convenient to them. The user can specify which version and "thinning" of the dataset
to load, and whether or not they would like the data prepared as a PyTorch DataLoader
(with_pytorch='dataloaders') for easy access for model training with PyTorch. Several
arguments are also available to allow the user to specify how the data should be
loaded and batched when provided as DataLoaders (aggregate_model_input, collate_fn,
batch_size, seq_as_one_hot, dynamic_batching, num_workers,
optimize_for_cpu_parallelism, and train_eval_downsample.)
Args:
casp_version (int, optional): CASP version to load (7-12). Defaults to 12.
thinning (int, optional): ProteinNet/SidechainNet "thinning" to load. A thinning
represents the minimum sequence similarity each protein sequence must have to
all other sequences in the same thinning. The 100 thinning contains all of the
protein entries in SidechainNet, while the 30 thinning has a much smaller
amount. Defaults to 30.
scn_dir (str, optional): Path where SidechainNet data will be stored locally.
Defaults to "./sidechainnet_data".
force_download (bool, optional): If true, download SidechainNet data from the web
even if it already exists locally. Defaults to False.
with_pytorch (str, optional): If equal to 'dataloaders', returns a dictionary
mapping dataset splits (e.g. 'train', 'test', 'valid-X') to PyTorch
DataLoaders for data batching and model training. Defaults to None.
aggregate_model_input (bool, optional): If True, the batches in the DataLoader
contain a single entry for all of the SidechainNet data that is favored for
use in a predictive model (sequences and PSSMs). This entry is a single
Tensor. However, if False, when batching these entries are returned
separately. See method description. Defaults to True.
collate_fn (Callable, optional): A collating function. Defaults to None. See:
https://pytorch.org/docs/stable/data.html#dataloader-collate-fn.
batch_size (int, optional): Batch size to be used with PyTorch DataLoaders. Note
that if dynamic_batching is True, then the size of the batch will not
necessarily be equal to this number (though, on average, it will be close
to this number). Only applicable when with_pytorch='dataloaders' is provided.
Defaults to 32.
seq_as_onehot (bool, optional): By default, the None value of this argument causes
sequence data to be represented as one-hot vectors (L x 20) when batching and
aggregate_model_input=True or to be represented as integer sequences (shape L,
values 0 through 21 with 21 being a pad character). The user may override this
option with seq_as_onehot=False only when aggregate_model_input=False.
dynamic_batching (bool, optional): If True, uses a dynamic batch size when
training that increases when the proteins within a batch have short sequences
or decreases when the proteins within a batch have long sequences. Behind the
scenes, this function bins the sequences in the training Dataset/DataLoader
by their length. For every batch, it selects a bin at random (with a
probability proportional to the number of proteins within that bin), and then
selects N proteins within that batch, where:
N = (batch_size * average_length_in_dataset)/max_length_in_bin.
This means that, on average, each batch will have about the same number of
amino acids. If False, uses a constant value (specified by batch_size) for
batch size.
num_workers (int, optional): Number of workers passed to DataLoaders. Defaults to
2. See the description of workers in the PyTorch documentation:
https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading.
optimize_for_cpu_parallelism (bool, optional): If True, ensure that the size of
each batch is a multiple of the number of available CPU cores. Defaults to
False.
train_eval_downsample (float, optional): The fraction of the training set to
include in the 'train-eval' DataLoader/Dataset that is returned. This is
included so that, instead of evaluating the entire training set during each
epoch of training (which can be expensive), we can first downsample the
training set at the start of training, and use that downsampled dataset during
the whole of model training. Defaults to .2.
filter_by_resolution (float, bool, optional): If True, only use structures with a
            reported resolution < 3 Angstroms. Structures with no reported resolutions will
            also be excluded. If filter_by_resolution is a float, then only structures
            having a resolution value less than or equal to this threshold will be included.
For example, a value of 2.5 will exclude all structures with resolution
greater than 2.5 Angstrom. Only the training set is filtered.
complete_structures_only (bool, optional): If True, yield only structures from the
training set that have no missing residues. Filter not applied to other data
splits. Default False.
local_scn_path (str, optional): The path for a locally saved SidechainNet file.
This is especially useful for loading custom SidechainNet datasets.
scn_dataset (bool, optional): If True, return a sidechainnet.SCNDataset object
for conveniently accessing properties of the data.
(See sidechainnet.SCNDataset) for more information.
Returns:
A Python dictionary that maps data splits ('train', 'test', 'train-eval',
'valid-X') to either more dictionaries containing protein data ('seq', 'ang',
'crd', etc.) or to PyTorch DataLoaders that can be used for training. See below.
Option 1 (Python dictionary):
By default, the function returns a dictionary that is organized by training/
validation/testing splits. For example, the following code loads CASP 12 with
the 30% thinning option:
>>> import sidechainnet as scn
>>> data = scn.load(12, 30)
`data` is a Python dictionary with the following structure:
data = {"train": {"seq": [seq1, seq2, ...], # Sequences
"ang": [ang1, ang2, ...], # Angles
"crd": [crd1, crd2, ...], # Coordinates
"evo": [evo1, evo2, ...], # PSSMs and Information Content
"ids": [id1, id2, ...], # Corresponding ProteinNet IDs
},
"valid-10": {...},
...
"valid-90": {...},
"test": {...},
"settings": {...},
"description" : "SidechainNet for CASP 12."
"date": "September 20, 2020"
}
Option 2 (PyTorch DataLoaders):
Alternatively, if the user provides `with_pytorch='dataloaders'`, `load` will
return a dictionary mapping dataset "splits" (e.g. 'train', 'test', 'valid-X'
where 'X' is one of the validation set splits defined by ProteinNet/
SidechainNet).
By default, the provided `DataLoader`s use a custom batching method that
randomly generates batches of proteins of similar length for faster training.
The probability of selecting small-length batches is decreased so that each
protein in SidechainNet is included in a batch with equal probability. See
`dynamic_batching` and `collate_fn` arguments for more information on
modifying this behavior. In the example below, `model_input` is a collated
Tensor containing sequence and PSSM information.
>>> dataloaders = scn.load(casp_version=12, with_pytorch="dataloaders")
>>> dataloaders.keys()
['train', 'train_eval', 'valid-10', ..., 'valid-90', 'test']
>>> for (protein_id, protein_seqs, model_input, true_angles,
true_coords) in dataloaders['train']:
.... predicted_angles = model(model_input)
.... predicted_coords = angles_to_coordinates(predicted_angles)
.... loss = compute_loss(predicted_angles, predicted_coords,
true_angles, true_coords)
.... ...
We have also made it possible to access the protein sequence and PSSM data
directly when training by adding `aggregate_model_input=False` to `scn.load`.
>>> dataloaders = scn.load(casp_version=12, with_pytorch="dataloaders",
aggregate_model_input=False)
>>> for (protein_id, sequence, pssm, true_angles,
true_coords) in dataloaders['train']:
.... prediction = model(sequence, pssm)
.... ...
"""
if local_scn_path:
local_path = local_scn_path
else:
local_path = _get_local_sidechainnet_path(casp_version, thinning, scn_dir)
if not local_path:
print(f"SidechainNet{(casp_version, thinning)} was not found in {scn_dir}.")
if not local_path or force_download:
# Download SidechainNet if it does not exist locally, or if requested
print("Downloading ...")
local_path = _download_sidechainnet(casp_version, thinning, scn_dir)
scn_dict = _load_dict(local_path)
# Patch for removing 1GJJ_1_A, see Issue #38
scn_dict = scn.utils.manual_adjustment._repair_1GJJ_1_A(scn_dict)
scn_dict = filter_dictionary_by_resolution(scn_dict, threshold=filter_by_resolution)
if complete_structures_only:
scn_dict = filter_dictionary_by_missing_residues(scn_dict)
# By default, the load function returns a dictionary
if not with_pytorch and not scn_dataset:
return scn_dict
elif not with_pytorch and scn_dataset:
return SCNDataset(scn_dict)
if with_pytorch == "dataloaders":
return prepare_dataloaders(
scn_dict,
aggregate_model_input=aggregate_model_input,
collate_fn=collate_fn,
batch_size=batch_size,
num_workers=num_workers,
seq_as_onehot=seq_as_onehot,
dynamic_batching=dynamic_batching,
optimize_for_cpu_parallelism=optimize_for_cpu_parallelism,
train_eval_downsample=train_eval_downsample)
return
def filter_dictionary_by_resolution(raw_data, threshold=False):
"""Filter SidechainNet data by removing poor-resolution training entries.
Args:
raw_data (dict): SidechainNet dictionary.
threshold (float, bool): Entries with resolution values greater than this value
are discarded. Test set entries have no measured resolution and are not
excluded. Default is 3 Angstroms. If False, nothing is filtered.
Returns:
Filtered dictionary.
"""
if not threshold:
return raw_data
if isinstance(threshold, bool) and threshold is True:
threshold = 3
new_data = {
"seq": [],
"ang": [],
"ids": [],
"evo": [],
"msk": [],
"crd": [],
"sec": [],
"res": [],
"ums": [],
"mod": []
}
train = raw_data["train"]
n_filtered_entries = 0
    total_entries = 0.
for seq, ang, crd, msk, evo, _id, res, sec, ums, mod in zip(
train['seq'], train['ang'], train['crd'], train['msk'], train['evo'],
train['ids'], train['res'], train['sec'], train['ums'], train['mod']):
        total_entries += 1
if not res or res > threshold:
n_filtered_entries += 1
continue
else:
new_data["seq"].append(seq)
new_data["ang"].append(ang)
new_data["ids"].append(_id)
new_data["evo"].append(evo)
new_data["msk"].append(msk)
new_data["crd"].append(crd)
new_data["sec"].append(sec)
new_data["res"].append(res)
new_data["ums"].append(ums)
new_data["mod"].append(mod)
if n_filtered_entries:
print(f"{n_filtered_entries} ({n_filtered_entries/total_entires:.1%})"
" training set entries were excluded based on resolution.")
raw_data["train"] = new_data
return raw_data
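# Usage sketch (not part of the original module): this filter is normally driven
# through load() rather than called directly, e.g.
#
#   import sidechainnet as scn
#   data = scn.load(casp_version=12, thinning=30, filter_by_resolution=2.5)
#
# which drops training entries that have no reported resolution or whose
# resolution is greater than 2.5 Angstroms.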
def filter_dictionary_by_missing_residues(raw_data):
"""Return new SidechainNet dictionary that omits training data with missing residues.
Args:
raw_data (dict): SidechainNet dictionary.
Returns:
Filtered dictionary.
"""
new_data = {
"seq": [],
"ang": [],
"ids": [],
"evo": [],
"msk": [],
"crd": [],
"sec": [],
"res": [],
"ums": [],
"mod": []
}
train = raw_data["train"]
n_filtered_entries = 0
    total_entries = 0.
for seq, ang, crd, msk, evo, _id, res, sec, ums, mod in zip(
train['seq'], train['ang'], train['crd'], train['msk'], train['evo'],
train['ids'], train['res'], train['sec'], train['ums'], train['mod']):
        total_entries += 1
if "-" in msk:
n_filtered_entries += 1
continue
else:
new_data["seq"].append(seq)
new_data["ang"].append(ang)
new_data["ids"].append(_id)
new_data["evo"].append(evo)
new_data["msk"].append(msk)
new_data["crd"].append(crd)
new_data["sec"].append(sec)
new_data["res"].append(res)
new_data["ums"].append(ums)
new_data["mod"].append(mod)
if n_filtered_entries:
print(f"{n_filtered_entries} ({n_filtered_entries/total_entires:.1%})"
" training set entries were excluded based on missing residues.")
raw_data["train"] = new_data
return raw_data
BOXURLS = {
# CASP 12
"sidechainnet_casp12_30.pkl":
"https://pitt.box.com/shared/static/hbatd2a750tx8e27yizwinc3hsceeeui.pkl",
"sidechainnet_casp12_50.pkl":
"https://pitt.box.com/shared/static/7cng5zdi2s4doruh1m512d281w2cmk0z.pkl",
"sidechainnet_casp12_70.pkl":
"https://pitt.box.com/shared/static/xfaktrj8ole0eqktxi5fa4qp9efum8f2.pkl",
"sidechainnet_casp12_90.pkl":
"https://pitt.box.com/shared/static/nh7vybjjm224m1nezrgmnywxsa4st2uk.pkl",
"sidechainnet_casp12_95.pkl":
"https://pitt.box.com/shared/static/wcz1kex8idnpy8zx7a59r3h6e216tlq1.pkl",
"sidechainnet_casp12_100.pkl":
"https://pitt.box.com/shared/static/ey5xh6l4p8iwzrxtxwpxt7oeg70eayl4.pkl",
# CASP 11
"sidechainnet_casp11_30.pkl":
"https://pitt.box.com/shared/static/fzil4bgxt4fqpp416xw0e3y0ew4c7yct.pkl",
"sidechainnet_casp11_50.pkl":
"https://pitt.box.com/shared/static/rux3p18k523y8zbo40u1l856826buvui.pkl",
"sidechainnet_casp11_70.pkl":
"https://pitt.box.com/shared/static/tl51ym0hzjdvq4qs5f5shsj0sl9mkvd0.pkl",
"sidechainnet_casp11_90.pkl":
"https://pitt.box.com/shared/static/iheqs3vqszoxsdq46nkzf5kylt8ecjbx.pkl",
"sidechainnet_casp11_95.pkl":
"https://pitt.box.com/shared/static/gbme2a5yifpugtmthwu2989xxyg5b8i6.pkl",
"sidechainnet_casp11_100.pkl":
"https://pitt.box.com/shared/static/3cfx02k2yw4ux2mrbvwrrj91zsftcpbj.pkl",
# CASP 10
"sidechainnet_casp10_30.pkl":
"https://pitt.box.com/shared/static/fe0hpjrldi2y1g374mgdzfpdipajd6s4.pkl",
"sidechainnet_casp10_50.pkl":
"https://pitt.box.com/shared/static/tsnt6s07txas0h37cpzepck580yme9vv.pkl",
"sidechainnet_casp10_70.pkl":
"https://pitt.box.com/shared/static/awmzr4jj68p61ab031smixryt69p8ykm.pkl",
"sidechainnet_casp10_90.pkl":
"https://pitt.box.com/shared/static/it6zcugy997c1550kima3m3fu8kamnh8.pkl",
"sidechainnet_casp10_95.pkl":
"https://pitt.box.com/shared/static/q6ld9h276kobhmmtvdq581qnm61oevup.pkl",
"sidechainnet_casp10_100.pkl":
"https://pitt.box.com/shared/static/fpixgzh9n86xyzpwtlc74lle4fd3p5es.pkl",
# CASP 9
"sidechainnet_casp9_30.pkl":
"https://pitt.box.com/shared/static/j1h3181d2mibqvc7jrqm17dprzj6pxmc.pkl",
"sidechainnet_casp9_50.pkl":
"https://pitt.box.com/shared/static/l363lu9ztpdmcybthtytwnrvvkib2228.pkl",
"sidechainnet_casp9_70.pkl":
"https://pitt.box.com/shared/static/4uh1yggpdhm0aoeisomnyfuac4j20qzc.pkl",
"sidechainnet_casp9_90.pkl":
"https://pitt.box.com/shared/static/scv7l6qfr2j93pn4cu40ouhmxbns6k7x.pkl",
"sidechainnet_casp9_95.pkl":
"https://pitt.box.com/shared/static/tqpugpr7wamvmkyrtd8tqnzft6u53zha.pkl",
"sidechainnet_casp9_100.pkl":
"https://pitt.box.com/shared/static/jjtubu2lxwlv1aw8tfc7u27vcf2yz39v.pkl",
# CASP 8
"sidechainnet_casp8_30.pkl":
"https://pitt.box.com/shared/static/1hx2n3y2gn3flnlsw2wb1e4l4nlru5mz.pkl",
"sidechainnet_casp8_50.pkl":
"https://pitt.box.com/shared/static/4u8tuqkm5pv34hm139uw9dqc4ieebsue.pkl",
"sidechainnet_casp8_70.pkl":
"https://pitt.box.com/shared/static/vj58yaeph55zjb04jezmqams66mn4bil.pkl",
"sidechainnet_casp8_90.pkl":
"https://pitt.box.com/shared/static/1ry2j47lde7zk5fxzvuffv05k1gq29oh.pkl",
"sidechainnet_casp8_95.pkl":
"https://pitt.box.com/shared/static/9uaw2tv61xyfd8gtw9n8e3hfcken4t4x.pkl",
"sidechainnet_casp8_100.pkl":
"https://pitt.box.com/shared/static/crk59vz6dw9cbbvne10owa450zgv1j79.pkl",
# CASP 7
"sidechainnet_casp7_30.pkl":
"https://pitt.box.com/shared/static/hjblmbwei2dkwhfjatttdmamznt1k9ef.pkl",
"sidechainnet_casp7_50.pkl":
"https://pitt.box.com/shared/static/4pw56huei1123a5rd6g460886kg0pex7.pkl",
"sidechainnet_casp7_70.pkl":
"https://pitt.box.com/shared/static/afyow2ki9mwuoago0bzlsp5ame8dq12g.pkl",
"sidechainnet_casp7_90.pkl":
"https://pitt.box.com/shared/static/phsbdw8bj1oiv61d6hps0j62324820f3.pkl",
"sidechainnet_casp7_95.pkl":
"https://pitt.box.com/shared/static/2lgbtdw6c5df0qpe7dtnlaawowy9ic5r.pkl",
"sidechainnet_casp7_100.pkl":
"https://pitt.box.com/shared/static/6qipxz2z2n12a06vln5ucmzu4dcyw5ee.pkl",
# Other
"sidechainnet_debug.pkl":
"https://pitt.box.com/shared/static/tevlb6nuii6kk520vi4x0u7li0eoxuep.pkl"
}
| 46.318367 | 92 | 0.653816 |
7941291d62cd99f57cf67a266d4892d74386e623 | 800 | py | Python | weatherapp/weatherapp/urls.py | dhavall13/Weather-App | cac3f997612d2ab2d80c6f8ad4917f04821762bc | [
"MIT"
] | null | null | null | weatherapp/weatherapp/urls.py | dhavall13/Weather-App | cac3f997612d2ab2d80c6f8ad4917f04821762bc | [
"MIT"
] | null | null | null | weatherapp/weatherapp/urls.py | dhavall13/Weather-App | cac3f997612d2ab2d80c6f8ad4917f04821762bc | [
"MIT"
] | null | null | null | """weatherapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('weather.urls')),
]
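# Illustrative sketch (assumed contents of the included app, not part of this
# project): 'weather.urls' would define its own patterns, for example:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [path('', views.index, name='index')]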
| 34.782609 | 77 | 0.70375 |
794129fafddb523c15abdfc1b29e4767038bb575 | 12,174 | py | Python | montreal_forced_aligner/alignment/adapting.py | nrgslp/Montreal-Forced-Aligner | 582d841694537f2c69f45ec9ef7b8235ddd84b24 | [
"MIT"
] | null | null | null | montreal_forced_aligner/alignment/adapting.py | nrgslp/Montreal-Forced-Aligner | 582d841694537f2c69f45ec9ef7b8235ddd84b24 | [
"MIT"
] | null | null | null | montreal_forced_aligner/alignment/adapting.py | nrgslp/Montreal-Forced-Aligner | 582d841694537f2c69f45ec9ef7b8235ddd84b24 | [
"MIT"
] | null | null | null | """Class definitions for adapting acoustic models"""
from __future__ import annotations
import multiprocessing as mp
import os
import shutil
import subprocess
import time
from queue import Empty
from typing import TYPE_CHECKING, List
import tqdm
from montreal_forced_aligner.abc import AdapterMixin
from montreal_forced_aligner.alignment.multiprocessing import AccStatsArguments, AccStatsFunction
from montreal_forced_aligner.alignment.pretrained import PretrainedAligner
from montreal_forced_aligner.exceptions import KaldiProcessingError
from montreal_forced_aligner.models import AcousticModel
from montreal_forced_aligner.utils import (
KaldiProcessWorker,
Stopped,
log_kaldi_errors,
thirdparty_binary,
)
if TYPE_CHECKING:
from montreal_forced_aligner.models import MetaDict
__all__ = ["AdaptingAligner"]
class AdaptingAligner(PretrainedAligner, AdapterMixin):
"""
Adapt an acoustic model to a new dataset
Parameters
----------
mapping_tau: int
Tau to use in mapping stats between new domain data and pretrained model
See Also
--------
:class:`~montreal_forced_aligner.alignment.pretrained.PretrainedAligner`
For dictionary, corpus, and alignment parameters
:class:`~montreal_forced_aligner.abc.AdapterMixin`
For adapting parameters
Attributes
----------
initialized: bool
Flag for whether initialization is complete
adaptation_done: bool
Flag for whether adaptation is complete
"""
def __init__(self, mapping_tau: int = 20, **kwargs):
super().__init__(**kwargs)
self.mapping_tau = mapping_tau
self.initialized = False
self.adaptation_done = False
def map_acc_stats_arguments(self, alignment=False) -> List[AccStatsArguments]:
"""
Generate Job arguments for :func:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsArguments`]
Arguments for processing
"""
feat_strings = self.construct_feature_proc_strings()
if alignment:
model_path = self.alignment_model_path
else:
model_path = self.model_path
return [
AccStatsArguments(
os.path.join(self.working_log_directory, f"map_acc_stats.{j.name}.log"),
j.current_dictionary_names,
feat_strings[j.name],
j.construct_path_dictionary(self.working_directory, "ali", "ark"),
j.construct_path_dictionary(self.working_directory, "map", "acc"),
model_path,
)
for j in self.jobs
]
def acc_stats(self, alignment=False):
arguments = self.map_acc_stats_arguments(alignment)
if alignment:
initial_mdl_path = os.path.join(self.working_directory, "0.alimdl")
final_mdl_path = os.path.join(self.working_directory, "0.alimdl")
else:
initial_mdl_path = os.path.join(self.working_directory, "0.mdl")
final_mdl_path = os.path.join(self.working_directory, "final.mdl")
if not os.path.exists(initial_mdl_path):
return
self.logger.info("Accumulating statistics...")
with tqdm.tqdm(total=self.num_utterances, disable=True) as pbar:
if self.use_mp:
manager = mp.Manager()
error_dict = manager.dict()
return_queue = manager.Queue()
stopped = Stopped()
procs = []
for i, args in enumerate(arguments):
function = AccStatsFunction(args)
p = KaldiProcessWorker(i, return_queue, function, error_dict, stopped)
procs.append(p)
p.start()
while True:
try:
num_utterances, errors = return_queue.get(timeout=1)
if stopped.stop_check():
continue
except Empty:
for proc in procs:
if not proc.finished.stop_check():
break
else:
break
continue
pbar.update(num_utterances + errors)
for p in procs:
p.join()
if error_dict:
for v in error_dict.values():
raise v
else:
for args in arguments:
function = AccStatsFunction(args)
for num_utterances, errors in function.run():
pbar.update(num_utterances + errors)
log_path = os.path.join(self.working_log_directory, "map_model_est.log")
occs_path = os.path.join(self.working_directory, "final.occs")
with open(log_path, "w", encoding="utf8") as log_file:
acc_files = []
for j in arguments:
acc_files.extend(j.acc_paths.values())
sum_proc = subprocess.Popen(
[thirdparty_binary("gmm-sum-accs"), "-"] + acc_files,
stderr=log_file,
stdout=subprocess.PIPE,
env=os.environ,
)
ismooth_proc = subprocess.Popen(
[
thirdparty_binary("gmm-ismooth-stats"),
"--smooth-from-model",
f"--tau={self.mapping_tau}",
initial_mdl_path,
"-",
"-",
],
stderr=log_file,
stdin=sum_proc.stdout,
stdout=subprocess.PIPE,
env=os.environ,
)
est_proc = subprocess.Popen(
[
thirdparty_binary("gmm-est"),
"--update-flags=m",
f"--write-occs={occs_path}",
"--remove-low-count-gaussians=false",
initial_mdl_path,
"-",
final_mdl_path,
],
stdin=ismooth_proc.stdout,
stderr=log_file,
env=os.environ,
)
est_proc.communicate()
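        # The subprocess chain above corresponds to this shell pipeline (a sketch
        # for the non-alignment case; the acc file list and paths are computed
        # above, and <mapping_tau> is the constructor argument):
        #   gmm-sum-accs - *.acc \
        #     | gmm-ismooth-stats --smooth-from-model --tau=<mapping_tau> 0.mdl - - \
        #     | gmm-est --update-flags=m --write-occs=final.occs \
        #         --remove-low-count-gaussians=false 0.mdl - final.mdl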
@property
def workflow_identifier(self) -> str:
"""Adaptation identifier"""
return "adapt_acoustic_model"
@property
def align_directory(self) -> str:
"""Align directory"""
return os.path.join(self.output_directory, "adapted_align")
@property
def working_directory(self) -> str:
"""Current working directory"""
if self.adaptation_done:
return self.align_directory
return self.workflow_directory
@property
def working_log_directory(self) -> str:
"""Current log directory"""
return os.path.join(self.working_directory, "log")
@property
def model_path(self):
"""Current acoustic model path"""
if not self.adaptation_done:
return os.path.join(self.working_directory, "0.mdl")
return os.path.join(self.working_directory, "final.mdl")
@property
def next_model_path(self):
"""Mapped acoustic model path"""
return os.path.join(self.working_directory, "final.mdl")
def train_map(self) -> None:
"""
        Trains an adapted acoustic model by mapping model states and updating those with
enough data.
See Also
--------
:class:`~montreal_forced_aligner.alignment.multiprocessing.AccStatsFunction`
Multiprocessing helper function for each job
:meth:`.AdaptingAligner.map_acc_stats_arguments`
Job method for generating arguments for the helper function
:kaldi_src:`gmm-sum-accs`
Relevant Kaldi binary
:kaldi_src:`gmm-ismooth-stats`
Relevant Kaldi binary
:kaldi_src:`gmm-est`
Relevant Kaldi binary
:kaldi_steps:`train_map`
Reference Kaldi script
"""
begin = time.time()
log_directory = self.working_log_directory
os.makedirs(log_directory, exist_ok=True)
self.acc_stats(alignment=False)
if self.uses_speaker_adaptation:
self.acc_stats(alignment=True)
self.logger.debug(f"Mapping models took {time.time() - begin}")
def adapt(self) -> None:
"""Run the adaptation"""
self.setup()
dirty_path = os.path.join(self.working_directory, "dirty")
done_path = os.path.join(self.working_directory, "done")
if os.path.exists(done_path):
self.logger.info("Adaptation already done, skipping.")
return
self.logger.info("Generating initial alignments...")
for f in ["final.mdl", "final.alimdl"]:
p = os.path.join(self.working_directory, f)
if not os.path.exists(p):
continue
os.rename(p, os.path.join(self.working_directory, f.replace("final", "0")))
self.align()
os.makedirs(self.align_directory, exist_ok=True)
try:
self.logger.info("Adapting pretrained model...")
self.train_map()
self.export_model(os.path.join(self.working_log_directory, "acoustic_model.zip"))
shutil.copyfile(
os.path.join(self.working_directory, "final.mdl"),
os.path.join(self.align_directory, "final.mdl"),
)
shutil.copyfile(
os.path.join(self.working_directory, "final.occs"),
os.path.join(self.align_directory, "final.occs"),
)
shutil.copyfile(
os.path.join(self.working_directory, "tree"),
os.path.join(self.align_directory, "tree"),
)
if os.path.exists(os.path.join(self.working_directory, "final.alimdl")):
shutil.copyfile(
os.path.join(self.working_directory, "final.alimdl"),
os.path.join(self.align_directory, "final.alimdl"),
)
if os.path.exists(os.path.join(self.working_directory, "lda.mat")):
shutil.copyfile(
os.path.join(self.working_directory, "lda.mat"),
os.path.join(self.align_directory, "lda.mat"),
)
self.adaptation_done = True
except Exception as e:
with open(dirty_path, "w"):
pass
if isinstance(e, KaldiProcessingError):
log_kaldi_errors(e.error_logs, self.logger)
e.update_log_file(self.logger)
raise
with open(done_path, "w"):
pass
@property
def meta(self) -> MetaDict:
"""Acoustic model metadata"""
from datetime import datetime
from ..utils import get_mfa_version
data = {
"phones": sorted(self.non_silence_phones),
"version": get_mfa_version(),
"architecture": self.acoustic_model.meta["architecture"],
"train_date": str(datetime.now()),
"features": self.feature_options,
"phone_set_type": str(self.phone_set_type),
}
return data
def export_model(self, output_model_path: str) -> None:
"""
Output an acoustic model to the specified path
Parameters
----------
output_model_path : str
Path to save adapted acoustic model
"""
directory, filename = os.path.split(output_model_path)
basename, _ = os.path.splitext(filename)
acoustic_model = AcousticModel.empty(basename, root_directory=self.working_log_directory)
acoustic_model.add_meta_file(self)
acoustic_model.add_model(self.align_directory)
if directory:
os.makedirs(directory, exist_ok=True)
basename, _ = os.path.splitext(output_model_path)
acoustic_model.dump(output_model_path)
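# A minimal usage sketch (constructor kwargs are elided because they come from
# the corpus/dictionary mixins; nothing here is a documented API):
#
#   aligner = AdaptingAligner(mapping_tau=20)  # plus corpus/dictionary arguments
#   aligner.adapt()
#   aligner.export_model("adapted_acoustic_model.zip")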
| 36.890909 | 110 | 0.578364 |
79412ad4772d8e039983c05481ed4a3fd1fafadd | 2,827 | py | Python | api/namex/services/nro/utils.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | 4 | 2018-10-05T23:41:05.000Z | 2019-06-19T16:17:50.000Z | api/namex/services/nro/utils.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | 635 | 2018-05-31T04:12:46.000Z | 2022-03-31T18:45:42.000Z | api/namex/services/nro/utils.py | rarmitag/namex | 1b308bf96130619d4a61d44e075cc7ab177dc6cd | [
"Apache-2.0"
] | 71 | 2018-05-14T20:47:55.000Z | 2022-03-31T23:08:30.000Z | import re
def nro_examiner_name(examiner_name): # -> (str)
"""returns an examiner name, formated and tuncated to fit in NRO
:examiner_name (str): an examiner name, as found in NameX
:returns (str): an examiner name that is 7 or less chars in length
"""
# namex examiner_names are {domain}{/}{username}
start = examiner_name.find('/')+1
return examiner_name[start:start+7]
def row_to_dict(row):
"""
This takes a row from a resultset and returns a dict with the same structure
:param row:
:return: dict
"""
return {key: value for (key, value) in row.items()}
def ora_row_to_dict(col_names, row):
"""
This takes a row from a resultset and returns a dict with the same structure
:param row:
:return: dict
"""
return dict(zip([col.lower() for col in col_names], row))
def validNRFormat(nr):
'''NR should be of the format "NR 1234567"
'''
if len(nr) != 10 or nr[:2] != 'NR' or nr[2:3] != ' ':
return False
    try:
        int(nr[3:])
    except ValueError:
        return False
    return True
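# Illustrative checks (not in the original): validNRFormat('NR 1234567') is True,
# while 'NR1234567' (no space) and 'NR 12X4567' (non-digit) both return False.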
def generate_compressed_name(original_name: str) -> str:
"""
returns a compressed name, formatted and truncated to fit in NRO
:param original_name : a company full name
:return: (str): a compressed name
"""
    # Remove all instances of "THE " and " THE "; a trailing " THE" does not need to be removed.
def _delete_the(in_name):
out_name = in_name
if len(in_name) > 4:
if in_name[:4] == "THE ":
out_name = in_name[4:]
out_name = out_name.replace(" THE ", "")
return out_name
def _remove_char(in_name):
chars = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ#&0123456789')
return ''.join([c for c in in_name if c in chars])
def _translate_char(in_name):
rep = {"&": "AND",
"#": "NUMBER",
"1": "ONE",
"2": "TWO",
"3": "THREE",
"4": "FOUR",
"5": "FIVE",
"6": "SIX",
"7": "SEVEN",
"8": "EIGHT",
"9": "NINE",
"0": "ZERO"} # define desired replacements here
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))
return pattern.sub(lambda m: rep[re.escape(m.group(0))], in_name)
result_name = original_name.strip().upper()
result_name = _delete_the(result_name)
result_name = result_name.replace(" ", "")
result_name = _remove_char(result_name)
result_name = _translate_char(result_name)
if result_name.startswith("BRITISHCOLUMBIA"):
result_name = result_name.replace("BRITISHCOLUMBIA", "BC", 1)
result_name = result_name[:30] # Maximum 30 chars
return result_name
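# Worked example (illustrative input, not from the original source):
#   "The Fun & Games Co."
#     strip/upper      -> "THE FUN & GAMES CO."
#     _delete_the      -> "FUN & GAMES CO."
#     drop spaces      -> "FUN&GAMESCO."
#     _remove_char     -> "FUN&GAMESCO"
#     _translate_char  -> "FUNANDGAMESCO"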
| 27.446602 | 80 | 0.577998 |
79412bbf5d4dda16ab7f9cd234e43c71c29f2775 | 34,926 | py | Python | tensorflow/contrib/framework/python/ops/variables_test.py | breandan/tensorflow | 7509bad95200e1baed4eb488dbeaaa2c505a2824 | [
"Apache-2.0"
] | 4 | 2016-09-26T08:55:23.000Z | 2019-05-06T15:26:03.000Z | tensorflow/contrib/framework/python/ops/variables_test.py | breandan/tensorflow | 7509bad95200e1baed4eb488dbeaaa2c505a2824 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/framework/python/ops/variables_test.py | breandan/tensorflow | 7509bad95200e1baed4eb488dbeaaa2c505a2824 | [
"Apache-2.0"
] | 4 | 2017-01-17T10:19:15.000Z | 2019-05-13T02:23:07.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""variables tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
class LocalVariableTest(tf.test.TestCase):
def test_local_variable(self):
with self.test_session() as sess:
self.assertEquals([], tf.local_variables())
value0 = 42
tf.contrib.framework.local_variable(value0)
value1 = 43
tf.contrib.framework.local_variable(value1)
variables = tf.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(tf.OpError, sess.run, variables)
tf.initialize_variables(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testLocalVariableNameAndShape(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.local_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], tf.contrib.framework.get_local_variables())
def testLocalVariableNotInAllVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.local_variable(0)
self.assertFalse(a in tf.all_variables())
self.assertTrue(a in tf.local_variables())
def testLocalVariableNotInVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.local_variable(0)
self.assertFalse(a in tf.contrib.framework.get_variables_to_restore())
self.assertTrue(a in tf.local_variables())
def testGetVariablesDontReturnsTransients(self):
with self.test_session():
with tf.variable_scope('A'):
tf.contrib.framework.local_variable(0)
with tf.variable_scope('B'):
tf.contrib.framework.local_variable(0)
self.assertEquals([], tf.contrib.framework.get_variables('A'))
self.assertEquals([], tf.contrib.framework.get_variables('B'))
def testGetLocalVariablesReturnsTransients(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.local_variable(0)
with tf.variable_scope('B'):
b = tf.contrib.framework.local_variable(0)
self.assertEquals([a], tf.contrib.framework.get_local_variables('A'))
self.assertEquals([b], tf.contrib.framework.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.test_session() as sess:
a = tf.contrib.framework.local_variable([0, 0, 0, 0, 0], name='a')
sess.run(tf.initialize_local_variables())
self.assertAllEqual(a.eval(), [0]*5)
class GlobalStepTest(tf.test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=tf.int64):
self.assertEquals('%s:0' % tf.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEquals(expected_dtype, global_step.dtype.base_dtype)
self.assertEquals([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with tf.Graph().as_default() as g:
self.assertEquals(None, tf.contrib.framework.get_global_step())
tf.Variable(
0.0, trainable=False, dtype=tf.float32, name=tf.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(
TypeError, 'does not have integer type',
tf.contrib.framework.get_global_step)
self.assertRaisesRegexp(
TypeError, 'does not have integer type',
tf.contrib.framework.get_global_step, g)
def test_invalid_shape(self):
with tf.Graph().as_default() as g:
self.assertEquals(None, tf.contrib.framework.get_global_step())
tf.Variable(
[0], trainable=False, dtype=tf.int32, name=tf.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(
TypeError, 'not scalar',
tf.contrib.framework.get_global_step)
self.assertRaisesRegexp(
TypeError, 'not scalar',
tf.contrib.framework.get_global_step, g)
def test_create_global_step(self):
self.assertEquals(None, tf.contrib.framework.get_global_step())
with tf.Graph().as_default() as g:
global_step = tf.contrib.framework.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(
ValueError, 'already exists', tf.contrib.framework.create_global_step)
self.assertRaisesRegexp(
ValueError, 'already exists', tf.contrib.framework.create_global_step,
g)
self._assert_global_step(
tf.contrib.framework.create_global_step(tf.Graph()))
def test_get_global_step(self):
with tf.Graph().as_default() as g:
self.assertEquals(None, tf.contrib.framework.get_global_step())
tf.Variable(
0, trainable=False, dtype=tf.int32, name=tf.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
tf.contrib.framework.get_global_step(), expected_dtype=tf.int32)
self._assert_global_step(
tf.contrib.framework.get_global_step(g), expected_dtype=tf.int32)
def test_get_or_create_global_step(self):
with tf.Graph().as_default() as g:
self.assertEquals(None, tf.contrib.framework.get_global_step())
self._assert_global_step(
tf.contrib.framework.get_or_create_global_step())
self._assert_global_step(
tf.contrib.framework.get_or_create_global_step(g))
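# Usage sketch (contrib-era API, exactly as exercised by the tests above):
#
#   global_step = tf.contrib.framework.get_or_create_global_step()
#
# returns the graph's existing global step variable or creates a new int64
# scalar one named after tf.GraphKeys.GLOBAL_STEP.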
class VariablesTest(tf.test.TestCase):
def testCreateVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
def testGetVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.variable('a', [5])
self.assertEquals([a, b], tf.contrib.framework.get_variables())
self.assertEquals([a], tf.contrib.framework.get_variables('A'))
self.assertEquals([b], tf.contrib.framework.get_variables('B'))
def testGetVariablesSuffix(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('A'):
b = tf.contrib.framework.variable('b', [5])
self.assertEquals([a], tf.contrib.framework.get_variables(suffix='a'))
self.assertEquals([b], tf.contrib.framework.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.test_session():
with tf.variable_scope('parent'):
a = tf.contrib.framework.variable('child', [5])
self.assertEquals(
a, tf.contrib.framework.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.test_session():
with tf.variable_scope('parent'):
a = tf.contrib.framework.variable('child', [5])
with tf.variable_scope('child'):
tf.contrib.framework.variable('grandchild1', [7])
tf.contrib.framework.variable('grandchild2', [9])
self.assertEquals(
a, tf.contrib.framework.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.framework.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.test_session():
with tf.variable_scope(var_name):
tf.contrib.framework.variable('grandchild1', [7])
tf.contrib.framework.variable('grandchild2', [9])
with self.assertRaises(ValueError):
tf.contrib.framework.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.variable('a', [5])
self.assertEquals([a, b],
tf.contrib.framework.get_variables_to_restore())
def testIncludeGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.variable('a', [5])
self.assertEquals([a, b], tf.contrib.framework.get_variables())
self.assertEquals([a],
tf.contrib.framework.get_variables_to_restore(['A']))
def testExcludeGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.variable('a', [5])
self.assertEquals([a, b], tf.contrib.framework.get_variables())
self.assertEquals([a],
tf.contrib.framework.get_variables_to_restore(
exclude=['B']))
def testWrongIncludeGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.variable('a', [5])
self.assertEquals([a, b], tf.contrib.framework.get_variables())
self.assertEquals([],
tf.contrib.framework.get_variables_to_restore(['a']))
def testGetMixedVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
b = tf.contrib.framework.variable('b', [5])
with tf.variable_scope('B'):
c = tf.contrib.framework.variable('c', [5])
d = tf.contrib.framework.variable('d', [5])
self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())
self.assertEquals([a, c],
tf.contrib.framework.get_variables_to_restore(
include=['A/a', 'B/c']))
def testExcludeGetMixedVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
b = tf.contrib.framework.variable('b', [5])
with tf.variable_scope('B'):
c = tf.contrib.framework.variable('c', [5])
d = tf.contrib.framework.variable('d', [5])
self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())
self.assertEquals([b, d],
tf.contrib.framework.get_variables_to_restore(
exclude=['A/a', 'B/c']))
def testReuseVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [])
with tf.variable_scope('A', reuse=True):
b = tf.contrib.framework.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], tf.contrib.framework.get_variables())
def testVariableWithRegularizer(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [], regularizer=tf.nn.l2_loss)
loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithRegularizerColocate(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [], device='gpu:0',
regularizer=tf.nn.l2_loss)
loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithDevice(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [], device='cpu:0')
b = tf.contrib.framework.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.test_session():
with tf.device('/cpu:0'):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.test_session():
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
device=DevFn()):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [])
c = tf.contrib.framework.variable('c', [], device='cpu:12')
d = tf.contrib.framework.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = tf.contrib.framework.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.test_session():
with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [])
c = tf.contrib.framework.variable('c', [], device='cpu:12')
d = tf.contrib.framework.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = tf.contrib.framework.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertDeviceEqual(c.initial_value.device, c.device)
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(d.initial_value.device, d.device)
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
def testVariableWithVariableDeviceChooser(self):
with tf.Graph().as_default():
device_fn = tf.contrib.framework.VariableDeviceChooser(num_tasks=2)
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
device=device_fn):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [])
c = tf.contrib.framework.variable('c', [], device='cpu:12')
d = tf.contrib.framework.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = tf.contrib.framework.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertDeviceEqual(c.initial_value.device, c.device)
self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(d.initial_value.device, d.device)
self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableGPUPlacement(self):
with tf.Graph().as_default():
device_fn = tf.contrib.framework.VariableDeviceChooser(device_type='GPU')
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
device=device_fn):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [])
c = tf.contrib.framework.variable('c', [], device='cpu:12')
d = tf.contrib.framework.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = tf.contrib.framework.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/gpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, '/gpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertDeviceEqual(c.initial_value.device, c.device)
self.assertDeviceEqual(d.device, '/gpu:0')
self.assertDeviceEqual(d.initial_value.device, d.device)
self.assertDeviceEqual(e.device, '/gpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
class ModelVariablesTest(tf.test.TestCase):
def testNameAndShape(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.model_variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], tf.contrib.framework.get_model_variables('A'))
def testNotInLocalVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.model_variable('a', [5])
self.assertTrue(a in tf.all_variables())
self.assertFalse(a in tf.local_variables())
def testGetVariablesReturns(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.model_variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.model_variable('a', [5])
self.assertEquals([a], tf.contrib.framework.get_variables('A'))
self.assertEquals([b], tf.contrib.framework.get_variables('B'))
def testGetModelVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.model_variable('a', [5])
with tf.variable_scope('B'):
b = tf.contrib.framework.model_variable('a', [5])
self.assertEquals([a], tf.contrib.framework.get_model_variables('A'))
self.assertEquals([b], tf.contrib.framework.get_model_variables('B'))
def testGetLocalVariables(self):
with self.test_session():
with tf.variable_scope('A'):
_ = tf.contrib.framework.model_variable('a', [5])
with tf.variable_scope('B'):
_ = tf.contrib.framework.model_variable('a', [5])
self.assertEquals([], tf.contrib.framework.get_local_variables('A'))
self.assertEquals([], tf.contrib.framework.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.test_session() as sess:
a = tf.contrib.framework.model_variable('a', [5], initializer=tf.ones)
sess.run(tf.initialize_all_variables())
self.assertAllEqual(a.eval(), [1]*5)
def testDeviceFn(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return '/cpu:%d' % self.counter
with tf.Graph().as_default():
with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],
device=DevFn()):
a = tf.contrib.framework.model_variable('a', [5])
b = tf.contrib.framework.model_variable('b', [20])
self.assertDeviceEqual(a.device, '/cpu:0')
self.assertDeviceEqual(a.initial_value.device, '/cpu:0')
self.assertDeviceEqual(b.device, '/cpu:1')
self.assertDeviceEqual(b.initial_value.device, '/cpu:1')
def testVariableWithVariableDeviceChooser(self):
with tf.Graph().as_default():
device_fn = tf.contrib.framework.VariableDeviceChooser()
with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],
device=device_fn):
a = tf.contrib.framework.model_variable('a', [5])
b = tf.contrib.framework.model_variable('b', [20])
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, 'cpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
class GetVariablesCollections(tf.test.TestCase):
def testVariableCollection(self):
with self.test_session():
a = tf.contrib.framework.variable('a', [], collections='A')
b = tf.contrib.framework.variable('b', [], collections='B')
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
def testVariableCollections(self):
with self.test_session():
a = tf.contrib.framework.variable('a', [], collections=['A', 'C'])
b = tf.contrib.framework.variable('b', [], collections=['B', 'C'])
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
self.assertListEqual([a, b], tf.get_collection('C'))
def testVariableCollectionsWithArgScope(self):
with self.test_session():
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
collections='A'):
a = tf.contrib.framework.variable('a', [])
b = tf.contrib.framework.variable('b', [])
self.assertListEqual([a, b], tf.get_collection('A'))
def testVariableCollectionsWithArgScopeNested(self):
with self.test_session():
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
collections='A'):
a = tf.contrib.framework.variable('a', [])
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
collections='B'):
b = tf.contrib.framework.variable('b', [])
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
def testVariableCollectionsWithArgScopeNonNested(self):
with self.test_session():
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
collections='A'):
a = tf.contrib.framework.variable('a', [])
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
collections='B'):
b = tf.contrib.framework.variable('b', [])
tf.contrib.framework.variable('c', [])
self.assertListEqual([a], tf.get_collection('A'))
self.assertListEqual([b], tf.get_collection('B'))
def testVariableRestoreWithArgScopeNested(self):
with self.test_session():
a = tf.contrib.framework.variable('a', [])
with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
trainable=False,
collections=['A', 'B']):
b = tf.contrib.framework.variable('b', [])
c = tf.contrib.framework.variable('c', [], trainable=False)
self.assertEquals([a, c], tf.contrib.framework.get_variables_to_restore())
self.assertEquals([a], tf.trainable_variables())
self.assertEquals([b], tf.get_collection('A'))
self.assertEquals([b], tf.get_collection('B'))
class GetVariablesBySuffixTest(tf.test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
b = tf.contrib.framework.variable('b', [5])
self.assertEquals([a],
tf.contrib.framework.get_variables_by_suffix('a'))
self.assertEquals([b],
tf.contrib.framework.get_variables_by_suffix('b'))
def testGetVariableWithScope(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
fooa = tf.contrib.framework.variable('fooa', [5])
with tf.variable_scope('B'):
a2 = tf.contrib.framework.variable('a', [5])
matched_variables = tf.contrib.framework.get_variables_by_suffix('a')
self.assertEquals([a, fooa, a2], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_suffix('/a')
self.assertEquals([a, a2], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_suffix(
'a', scope='A')
self.assertEquals([a, fooa], matched_variables)
def testGetVariableWithoutScope(self):
with self.test_session():
a = tf.contrib.framework.variable('a', [5])
fooa = tf.contrib.framework.variable('fooa', [5])
b_a = tf.contrib.framework.variable('B/a', [5])
matched_variables = tf.contrib.framework.get_variables_by_suffix('a')
self.assertEquals([a, fooa, b_a], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_suffix('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariablesByNameTest(tf.test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
b = tf.contrib.framework.variable('b', [5])
self.assertEquals([a], tf.contrib.framework.get_variables_by_name('a'))
self.assertEquals([b], tf.contrib.framework.get_variables_by_name('b'))
def testGetVariableWithScope(self):
with self.test_session():
with tf.variable_scope('A'):
a = tf.contrib.framework.variable('a', [5])
fooa = tf.contrib.framework.variable('fooa', [5])
with tf.variable_scope('B'):
a2 = tf.contrib.framework.variable('a', [5])
matched_variables = tf.contrib.framework.get_variables_by_name('a')
self.assertEquals([a, a2], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_name('/a')
self.assertEquals([], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_name('a',
scope='A')
self.assertEquals([a], matched_variables)
def testGetVariableWithoutScope(self):
with self.test_session():
a = tf.contrib.framework.variable('a', [5])
fooa = tf.contrib.framework.variable('fooa', [5])
b_a = tf.contrib.framework.variable('B/a', [5])
matched_variables = tf.contrib.framework.get_variables_by_name('a')
self.assertEquals([a, b_a], matched_variables)
matched_variables = tf.contrib.framework.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
class AssignFromValuesTest(tf.test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.test_session() as sess:
initializer = tf.truncated_normal_initializer(stddev=.1)
var0 = tf.contrib.framework.variables.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = tf.contrib.framework.variables.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(tf.initialize_all_variables())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.test_session() as sess:
initializer = tf.truncated_normal_initializer(stddev=.1)
with tf.variable_scope('my_model/my_layer0'):
var0 = tf.contrib.framework.variables.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with tf.variable_scope('my_model/my_layer1'):
var1 = tf.contrib.framework.variables.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1}
assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(tf.initialize_all_variables())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
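# Both tests above share one pattern: assign_from_values builds a single
# assign op plus a feed dict up front, and the values are then written with
# one sess.run(assign_op, feed_dict) call after the usual initialization.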
class AssignFromCheckpointTest(tf.test.TestCase):
def create_checkpoint_from_values(self, var_names_to_values, checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with tf.Session('', graph=tf.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(tf.Variable(var_value, name=var_name))
saver = tf.train.Saver(var_list)
init_op = tf.initialize_variables(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
model_dir = os.path.join(self.get_temp_dir(), 'model')
with self.test_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])
var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(tf.initialize_all_variables())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testRaisesValueErrorIfAVariableIsntFound(self):
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
model_dir = os.path.join(self.get_temp_dir(), 'model')
with self.test_session():
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])
var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])
vars_to_restore = {'v0_fake': var0, 'v1': var1}
with self.assertRaises(ValueError):
tf.contrib.framework.variables.assign_from_checkpoint(model_path,
vars_to_restore)
def testInitFromCheckpointWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0],
dtype=np.float32).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0],
dtype=np.float32).reshape((2, 1, 2))
var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
model_dir = os.path.join(self.get_temp_dir(), 'model')
with self.test_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
with tf.variable_scope('my_model/my_layer0'):
var0 = tf.contrib.framework.variables.variable('my_var0',
shape=init_value0.shape)
with tf.variable_scope('my_model/my_layer1'):
var1 = tf.contrib.framework.variables.variable('my_var1',
shape=init_value1.shape)
vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(
model_path,
vars_to_restore)
# Initialize the variables.
sess.run(tf.initialize_all_variables())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertAllEqual(init_value0, var0.eval())
self.assertAllEqual(init_value1, var1.eval())
if __name__ == '__main__':
tf.test.main()
| 43.065351 | 80 | 0.654498 |
79412bed4c546a319c7278215291d61b98dbfd99 | 2,916 | py | Python | inspector-pkg/src/shminspector/components/network.py | sha1n/macos-devenv-dump-poc | be439ad4a0c0ac265fe62d44bded73eab1a0c31d | ["MIT"] | null | null | null | inspector-pkg/src/shminspector/components/network.py | sha1n/macos-devenv-dump-poc | be439ad4a0c0ac265fe62d44bded73eab1a0c31d | ["MIT"] | null | null | null | inspector-pkg/src/shminspector/components/network.py | sha1n/macos-devenv-dump-poc | be439ad4a0c0ac265fe62d44bded73eab1a0c31d | ["MIT"] | null | null | null |
import urllib.request as request
from collections import namedtuple
from time import time
from typing import List
from shminspector.api.collector import Collector
from shminspector.api.context import Context
from shminspector.api.validator import Validator, ValidationResult, Status
from shminspector.util.diag import timeit_if
NetConnectivityInfo = namedtuple(typename="NetConnectivityInfo", field_names=["address", "ok", "time", "message"])
Spec = namedtuple(typename="Spec", field_names=["address", "failure_message"])
class UrlConnectivityInfoCollector(Collector):
def collect(self, ctx: Context) -> List[NetConnectivityInfo]:
specs = self._collect_specs(ctx)
ctx.logger.progress("Checking network connectivity...")
        return [self._check_connectivity(spec, ctx) for spec in specs]
@timeit_if(more_than_sec=3)
def _check_connectivity(self, spec, ctx):
start_time = time()
def elapsed():
return time() - start_time
try:
address = spec.address
ctx.logger.progress("Checking connectivity to {}".format(spec.address))
request.urlopen(address, timeout=10)
return NetConnectivityInfo(address=address, ok=True, time=elapsed(), message=None)
except request.HTTPError as error:
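            # A 4xx response still proves the address is reachable (the server
            # answered), so only non-4xx HTTP errors are treated as failures.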
if error.code < 400 or error.code > 499:
return NetConnectivityInfo(address=spec.address, ok=False, time=elapsed(), message=spec.failure_message)
else:
return NetConnectivityInfo(address=spec.address, ok=True, time=elapsed(), message=None)
except request.URLError:
return NetConnectivityInfo(address=spec.address, ok=False, time=elapsed(), message=spec.failure_message)
def _collect_specs(self, ctx):
specs = []
if ctx.config.get("network", None) is not None and \
ctx.config.get("network").get("check_specs", None) is not None:
for raw_spec in ctx.config["network"]["check_specs"]:
specs.append(Spec(raw_spec["address"], raw_spec["failure_message"]))
return specs
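# A sketch of the config shape _collect_specs expects (key names taken from
# the lookups above; the values are illustrative):
#   {"network": {"check_specs": [
#       {"address": "https://example.com",
#        "failure_message": "example.com is unreachable"}]}}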
class UrlConnectivityInfoValidator(Validator):
def validate(self, input_data, ctx: Context) -> ValidationResult:
if input_data is None:
ctx.logger.error("Network connectivity check returned no data!")
return ValidationResult(input_data, Status.ERROR)
for check in input_data:
if check.ok:
if check.time > 2:
ctx.logger.warn(
"Network connectivity check to {} took {} seconds".format(check.address, check.time)
)
else:
ctx.logger.warn(
"Network connectivity check to {} failed! Error: {}".format(check.address, check.message)
)
return ValidationResult(input_data, Status.OK)
| 39.945205 | 120 | 0.653978 |
79412c5e0d4fc98507eba74edc341eba77ce936e | 1,230 | py | Python | ddtrace/contrib/pymongo/__init__.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | ddtrace/contrib/pymongo/__init__.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | ["Apache-2.0", "BSD-3-Clause"] | 1 | 2021-01-27T04:53:24.000Z | 2021-01-27T04:53:24.000Z | ddtrace/contrib/pymongo/__init__.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
"""Instrument pymongo to report MongoDB queries.
The pymongo integration works by wrapping pymongo's MongoClient to trace
network calls. Pymongo 3.0 and greater are the currently supported versions.
``patch_all`` will automatically patch your MongoClient instance to make it work.
::
# Be sure to import pymongo and not pymongo.MongoClient directly,
# otherwise you won't have access to the patched version
from ddtrace import Pin, patch
import pymongo
# If not patched yet, you can patch pymongo specifically
patch(pymongo=True)
# At that point, pymongo is instrumented with the default settings
client = pymongo.MongoClient()
# Example of instrumented query
db = client["test-db"]
db.teams.find({"name": "Toronto Maple Leafs"})
# Use a pin to specify metadata related to this client
client = pymongo.MongoClient()
pin = Pin.override(client, service="mongo-master")
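``trace_mongo_client`` is also exported to wrap an existing client explicitly.
A sketch, assuming it takes the client and a service name (check your ddtrace
version for the exact signature)::
    from ddtrace.contrib.pymongo import trace_mongo_client
    client = trace_mongo_client(pymongo.MongoClient(), service="mongo-master")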
"""
from ...utils.importlib import require_modules
required_modules = ['pymongo']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
from .patch import trace_mongo_client
__all__ = ['trace_mongo_client', 'patch']
| 33.243243 | 81 | 0.730894 |
79412cf73ff1e0ad7347f9f74746f63b981b9173 | 10,442 | py | Python | pymel/mayautils.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | ["BSD-3-Clause"] | null | null | null | pymel/mayautils.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | ["BSD-3-Clause"] | null | null | null | pymel/mayautils.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | ["BSD-3-Clause"] | null | null | null |
"""Utilities for getting Maya resource directories, sourcing scripts, and executing deferred.
These do not require initialization of maya.standalone"""
import os
import sys
import re
import platform
import pymel.versions as versions
import pymel.internal as _internal
_logger = _internal.getLogger(__name__)
from pymel.util import path as _path
if False:
from typing import *
sep = os.path.pathsep
def source(file, searchPath=None, recurse=False):
# type: (Any, Iterable[str], bool) -> None
"""
Execute a python script.
Search for a python script in the specified path and execute it using
``execfile``.
    Parameters
    ----------
    file : str
        name or path of the python script to execute (converted with
        ``unicode`` before searching)
    searchPath : Iterable[str]
        list of directories in which to search for ``file``.
        uses ``sys.path`` if no path is specified
recurse : bool
whether to recurse into directories in ``searchPath``
"""
filepath = unicode(file)
filename = os.path.basename(filepath)
dirname = os.path.dirname(filepath)
if searchPath is None:
searchPath = sys.path
if isinstance(searchPath, basestring):
searchPath = [searchPath]
itpath = iter(searchPath)
_logger.debug("looking for file as: " + filepath)
    while not os.path.exists(filepath):
        try:
            p = os.path.abspath(os.path.realpath(itpath.next()))
            filepath = os.path.join(p, filename)
            _logger.debug('looking for file as: ' + filepath)
            if recurse and not os.path.exists(filepath):
                itsub = os.walk(p)
                while not os.path.exists(filepath):
                    try:
                        root, dirs, files = itsub.next()
                        itdirs = iter(dirs)
                        while not os.path.exists(filepath):
                            try:
                                filepath = os.path.join(root, itdirs.next(), filename)
                                _logger.debug('looking for file as: ' + filepath)
                            except StopIteration:
                                # this directory's subdirs are exhausted; move on
                                break
                    except StopIteration:
                        # finished walking this search root
                        break
        except StopIteration:
            # every entry in searchPath has been tried
            raise ValueError("File '" + filename + "' not found in path")
            # In case the raise exception is replaced by a warning don't
            # forget to return here
            return
# _logger.debug("Executing: "+filepath)
return execfile(filepath)
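# A short sketch of ``source`` usage (paths hypothetical):
#   source('userSetup.py')                                   # searches sys.path
#   source('startup.py', searchPath=['/studio/scripts'], recurse=True)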
def getMayaLocation(version=None):
    # type: (Optional[str]) -> Optional[str]
"""
Get the path to the Maya install directory.
.. note:: The Maya location is defined as the directory above /bin.
Uses the ``MAYA_LOCATION`` environment variable and ``sys.executable`` path.
Returns None if not found.
Parameters
----------
    version : str
if passed, will attempt to find a matching Maya location. If the
version found above does not match the requested version,
this function uses a simple find/replace heuristic to modify the path and test
if the desired version exists.
Returns
-------
Optional[str]
"""
try:
loc = os.path.realpath(os.environ['MAYA_LOCATION'])
    except KeyError:
loc = os.path.dirname(os.path.dirname(sys.executable))
# get the path of a different maya version than current
if version:
# note that a recursive loop between getMayaLocation / getMayaVersion
# is avoided because getMayaVersion always calls getMayaLocation with
# version == None
actual_long_version = versions.installName()
actual_short_version = versions.shortName()
if version != actual_long_version:
short_version = versions.parseVersionStr(version, extension=False)
if version == short_version:
try_version = actual_long_version.replace(actual_short_version,
short_version)
else:
try_version = version
try_loc = loc.replace(actual_long_version, try_version)
if os.path.exists(try_loc):
loc = try_loc
else:
_logger.warn("No Maya found for version %s" % version)
return None
return loc
def getMayaAppDir(versioned=False):
# type: (bool) -> Optional[str]
"""
Get the path to the current user's Maya application directory.
First checks ``MAYA_APP_DIR``, then tries OS-specific defaults.
Returns None, if not found
Parameters
----------
versioned : bool
if True, the current Maya version including '-x64' suffix, if
applicable, will be appended.
Returns
-------
Optional[str]
"""
appDir = os.environ.get('MAYA_APP_DIR', None)
if appDir is None:
if os.name == 'nt':
appDir = os.environ.get('USERPROFILE', os.environ.get('HOME', None))
if appDir is None:
return
# Vista or newer... version() returns "6.x.x"
if int(platform.version().split('.')[0]) > 5:
appDir = os.path.join(appDir, 'Documents')
else:
appDir = os.path.join(appDir, 'My Documents')
else:
appDir = os.environ.get('HOME', None)
if appDir is None:
return
if platform.system() == 'Darwin':
appDir = os.path.join(appDir, 'Library/Preferences/Autodesk/maya')
else:
appDir = os.path.join(appDir, 'maya')
if versioned and appDir:
appDir = os.path.join(appDir, versions.installName())
return appDir
def getUserPrefsDir():
"""Get the prefs directory below the Maya application directory"""
appDir = getMayaAppDir(versioned=True)
if appDir:
return os.path.join(appDir, 'prefs')
def getUserScriptsDir():
"""Get the scripts directory below the Maya application directory"""
appDir = getMayaAppDir(versioned=True)
if appDir:
return os.path.join(appDir, 'scripts')
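# On a default Linux install of Maya 2019, for example, these helpers resolve
# to paths of the form (illustrative):
#   getMayaAppDir(versioned=True) -> ~/maya/2019
#   getUserPrefsDir()             -> ~/maya/2019/prefs
#   getUserScriptsDir()           -> ~/maya/2019/scripts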
def executeDeferred(func, *args, **kwargs):
"""
This is a wrap for maya.utils.executeDeferred. Maya's version does not
execute at all when in batch mode, so this function does a simple check to
see if we're in batch or interactive mode. In interactive it runs
`maya.utils.executeDeferred`, and if we're in batch mode, it just executes
the function.
Use this function in your userSetup.py file if:
1. you are importing pymel there
2. you want to execute some code that relies on maya.cmds
3. you want your userSetup.py to work in both interactive and
standalone mode
Example userSetup.py file::
from pymel.all import *
def delayedStartup():
print "executing a command"
pymel.about(apiVersion=1)
mayautils.executeDeferred( delayedStartup )
Takes a single parameter which should be a callable function.
"""
import maya.utils
import maya.OpenMaya
if maya.OpenMaya.MGlobal.mayaState() == maya.OpenMaya.MGlobal.kInteractive:
maya.utils.executeDeferred(func, *args, **kwargs)
else:
if isinstance(func, basestring):
if args or kwargs:
raise ValueError('if passing a string to be executed, no '
'additional args may be passed')
            exec(func)
else:
func(*args, **kwargs)
def recurseMayaScriptPath(roots=[], verbose=False, excludeRegex=None,
errors='warn', prepend=False):
# type: (Union[str, List[str], Tuple[str]], bool, str, Any, Any) -> None
"""
Given a path or list of paths, recurses through directories appending to
the ``MAYA_SCRIPT_PATH`` environment variable any found directories
containing mel scripts.
The root directories, if given, are always added to the
``MAYA_SCRIPT_PATH``, even if they don't contain any mel scripts.
Parameters
----------
roots : Union[str, List[str], Tuple[str]]
a single path or list of paths to recurse. if left to its default,
will use the current ``MAYA_SCRIPT_PATH`` values
verbose : bool
verbose on or off
excludeRegex : str
string to be compiled to a regular expression of paths to skip.
This regex only needs to match the folder name
"""
regex = '[.]|(obsolete)'
if excludeRegex:
assert isinstance(excludeRegex, basestring), \
"please pass a regular expression as a string"
regex = regex + '|' + excludeRegex
includeRegex = "(?!(" + regex + "))" # add a negative lookahead assertion
scriptPath = os.environ["MAYA_SCRIPT_PATH"]
varList = scriptPath.split(os.path.pathsep)
    initialLen = len(varList)
def addDir(toAdd):
if toAdd not in varList:
if prepend:
_logger.debug("Prepending script path directory %s" % toAdd)
varList.insert(0, toAdd)
else:
_logger.debug("Appending script path directory %s" % toAdd)
varList.append(toAdd)
if roots:
if isinstance(roots, list) or isinstance(roots, tuple):
rootVars = list(roots)
else:
rootVars = [roots]
# Roots are always added to the script path, even if they don't have
# .mel files
for d in rootVars:
addDir(d)
# else expand the whole environment currently set
else:
rootVars = varList[:]
_logger.debug("Recursing Maya script path")
_logger.debug("Only directories which match %s will be traversed" %
includeRegex)
for rootVar in rootVars:
root = _path(rootVar)
if re.match(includeRegex, root.name) and root.exists():
_logger.debug("Searching for all valid script directories "
"below %s" % rootVar)
for f in root.walkdirs(errors=errors, regex=includeRegex):
try:
if len(f.files("*.mel")):
addDir(str(f))
except OSError:
pass
    if len(varList) > initialLen:
os.environ["MAYA_SCRIPT_PATH"] = os.path.pathsep.join(varList)
_logger.info("Added %d directories to Maya script path" %
                     (len(varList) - initialLen))
else:
_logger.info("Maya script path recursion did not find any paths to add")
| 34.462046 | 93 | 0.598736 |
79412e82faa8a1923206331d715fd0da2de3f2ae | 22,764 | py | Python | pymatgen/symmetry/maggroups.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | ["MIT"] | 1 | 2021-01-12T03:17:58.000Z | 2021-01-12T03:17:58.000Z | pymatgen/symmetry/maggroups.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | ["MIT"] | null | null | null | pymatgen/symmetry/maggroups.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | ["MIT"] | null | null | null |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Magnetic space groups.
"""
import os
from fractions import Fraction
import numpy as np
from monty.design_patterns import cached_class
import textwrap
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.groups import SymmetryGroup, in_array_list
from pymatgen.core.operations import MagSymmOp
from pymatgen.util.string import transformation_to_string
import sqlite3
from array import array
__author__ = "Matthew Horton, Shyue Ping Ong"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "Feb 2017"
MAGSYMM_DATA = os.path.join(os.path.dirname(__file__), "symm_data_magnetic.sqlite")
@cached_class
class MagneticSpaceGroup(SymmetryGroup):
"""
Representation of a magnetic space group.
"""
def __init__(self, id):
"""
Initializes a MagneticSpaceGroup from its Belov, Neronova and
Smirnova (BNS) number supplied as a list or its label supplied
as a string. To create a magnetic structure in pymatgen, the
Structure.from_magnetic_spacegroup() method can be used, which
relies on this class.
The main difference between magnetic space groups and normal
crystallographic space groups is the inclusion of a time reversal
operator that acts on an atom's magnetic moment. This is
indicated by a prime symbol (') next to the respective symmetry
operation in its label, e.g. the standard crystallographic
space group Pnma has magnetic subgroups Pn'ma, Pnm'a, Pnma',
Pn'm'a, Pnm'a', Pn'ma', Pn'm'a'.
The magnetic space groups are classified as one of 4 types
where G = magnetic space group, and F = parent crystallographic
space group:
1. G=F no time reversal, i.e. the same as corresponding
crystallographic group
2. G=F+F1', "grey" groups, where avg. magnetic moment is zero,
e.g. a paramagnet in zero ext. mag. field
3. G=D+(F-D)1', where D is an equi-translation subgroup of F of
index 2, lattice translations do not include time reversal
4. G=D+(F-D)1', where D is an equi-class subgroup of F of index 2
There are two common settings for magnetic space groups, BNS
and OG. In case 4, the BNS setting != OG setting, and so a
transformation to go between the two settings is required:
specifically, the BNS setting is derived from D, and the OG
setting is derived from F.
This means that the OG setting refers to the unit cell if magnetic
order is neglected, and requires multiple unit cells to reproduce
the full crystal periodicity when magnetic moments are present.
This does not make the OG setting, in general, useful for
electronic structure calculations and the BNS setting is preferred.
However, this class does contain information on the OG setting and
can be initialized from OG labels or numbers if required.
Conventions: ITC monoclinic unique axis b, monoclinic cell choice 1,
hexagonal axis for trigonal groups, origin choice 2 for groups with
more than one origin choice (ISO-MAG).
Raw data comes from ISO-MAG, ISOTROPY Software Suite, iso.byu.edu
http://stokes.byu.edu/iso/magnetic_data.txt
with kind permission from Professor Branton Campbell, BYU
Data originally compiled from:
(1) Daniel B. Litvin, Magnetic Group Tables (International Union
of Crystallography, 2013) www.iucr.org/publ/978-0-9553602-2-0.
(2) C. J. Bradley and A. P. Cracknell, The Mathematical Theory of
Symmetry in Solids (Clarendon Press, Oxford, 1972).
See http://stokes.byu.edu/iso/magneticspacegroupshelp.php for more
information on magnetic symmetry.
:param id: BNS number supplied as list of 2 ints or BNS label as
str or index as int (1-1651) to iterate over all space groups"""
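        # The three accepted index styles, with illustrative values:
        #   MagneticSpaceGroup(3)        # master index, an int from 1-1651
        #   MagneticSpaceGroup([2, 4])   # BNS number as [int, int]
        #   MagneticSpaceGroup("P-1")    # BNS label as str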
self._data = {}
# Datafile is stored as sqlite3 database since (a) it can be easily
# queried for various different indexes (BNS/OG number/labels) and (b)
# allows binary data to be stored in a compact form similar to that in
# the source data file, significantly reducing file size.
# Note that a human-readable JSON format was tested first but was 20x
# larger and required *much* longer initial loading times.
# retrieve raw data
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
id = "".join(id.split()) # remove any white space
c.execute('SELECT * FROM space_groups WHERE BNS_label=?;', (id,))
elif isinstance(id, list):
c.execute('SELECT * FROM space_groups WHERE BNS1=? AND BNS2=?;', (id[0], id[1]))
elif isinstance(id, int):
# OG3 index is a 'master' index, going from 1 to 1651
c.execute('SELECT * FROM space_groups WHERE OG3=?;', (id,))
raw_data = list(c.fetchone())
self._data['magtype'] = raw_data[0] # int from 1 to 4
self._data['bns_number'] = [raw_data[1], raw_data[2]]
self._data['bns_label'] = raw_data[3]
self._data['og_number'] = [raw_data[4], raw_data[5], raw_data[6]]
self._data['og_label'] = raw_data[7] # can differ from BNS_label
def _get_point_operator(idx):
"""Retrieve information on point operator (rotation matrix and Seitz label)."""
            hx = 143 <= self._data['bns_number'][0] <= 194
            c.execute('SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;', (idx - 1, hx))
op = c.fetchone()
op = {'symbol': op[0], 'matrix': np.array(op[1].split(','), dtype='f').reshape(3, 3)}
return op
def _parse_operators(b):
"""Parses compact binary representation into list of MagSymmOps."""
if len(b) == 0: # e.g. if magtype != 4, OG setting == BNS setting, and b == [] for OG symmops
return None
raw_symops = [b[i:i + 6] for i in range(0, len(b), 6)]
symops = []
for r in raw_symops:
point_operator = _get_point_operator(r[0])
translation_vec = [r[1] / r[4], r[2] / r[4], r[3] / r[4]]
time_reversal = r[5]
op = MagSymmOp.from_rotation_and_translation_and_time_reversal(rotation_matrix=point_operator['matrix'],
translation_vec=translation_vec,
time_reversal=time_reversal)
# store string representation, e.g. (2x|1/2,1/2,1/2)'
seitz = '({0}|{1},{2},{3})'.format(point_operator['symbol'],
Fraction(translation_vec[0]),
Fraction(translation_vec[1]),
Fraction(translation_vec[2]))
if time_reversal == -1:
seitz += '\''
symops.append({'op': op, 'str': seitz})
return symops
def _parse_wyckoff(b):
"""Parses compact binary representation into list of Wyckoff sites."""
if len(b) == 0:
return None
wyckoff_sites = []
def get_label(idx):
if idx <= 25:
return chr(97 + idx) # returns a-z when idx 0-25
else:
return 'alpha' # when a-z labels exhausted, use alpha, only relevant for a few space groups
o = 0 # offset
n = 1 # nth Wyckoff site
num_wyckoff = b[0]
while len(wyckoff_sites) < num_wyckoff:
m = b[1 + o] # multiplicity
label = str(b[2 + o] * m) + get_label(num_wyckoff - n)
sites = []
for j in range(m):
s = b[3 + o + (j * 22):3 + o + (j * 22) + 22] # data corresponding to specific Wyckoff position
translation_vec = [s[0] / s[3], s[1] / s[3], s[2] / s[3]]
matrix = [[s[4], s[7], s[10]],
[s[5], s[8], s[11]],
[s[6], s[9], s[12]]]
matrix_magmom = [[s[13], s[16], s[19]],
[s[14], s[17], s[20]],
[s[15], s[18], s[21]]]
# store string representation, e.g. (x,y,z;mx,my,mz)
wyckoff_str = "({};{})".format(transformation_to_string(matrix, translation_vec),
transformation_to_string(matrix_magmom, c='m'))
sites.append({'translation_vec': translation_vec,
'matrix': matrix,
'matrix_magnetic': matrix_magmom,
'str': wyckoff_str})
# only keeping string representation of Wyckoff sites for now
# could do something else with these in future
wyckoff_sites.append({'label': label,
'str': ' '.join([s['str'] for s in sites])})
n += 1
o += m * 22 + 2
return wyckoff_sites
def _parse_lattice(b):
"""Parses compact binary representation into list of lattice vectors/centerings."""
if len(b) == 0:
return None
raw_lattice = [b[i:i + 4] for i in range(0, len(b), 4)]
lattice = []
for r in raw_lattice:
lattice.append({'vector': [r[0] / r[3], r[1] / r[3], r[2] / r[3]],
'str': '({0},{1},{2})+'.format(Fraction(r[0] / r[3]).limit_denominator(),
Fraction(r[1] / r[3]).limit_denominator(),
Fraction(r[2] / r[3]).limit_denominator())})
return lattice
def _parse_transformation(b):
"""Parses compact binary representation into transformation between OG and BNS settings."""
if len(b) == 0:
return None
# capital letters used here by convention,
# IUCr defines P and p specifically
P = [[b[0], b[3], b[6]],
[b[1], b[4], b[7]],
[b[2], b[5], b[8]]]
p = [b[9] / b[12], b[10] / b[12], b[11] / b[12]]
P = np.array(P).transpose()
P_string = transformation_to_string(P, components=('a', 'b', 'c'))
p_string = "{},{},{}".format(Fraction(p[0]).limit_denominator(),
Fraction(p[1]).limit_denominator(),
Fraction(p[2]).limit_denominator())
return P_string + ";" + p_string
for i in range(8, 15):
try:
raw_data[i] = array('b', raw_data[i]) # construct array from sql binary blobs
except Exception:
# array() behavior changed, need to explicitly convert buffer to str in earlier Python
raw_data[i] = array('b', str(raw_data[i]))
self._data['og_bns_transform'] = _parse_transformation(raw_data[8])
self._data['bns_operators'] = _parse_operators(raw_data[9])
self._data['bns_lattice'] = _parse_lattice(raw_data[10])
self._data['bns_wyckoff'] = _parse_wyckoff(raw_data[11])
self._data['og_operators'] = _parse_operators(raw_data[12])
self._data['og_lattice'] = _parse_lattice(raw_data[13])
self._data['og_wyckoff'] = _parse_wyckoff(raw_data[14])
db.close()
@classmethod
def from_og(cls, id):
"""
Initialize from Opechowski and Guccione (OG) label or number.
:param id: OG number supplied as list of 3 ints or
or OG label as str
:return:
"""
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
c.execute('SELECT BNS_label FROM space_groups WHERE OG_label=?', (id,))
elif isinstance(id, list):
c.execute('SELECT BNS_label FROM space_groups WHERE OG1=? and OG2=? and OG3=?', (id[0], id[1], id[2]))
bns_label = c.fetchone()[0]
db.close()
return cls(bns_label)
def __eq__(self, other):
return self._data == other._data
@property
def crystal_system(self):
"""
:return: Crystal system, e.g., cubic, hexagonal, etc.
"""
i = self._data["bns_number"][0]
if i <= 2:
return "triclinic"
elif i <= 15:
return "monoclinic"
elif i <= 74:
return "orthorhombic"
elif i <= 142:
return "tetragonal"
elif i <= 167:
return "trigonal"
elif i <= 194:
return "hexagonal"
else:
return "cubic"
@property
def sg_symbol(self):
"""
:return: Space group symbol
"""
return self._data["bns_label"]
@property
def symmetry_ops(self):
"""
Retrieve magnetic symmetry operations of the space group.
:return: List of :class:`pymatgen.core.operations.MagSymmOp`
"""
ops = [op_data['op'] for op_data in self._data['bns_operators']]
# add lattice centerings
centered_ops = []
lattice_vectors = [l['vector'] for l in self._data['bns_lattice']]
for vec in lattice_vectors:
if not (np.array_equal(vec, [1, 0, 0])
or np.array_equal(vec, [0, 1, 0])
or np.array_equal(vec, [0, 0, 1])):
for op in ops:
new_vec = op.translation_vector + vec
new_op = MagSymmOp.from_rotation_and_translation_and_time_reversal(op.rotation_matrix,
translation_vec=new_vec,
time_reversal=op.time_reversal)
centered_ops.append(new_op)
ops = ops + centered_ops
return ops
def get_orbit(self, p, m, tol=1e-5):
"""
Returns the orbit for a point and its associated magnetic moment.
Args:
p: Point as a 3x1 array.
m: A magnetic moment, compatible with
:class:`pymatgen.electronic_structure.core.Magmom`
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
(([array], [array])) Tuple of orbit for point and magnetic moments for orbit.
"""
orbit = []
orbit_magmoms = []
m = Magmom(m)
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
mm = o.operate_magmom(m)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
orbit_magmoms.append(mm)
return orbit, orbit_magmoms
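        # Illustrative call (msg being a MagneticSpaceGroup instance): the
        # orbit of the origin with a moment along z,
        #   positions, magmoms = msg.get_orbit([0, 0, 0], [0, 0, 1])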
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
# function from pymatgen.symmetry.groups.SpaceGroup
abc = lattice.lengths
angles = lattice.angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all([abs(i - j) < tolerance for i, j in zip(param, ref)
if j is not None])
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and check(angles, [90, 90, 90], angle_tol)
elif crys_system == "hexagonal" or (crys_system == "trigonal" and
self.symbol.endswith("H")):
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 120], angle_tol)
elif crys_system == "trigonal":
a = abc[0]
return check(abc, [a, a, a], tol)
elif crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 90], angle_tol)
elif crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
elif crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
def data_str(self, include_og=True):
"""
Get description of all data, including information for OG setting.
:return: str
"""
# __str__() omits information on OG setting to reduce confusion
# as to which set of symops are active, this property gives
# all stored data including OG setting
desc = {} # dictionary to hold description strings
# parse data into strings
desc['magtype'] = self._data['magtype']
desc['bns_number'] = ".".join(map(str, self._data["bns_number"]))
desc['bns_label'] = self._data["bns_label"]
desc['og_id'] = ("\t\tOG: " + ".".join(map(str, self._data["og_number"])) + " " + self._data["og_label"]
if include_og else '')
desc['bns_operators'] = ' '.join([op_data['str'] for op_data in self._data['bns_operators']])
desc['bns_lattice'] = (' '.join([lattice_data['str'] for lattice_data in self._data['bns_lattice'][3:]])
if len(self._data['bns_lattice']) > 3 else '') # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+
desc['bns_wyckoff'] = '\n'.join([textwrap.fill(wyckoff_data['str'],
initial_indent=wyckoff_data['label'] + " ",
subsequent_indent=" " * len(wyckoff_data['label'] + " "),
break_long_words=False, break_on_hyphens=False)
for wyckoff_data in self._data['bns_wyckoff']])
desc['og_bns_transformation'] = ('OG-BNS Transform: ({})\n'.format(self._data['og_bns_transform'])
if desc['magtype'] == 4 and include_og else '')
bns_operators_prefix = "Operators{}: ".format(' (BNS)' if desc['magtype'] == 4 and include_og else '')
bns_wyckoff_prefix = "Wyckoff Positions{}: ".format(' (BNS)' if desc['magtype'] == 4 and include_og else '')
# apply textwrap on long lines
desc['bns_operators'] = textwrap.fill(desc['bns_operators'],
initial_indent=bns_operators_prefix,
subsequent_indent=" " * len(bns_operators_prefix),
break_long_words=False, break_on_hyphens=False)
description = ("BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
"{d[og_bns_transformation]}"
"{d[bns_operators]}\n"
"{bns_wyckoff_prefix}{d[bns_lattice]}\n"
"{d[bns_wyckoff]}").format(d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)
if desc['magtype'] == 4 and include_og:
desc['og_operators'] = ' '.join([op_data['str'] for op_data in self._data['og_operators']])
# include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
# not always present in OG setting
desc['og_lattice'] = ' '.join([lattice_data['str'] for lattice_data in self._data['og_lattice']])
desc['og_wyckoff'] = '\n'.join([textwrap.fill(wyckoff_data['str'],
initial_indent=wyckoff_data['label'] + " ",
subsequent_indent=" " * len(wyckoff_data['label'] + " "),
break_long_words=False, break_on_hyphens=False)
for wyckoff_data in self._data['og_wyckoff']])
og_operators_prefix = "Operators (OG): "
# apply textwrap on long lines
desc['og_operators'] = textwrap.fill(desc['og_operators'],
initial_indent=og_operators_prefix,
subsequent_indent=" " * len(og_operators_prefix),
break_long_words=False, break_on_hyphens=False)
description += ("\n{d[og_operators]}\n"
"Wyckoff Positions (OG): {d[og_lattice]}\n"
"{d[og_wyckoff]}").format(d=desc)
elif desc['magtype'] == 4:
description += '\nAlternative OG setting exists for this space group.'
return description
def __str__(self):
"""
String representation of the space group, specifying the setting
of the space group, its magnetic symmetry operators and Wyckoff
positions.
:return: str
"""
return self.data_str(include_og=False)
def _write_all_magnetic_space_groups_to_file(filename):
"""
Write all magnetic space groups to a human-readable text file.
Should contain same information as text files provided by ISO-MAG.
:param filename:
:return:
"""
s = ('Data parsed from raw data from:\n'
'ISO-MAG, ISOTROPY Software Suite, iso.byu.edu\n'
'http://stokes.byu.edu/iso/magnetic_data.txt\n'
'Used with kind permission from Professor Branton Campbell, BYU\n\n')
all_msgs = []
for i in range(1, 1652):
all_msgs.append(MagneticSpaceGroup(i))
for msg in all_msgs:
s += '\n{}\n\n--------\n'.format(msg.data_str())
    with open(filename, 'w') as f:
        f.write(s)
| 44.460938 | 120 | 0.543841 |
79412ebd1aa5ea0d8c16c20add56bd170d7666fd | 23,558 | py | Python | scipy/optimize/_linprog.py | avivajpeyi/scipy | dbfe06e6618232b26c241cbe8861e2ea1489b535 | ["BSD-3-Clause"] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | scipy/optimize/_linprog.py | avivajpeyi/scipy | dbfe06e6618232b26c241cbe8861e2ea1489b535 | ["BSD-3-Clause"] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | scipy/optimize/_linprog.py | avivajpeyi/scipy | dbfe06e6618232b26c241cbe8861e2ea1489b535 | ["BSD-3-Clause"] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z |
"""
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
import numpy as np
from .optimize import OptimizeResult, OptimizeWarning
from warnings import warn
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_rs import _linprog_rs
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _postprocess, _LPProblem, _autoscale)
from copy import deepcopy
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
        print('--------- Simplex Early Exit -------\n')
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``.
phase : int
The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the tableau T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
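# Either callback is wired in through linprog's ``callback`` argument; using
# the example problem from the linprog docstring below as an illustration:
#   res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
#                 callback=linprog_terse_callback)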
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='interior-point', callback=None,
options=None, x0=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Informally, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : {'interior-point', 'revised simplex', 'simplex'}, optional
The algorithm used to solve the standard form problem.
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are supported.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1-D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
Default: see method-specific documentation.
disp : bool
Set to ``True`` to print convergence messages.
Default: ``False``.
autoscale : bool
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
Default: ``False``.
presolve : bool
Set to ``False`` to disable automatic presolve.
Default: ``True``.
rr : bool
Set to ``False`` to disable automatic redundancy removal.
Default: ``True``.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
The values of the decision variables that minimizes the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
:ref:`'interior-point' <optimize.linprog-interior-point>` is the default
as it is typically the fastest and most robust method.
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
accurate for the problems it solves.
:ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
included for backwards compatibility and educational purposes.
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying any method, a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g., a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
    If neither infeasibility nor unboundedness is detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
Optionally, the problem is automatically scaled via equilibration [12]_.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts the result to a solution to the original
problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Numerische Mathematik 16.5 (1971): 414-434.
.. [12] Tomlin, J. A. "On scaling linear programming problems."
Mathematical Programming Study 4 (1975): 146-166.
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
Note that the default method for `linprog` is 'interior-point', which is
approximate by nature.
>>> print(res)
con: array([], dtype=float64)
fun: -21.99999984082494 # may vary
message: 'Optimization terminated successfully.'
nit: 6 # may vary
      slack: array([3.89999997e+01, 8.46872439e-08]) # may vary
status: 0
success: True
x: array([ 9.99999989, -2.99999999]) # may vary
If you need greater accuracy, try 'revised simplex'.
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')
>>> print(res)
con: array([], dtype=float64)
fun: -22.0 # may vary
message: 'Optimization terminated successfully.'
nit: 1 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.]) # may vary
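    Solver behaviour can be tuned further through the ``options``
    dictionary; for example, the presolve step discussed in the Notes can
    be skipped (a sketch only; the returned object has the same fields as
    above):
    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds],
    ...               options={'presolve': False})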
"""
meth = method.lower()
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)
lp, solver_options = _parse_linprog(lp, options)
tol = solver_options.get('tol', 1e-9)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
lp_o = deepcopy(lp)
# Solve trivial problem, eliminate variables, tighten bounds, etc.
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, tol)
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
if not complete:
A, b, c, c0, x0 = _get_Abc(lp, c0, undo)
if solver_options.pop('autoscale', False):
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
postsolve_args = postsolve_args[:-2] + (C, b_scale)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
postsolve_args=postsolve_args, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc.
# need modified bounds here to translate variables appropriately
disp = solver_options.get('disp', False)
x, fun, slack, con, status, message = _postprocess(x, postsolve_args,
complete, status,
message, tol,
iteration, disp)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
| 40.899306 | 143 | 0.628279 |
79412f2fdac659cde5534fb23cde8df4895cd4f4 | 841 | py | Python | src/homework/homework1.py | acc-cosc-1336/cosc-1336-spring-2018-brianmiller7 | 78bb08379aba7a07838ed91643b8bf274f2227ae | [
"MIT"
] | null | null | null | src/homework/homework1.py | acc-cosc-1336/cosc-1336-spring-2018-brianmiller7 | 78bb08379aba7a07838ed91643b8bf274f2227ae | [
"MIT"
] | null | null | null | src/homework/homework1.py | acc-cosc-1336/cosc-1336-spring-2018-brianmiller7 | 78bb08379aba7a07838ed91643b8bf274f2227ae | [
"MIT"
] | null | null | null | def get_hours_since_midnight(seconds):
'''
    Calculate the number of whole hours in the given number of seconds.
    For example, given 3800 seconds the total hours is 1.
'''
return seconds // 3600
'''
IF YOU ARE OK WITH A GRADE OF 70 FOR THIS ASSIGNMENT STOP HERE.
'''
def get_minutes(seconds):
'''
    Calculate the minutes remaining after whole hours, given a number of seconds.
    For example, given 3800 seconds the remaining minutes are 3.
'''
numTotalMin = seconds // 60
numTotalHours = seconds // 3600
return numTotalMin-(numTotalHours*60)
def get_seconds(seconds):
'''
    Calculate the seconds remaining after whole minutes, given a number of seconds.
    For example, given 3800 seconds the remaining seconds are 20.
'''
numTotalMin = seconds // 60
return seconds-(numTotalMin*60)
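# A quick, illustrative sanity check (not part of the graded assignment):
# 3800 seconds breaks down into 1 hour, 3 minutes and 20 seconds.
if __name__ == '__main__':
    total = 3800
    print(get_hours_since_midnight(total))  # 1
    print(get_minutes(total))               # 3
    print(get_seconds(total))               # 20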
| 30.035714 | 87 | 0.694411 |
79412fe78db5f3b6df7e1dc45623da5c27166298 | 1,208 | py | Python | tests/python_on_whales/components/test_node.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 191 | 2020-12-02T19:35:00.000Z | 2022-03-31T22:41:48.000Z | tests/python_on_whales/components/test_node.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 94 | 2020-12-18T16:36:38.000Z | 2022-03-31T00:06:39.000Z | tests/python_on_whales/components/test_node.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 33 | 2020-12-17T20:32:31.000Z | 2022-03-29T10:23:06.000Z | import pytest
from python_on_whales import docker
from python_on_whales.components.node.models import NodeInspectResult
from python_on_whales.test_utils import get_all_jsons
@pytest.mark.parametrize("json_file", get_all_jsons("nodes"))
def test_load_json(json_file):
json_as_txt = json_file.read_text()
a: NodeInspectResult = NodeInspectResult.parse_raw(json_as_txt)
if json_file.name == "1.json":
assert (
a.description.resources.generic_resources[0].named_resource_spec.kind
== "gpu"
)
assert (
a.description.resources.generic_resources[0].named_resource_spec.value
== "gpu-0"
)
assert a.description.resources.nano_cpus == 4000000001
@pytest.mark.usefixtures("swarm_mode")
def test_list_nodes():
nodes = docker.node.list()
assert len(nodes) == 1
@pytest.mark.usefixtures("swarm_mode")
def test_tasks():
service = docker.service.create("busybox", ["sleep", "infinity"])
current_node = docker.node.list()[0]
tasks = current_node.ps()
assert len(tasks) > 0
assert tasks[0].desired_state == "running"
docker.service.remove(service)
| 30.974359 | 83 | 0.675497 |
794131318245e811c80755b45f7d9c08a4e91f04 | 47,012 | py | Python | test/integration/component/test_project_limits.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-02-10T07:21:58.000Z | 2021-05-07T08:52:17.000Z | test/integration/component/test_project_limits.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-06-11T02:17:06.000Z | 2015-06-22T20:46:42.000Z | test/integration/component/test_project_limits.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2015-05-25T15:53:52.000Z | 2018-05-23T14:08:07.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Resource limits
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import PASS
import datetime
class Services:
"""Test Resource Limits Services
"""
def __init__(self):
self.services = {
"domain": {
"name": "Domain",
},
"project": {
"name": "Project",
"displaytext": "Test project",
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"user": {
"email": "[email protected]",
"firstname": "User",
"lastname": "User",
"username": "User",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"disk_offering": {
"displaytext": "Tiny Disk Offering",
"name": "Tiny Disk Offering",
"disksize": 1
},
"volume": {
"diskname": "Test Volume",
},
"server": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"template": {
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostype": 'CentOS 5.3 (64-bit)',
"templatefilter": 'self',
},
"network_offering": {
"name": 'Network offering-VR services',
"displaytext": 'Network offering-VR services',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
"Vpn": 'VirtualRouter',
"Firewall": 'VirtualRouter',
"Lb": 'VirtualRouter',
"UserData": 'VirtualRouter',
"StaticNat": 'VirtualRouter',
},
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"ostype": 'CentOS 5.3 (64-bit)',
# Cent OS 5.3 (64 bit)
"sleep": 60,
"timeout": 10,
}
class TestProjectLimits(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectLimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.admin = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["user"],
domainid=cls.domain.id
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.admin,
cls.user,
cls.domain,
cls.disk_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator", "selfservice"])
def test_01_project_limits(self):
""" Test project limits for domain admin
"""
# Validate the following
# 1. Create a Project. Verify once projects are created, they inherit
# a default set of resource limits as configured by the Cloud Stack
# ROOT admin.
# 2. Reduce Project resources limits. Verify limits can be reduced by
# the Project Owner of each project and project limit applies to
# number of virtual instances, disk volumes, snapshots, IP address.
# Also, verify resource limits for the project are independent of
# account resource limits
# 3. Increase Projects Resources limits above domains limit. Verify
# project can't have more resources than domain level limit allows.
# 4. Create Resource more than its set limit for the parent domain.
# Verify resource allocation should fail giving proper message
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.admin.name,
domainid=self.admin.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
# Get the resource limits for ROOT domain
resource_limits = list_resource_limits(self.apiclient)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
# Reduce resource limits for project
# Resource: 0 - Instance. Number of instances a user can create.
# Resource: 1 - IP. Number of public IP addresses a user can own.
# Resource: 2 - Volume. Number of disk volumes a user can create.
# Resource: 3 - Snapshot. Number of snapshots a user can create.
# Resource: 4 - Template. Number of templates that a user can
# register/create
for resource in resource_limits:
update_resource_limit(
self.apiclient,
resource.resourcetype,
max=1,
projectid=project.id
)
self.debug(
"Updating resource (ID: %s) limit for project: %s" % (
resource,
project.id
))
resource_limits = list_resource_limits(
self.apiclient,
projectid=project.id
)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
for resource in resource_limits:
self.assertEqual(
resource.max,
1,
"Resource limit should be updated to 1"
)
# Get the resource limits for domain
resource_limits = list_resource_limits(
self.apiclient,
domainid=self.domain.id
)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
for resource in resource_limits:
            # Update the domain resource limit to 1
update_resource_limit(
self.apiclient,
resource.resourcetype,
domainid=self.domain.id,
max=1
)
max_value = 2
self.debug(
"Attempting to update project: %s resource limit to: %s" % (
project.id,
max_value
))
            # Update the project resource limit to 2
update_resource_limit(
self.apiclient,
resource.resourcetype,
max=max_value,
projectid=project.id
)
            # Verify the project can't have more resources than the domain-level limit allows, by adding volumes.
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=project.id
)
# Exception should be raised for second volume
with self.assertRaises(Exception):
Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=project.id
)
volume.delete(self.apiclient);
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator", "selfservice"])
def test_02_project_limits_normal_user(self):
""" Test project limits for normal user
"""
# Validate the following
# 1. Create a Project
# 2. Reduce the projects limits as a domain admin. Verify resource
# count is updated
# 3. Reduce the projects limits as a project user owner who is not a
# domain admin. Resource count should fail
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.admin.name,
domainid=self.admin.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
# Get the resource limits for ROOT domain
resource_limits = list_resource_limits(self.apiclient)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
# Reduce resource limits for project
# Resource: 0 - Instance. Number of instances a user can create.
# Resource: 1 - IP. Number of public IP addresses a user can own.
# Resource: 2 - Volume. Number of disk volumes a user can create.
# Resource: 3 - Snapshot. Number of snapshots a user can create.
# Resource: 4 - Template. Number of templates that a user can
# register/create
for resource in resource_limits:
update_resource_limit(
self.apiclient,
resource.resourcetype,
max=1,
projectid=project.id
)
self.debug(
"Updating resource (ID: %s) limit for project: %s" % (
resource,
project.id
))
resource_limits = list_resource_limits(
self.apiclient,
projectid=project.id
)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
for resource in resource_limits:
self.assertEqual(
resource.max,
1,
"Resource limit should be updated to 1"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
)
# Get the resource limits for domain
resource_limits = list_resource_limits(
self.apiclient,
domainid=self.domain.id
)
self.assertEqual(
isinstance(resource_limits, list),
True,
"List resource API should return a valid list"
)
self.assertNotEqual(
len(resource_limits),
0,
"List resource API response should not be empty"
)
for resource in resource_limits:
#with self.assertRaises(Exception):
self.debug(
"Attempting to update resource limit by user: %s" % (
self.user.name
))
# Update project resource limits to 3
update_resource_limit(
self.apiclient,
resource.resourcetype,
account=self.user.name,
domainid=self.user.domainid,
max=3,
projectid=project.id
)
return
class TestResourceLimitsProject(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestResourceLimitsProject, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["server"]["zoneid"] = cls.zone.id
# Create Domains, Account etc
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
# Create project as a domain admin
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.account.name,
domainid=cls.account.domainid
)
cls.services["account"] = cls.account.name
# Create Service offering and disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.project,
cls.service_offering,
cls.disk_offering,
cls.account,
cls.domain
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator", "selfservice"])
def test_03_vm_per_project(self):
"""Test VM limit per project
"""
# Validate the following
# 1. Set max VM per project to 2
        # 2. Start 2 VMs in the project. Verify VM state is Up and Running
# 3. Try to create 3rd VM instance. The appropriate error or alert
# should be raised
self.debug(
"Updating instance resource limits for project: %s" %
self.project.id)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
0, # Instance
max=2,
projectid=self.project.id
)
self.debug("Deploying VM for project: %s" % self.project.id)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug("Deploying VM for project: %s" % self.project.id)
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine_2)
# Verify VM state
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
        # Exception should be raised for the third instance
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
return
@attr(tags=["advanced", "eip", "advancedns", "simulator", "selfservice"])
def test_04_publicip_per_project(self):
"""Test Public IP limit per project
"""
# Validate the following
# 1. set max no of IPs per project to 2.
        # 2. Create a project in this domain
        # 3. Create 1 VM in this project
        # 4. Acquire 1 IP in the project. IP should be successfully acquired
        # 5. Try to acquire a 3rd IP in this project. It should give the user an
# appropriate error and an alert should be generated.
self.debug(
"Updating public IP resource limits for project: %s" %
self.project.id)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
1, # Public Ip
max=2,
projectid=self.project.id
)
self.debug("Deploying VM for Project: %s" % self.project.id)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
networks = Network.list(
self.apiclient,
projectid=self.project.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"Check list networks response returns a valid response"
)
self.assertNotEqual(
len(networks),
0,
"Check list networks response returns a valid network"
)
network = networks[0]
self.debug("Associating public IP for project: %s" %
self.project.id)
public_ip_1 = PublicIPAddress.create(
self.apiclient,
zoneid=virtual_machine_1.zoneid,
services=self.services["server"],
networkid=network.id,
projectid=self.project.id
)
self.cleanup.append(public_ip_1)
# Verify Public IP state
self.assertEqual(
public_ip_1.ipaddress.state in [
'Allocated',
'Allocating'
],
True,
"Check Public IP state is allocated or not"
)
# Exception should be raised for second Public IP
with self.assertRaises(Exception):
public_ip_2 = PublicIPAddress.create(
self.apiclient,
zoneid=virtual_machine_1.zoneid,
services=self.services["server"],
networkid=network.id,
projectid=self.project.id
)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator", "selfservice"])
def test_05_snapshots_per_project(self):
"""Test Snapshot limit per project
"""
# Validate the following
# 1. set max no of snapshots per project to 1.
# 2. Create one snapshot in the project. Snapshot should be
# successfully created
# 5. Try to create another snapshot in this project. It should give
# user an appropriate error and an alert should be generated.
self.debug(
"Updating snapshot resource limits for project: %s" %
self.project.id)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
3, # Snapshot
max=1,
projectid=self.project.id
)
self.debug("Deploying VM for account: %s" % self.account.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_1.id,
projectid=self.project.id,
type='ROOT'
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
# Create a snapshot from the ROOTDISK
snapshot_1 = Snapshot.create(self.apiclient,
volumes[0].id,
projectid=self.project.id
)
self.cleanup.append(snapshot_1)
#list snapshots
snapshots = list_snapshots(self.apiclient, projectid=self.project.id)
self.debug("snapshots list: %s" % snapshots)
self.assertEqual(validateList(snapshots)[0], PASS, "Snapshots list validation failed")
self.assertEqual(len(snapshots), 1, "Snapshots list should have exactly one entity")
# Exception should be raised for second snapshot
with self.assertRaises(Exception):
Snapshot.create(self.apiclient,
volumes[0].id,
projectid=self.project.id
)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator", "selfservice"])
def test_06_volumes_per_project(self):
"""Test Volumes limit per project
"""
# Validate the following
        # 1. Set max no. of volumes per project to 1.
        # 2. Create 1 VM in this project
        # 3. Try to create a data volume in the project. It should give the
        #    user an appropriate error that the volume limit is exhausted and
        #    an alert should be generated.
self.project_1 = Project.create(
self.api_client,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
self.cleanup.append(self.project_1)
self.debug(
"Updating volume resource limits for project: %s" %
self.project_1.id)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
2, # Volume
max=1,
projectid=self.project_1.id
)
self.debug("Deploying VM for project: %s" % self.project_1.id)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project_1.id
)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
# Exception should be raised for second volume
with self.assertRaises(Exception):
Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=self.project_1.id
)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "selfservice"])
def test_07_templates_per_project(self):
"""Test Templates limit per project
"""
# 1. set max no of templates per project to 1.
        # 2. Create a template in this project. The template should be in
        #    the ready state
        # 3. Try to create a 2nd template in the project. It should give the
        #    user an appropriate error and an alert should be generated.
# Reset the volume limits
update_resource_limit(
self.apiclient,
2, # Volume
max=5,
projectid=self.project.id
)
self.debug(
"Updating template resource limits for domain: %s" %
self.account.domainid)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
4, # Template
max=1,
projectid=self.project.id
)
self.debug("Deploying VM for account: %s" % self.account.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
virtual_machine_1.stop(self.apiclient)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_1.id,
projectid=self.project.id,
type='ROOT'
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating template from volume: %s" % volume.id)
# Create a template from the ROOTDISK
template_1 = Template.create(
self.apiclient,
self.services["template"],
volumeid=volume.id,
projectid=self.project.id
)
self.cleanup.append(template_1)
# Verify Template state
self.assertEqual(
template_1.isready,
True,
"Check Template is in ready state or not"
)
# Exception should be raised for second template
with self.assertRaises(Exception):
Template.create(
self.apiclient,
self.services["template"],
volumeid=volume.id,
projectid=self.project.id
)
return
class TestMaxProjectNetworks(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMaxProjectNetworks, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.domain = get_domain(cls.api_client)
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=True
)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
cls._cleanup = [
cls.service_offering,
cls.network_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self.cleanup)
self.account.delete(self.apiclient)
interval = list_configurations(
self.apiclient,
name='account.cleanup.interval'
)
# Sleep to ensure that all resources are deleted
time.sleep(int(interval[0].value) * 2)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator",
"api", "eip"])
def test_maxAccountNetworks(self):
"""Test Limit number of guest account specific networks
"""
# Steps for validation
# 1. Fetch max.account.networks from configurations
# 2. Create an account. Create account more that max.accout.network
# 3. Create network should fail
self.debug("Creating project with '%s' as admin" %
self.account.name)
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
config = Configurations.list(
self.apiclient,
name='max.project.networks',
listall=True
)
self.assertEqual(
isinstance(config, list),
True,
"List configurations hsould have max.project.networks"
)
config_value = int(config[0].value)
self.debug("max.project.networks: %s" % config_value)
for ctr in range(config_value):
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
projectid=project.id,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
self.debug(
"Creating network in account already having networks : %s" %
config_value)
with self.assertRaises(Exception):
Network.create(
self.apiclient,
self.services["network"],
projectid=project.id,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug('Create network failed (as expected)')
return
| 43.61039 | 128 | 0.429656 |
794131454b4ef2d21bd20f95d194eca0db5dddf1 | 636 | py | Python | first_lambda/aws-lambdavxuq2swg/bin/rst2man.py | mylar-pr/DaaS | e41fa9e9fbda66d7150f00e6db13dd3a76cd3501 | [
"MIT"
] | null | null | null | first_lambda/aws-lambdavxuq2swg/bin/rst2man.py | mylar-pr/DaaS | e41fa9e9fbda66d7150f00e6db13dd3a76cd3501 | [
"MIT"
] | null | null | null | first_lambda/aws-lambdavxuq2swg/bin/rst2man.py | mylar-pr/DaaS | e41fa9e9fbda66d7150f00e6db13dd3a76cd3501 | [
"MIT"
] | null | null | null | #!/home/prajwal_mylar/anaconda3/envs/lambda1/bin/python
# Author:
# Contact: [email protected]
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
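# Typical invocation, following the usual docutils front-end convention
# (the file names here are illustrative):
#
#     rst2man.py README.rst README.1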
| 23.555556 | 80 | 0.773585 |
79413169bfd0ed815c03cdb7eea4d1045160f2e9 | 512 | py | Python | python/python_crash_course/chapter_5/5-8.hello_admin.py | lmonsalve22/Learning-to-Code | 2e32eba3fbd0bd63cc539e1e6d372ca346b765c9 | [
"MIT"
] | null | null | null | python/python_crash_course/chapter_5/5-8.hello_admin.py | lmonsalve22/Learning-to-Code | 2e32eba3fbd0bd63cc539e1e6d372ca346b765c9 | [
"MIT"
] | null | null | null | python/python_crash_course/chapter_5/5-8.hello_admin.py | lmonsalve22/Learning-to-Code | 2e32eba3fbd0bd63cc539e1e6d372ca346b765c9 | [
"MIT"
] | null | null | null | """ Make a list with 5 usernames and make an if-else statement,
for admin print an special message, but for the other just print
a simple 'hello' """
username = input('Please enter your username:\n')
usernames = ['admin', 'jamesnoria', 'jnoria', 'user95', 'ctirado']
if username == 'admin':
print('Hello admin, would you like to see a status report?')
elif username in usernames:
print(f'Hello {username}, thank you for logging in again')
else:
print('I do not know you') | 36.571429 | 66 | 0.707031 |
79413171e816f395c0191e67251331df265439d9 | 611 | py | Python | m-encode.py | shantanu561993/foolavc | 3014cf99c653275be408a3e7f7309f7130172116 | [
"MIT"
] | null | null | null | m-encode.py | shantanu561993/foolavc | 3014cf99c653275be408a3e7f7309f7130172116 | [
"MIT"
] | null | null | null | m-encode.py | shantanu561993/foolavc | 3014cf99c653275be408a3e7f7309f7130172116 | [
"MIT"
] | 1 | 2020-12-15T06:51:55.000Z | 2020-12-15T06:51:55.000Z | import os,sys
if len(sys.argv) == 1:
sys.stderr.write('Usage: {} [files...]\n'.format(sys.argv[0]))
sys.exit(1)
for i in sys.argv[1:]:
if os.path.exists(i):
ext = i.split('.')[-1]
filename = '.'.join(i.split('.')[:-1])
extensions = dict(exe='me', dll='ml')
if ext in extensions.keys():
filename += '.{}'.format(extensions[ext])
else:
filename += '.mf'
with open(filename,'wb') as f:
f.write(''.join(map(lambda x: '\\x{:02x}'.format(ord(x)), list(open(i,'rb').read()))))
print '{} saved.'.format(filename)
| 32.157895 | 98 | 0.505728 |
794131a3acc5a5859f4971c97cc5035b20729e02 | 2,854 | py | Python | examples/widgets/table/lv_example_table_2.py | ggvl/lvgl | d56ce08782852ff274141f69271464da87de96bc | [
"MIT"
] | 8 | 2022-02-11T08:20:49.000Z | 2022-03-22T06:19:59.000Z | examples/widgets/table/lv_example_table_2.py | ggvl/lvgl | d56ce08782852ff274141f69271464da87de96bc | [
"MIT"
] | 2 | 2022-03-22T03:22:45.000Z | 2022-03-22T06:09:13.000Z | examples/widgets/table/lv_example_table_2.py | ggvl/lvgl | d56ce08782852ff274141f69271464da87de96bc | [
"MIT"
] | 2 | 2022-02-19T10:51:00.000Z | 2022-03-22T03:11:35.000Z | from utime import ticks_ms
import gc
ITEM_CNT = 200
def draw_event_cb(e):
obj = e.get_target()
dsc = lv.obj_draw_part_dsc_t.__cast__(e.get_param())
# If the cells are drawn...
if dsc.part == lv.PART.ITEMS:
chk = obj.has_cell_ctrl(dsc.id, 0, lv.table.CELL_CTRL.CUSTOM_1)
rect_dsc = lv.draw_rect_dsc_t()
rect_dsc.init()
if chk:
rect_dsc.bg_color = lv.theme_get_color_primary(obj)
else:
rect_dsc.bg_color = lv.palette_lighten(lv.PALETTE.GREY, 2)
rect_dsc.radius = lv.RADIUS.CIRCLE
sw_area = lv.area_t()
sw_area.x1 = dsc.draw_area.x2 - 50
sw_area.x2 = sw_area.x1 + 40
sw_area.y1 = dsc.draw_area.y1 + dsc.draw_area.get_height() // 2 - 10
sw_area.y2 = sw_area.y1 + 20
dsc.draw_ctx.rect(rect_dsc, sw_area)
rect_dsc.bg_color = lv.color_white()
if chk:
sw_area.x2 -= 2
sw_area.x1 = sw_area.x2 - 16
else:
sw_area.x1 += 2
sw_area.x2 = sw_area.x1 + 16
sw_area.y1 += 2
sw_area.y2 -= 2
dsc.draw_ctx.rect(rect_dsc, sw_area)
def change_event_cb(e):
obj = e.get_target()
row = lv.C_Pointer()
col = lv.C_Pointer()
table.get_selected_cell(row, col)
# print("row: ",row.uint_val)
chk = table.has_cell_ctrl(row.uint_val, 0, lv.table.CELL_CTRL.CUSTOM_1)
if chk:
table.clear_cell_ctrl(row.uint_val, 0, lv.table.CELL_CTRL.CUSTOM_1)
else:
table.add_cell_ctrl(row.uint_val, 0, lv.table.CELL_CTRL.CUSTOM_1)
#
# A very lightweight list created from a table
#
# Measure memory usage
gc.enable()
gc.collect()
mem_free = gc.mem_free()
print("mem_free: ", mem_free)
t = ticks_ms()
print("ticks: ", t)
table = lv.table(lv.scr_act())
# Set a smaller height to the table. It'll make it scrollable
table.set_size(150, 200)
table.set_col_width(0, 150)
table.set_row_cnt(ITEM_CNT) # Not required, but avoids a lot of memory reallocation in lv_table_set_cell_value
table.set_col_cnt(1)
# Don't make the cell pressed, we will draw something different in the event
table.remove_style(None, lv.PART.ITEMS | lv.STATE.PRESSED)
for i in range(ITEM_CNT):
table.set_cell_value(i, 0, "Item " + str(i+1))
table.align(lv.ALIGN.CENTER, 0, -20)
# Add an event callback to apply some custom drawing
table.add_event_cb(draw_event_cb, lv.EVENT.DRAW_PART_END, None)
table.add_event_cb(change_event_cb, lv.EVENT.VALUE_CHANGED, None)
gc.collect()
mem_used = mem_free - gc.mem_free()
elaps = ticks_ms()-t
label = lv.label(lv.scr_act())
label.set_text(str(ITEM_CNT) + " items were created in " + str(elaps) + " ms\n using " + str(mem_used) + " bytes of memory")
#label.set_text(str(ITEM_CNT) + " items were created in " + str(elaps) + " ms")
label.align(lv.ALIGN.BOTTOM_MID, 0, -10)
| 29.729167 | 124 | 0.655922 |
79413205425ba509658c91258a11ef0bc352c550 | 239 | py | Python | test/mitmproxy/data/addonscripts/stream_modify.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | test/mitmproxy/data/addonscripts/stream_modify.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | test/mitmproxy/data/addonscripts/stream_modify.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | from mitmproxy import ctx
def modify(chunks):
for chunk in chunks:
yield chunk.replace(b"foo", b"bar")
def running():
ctx.log.info("stream_modify running")
def responseheaders(flow):
flow.response.stream = modify
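# Run as a mitmproxy addon script, e.g. (command is illustrative):
#
#     mitmproxy -s stream_modify.py
#
# Assigning a callable to flow.response.stream makes mitmproxy pass the
# response body through it chunk by chunk instead of buffering it whole.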
| 15.933333 | 43 | 0.686192 |
7941322cf5b41e6423f3bdb5dd9a1f321dd7f2f0 | 3,554 | py | Python | gds_metrics/__init__.py | alphagov-mirror/gds_metrics_python | 8e350f067765a57fd3f86d949caa786eadb3c982 | [
"MIT"
] | 1 | 2018-10-30T09:54:50.000Z | 2018-10-30T09:54:50.000Z | gds_metrics/__init__.py | alphagov-mirror/gds_metrics_python | 8e350f067765a57fd3f86d949caa786eadb3c982 | [
"MIT"
] | 10 | 2018-04-20T10:35:44.000Z | 2020-12-31T10:12:41.000Z | gds_metrics/__init__.py | alphagov-mirror/gds_metrics_python | 8e350f067765a57fd3f86d949caa786eadb3c982 | [
"MIT"
] | 5 | 2019-01-08T13:35:07.000Z | 2021-04-10T20:20:05.000Z | import gzip
import hmac
import json
import os
from time import monotonic
from flask import abort, g, request, Response
from flask.signals import got_request_exception, request_finished
# set multiprocess temp directory before we import prometheus_client
os.environ.setdefault('prometheus_multiproc_dir', '/tmp') # noqa
import prometheus_client
from prometheus_client import multiprocess, CollectorRegistry
from .metrics import ( # noqa proxy metric types imports
Counter,
Gauge,
Summary,
Histogram,
HTTP_SERVER_EXCEPTIONS_TOTAL,
HTTP_SERVER_REQUEST_DURATION_SECONDS,
HTTP_SERVER_REQUESTS_TOTAL,
)
class GDSMetrics(object):
def __init__(self):
self.metrics_path = os.environ.get('PROMETHEUS_METRICS_PATH', '/metrics')
if os.environ.get("METRICS_BASIC_AUTH", "true") == "true":
self.auth_token = json.loads(os.environ.get("VCAP_APPLICATION", "{}")).get("application_id")
else:
self.auth_token = False
self.registry = CollectorRegistry()
multiprocess.MultiProcessCollector(self.registry)
def init_app(self, app):
app.add_url_rule(self.metrics_path, 'metrics', self.metrics_endpoint)
app.before_request(self.before_request)
request_finished.connect(self.teardown_request, sender=app)
got_request_exception.connect(self.handle_exception, sender=app)
def metrics_endpoint(self):
if self.auth_token:
auth_header = request.headers.get('Authorization', '')
if not auth_header:
abort(401)
elif not hmac.compare_digest(auth_header, 'Bearer {}'.format(self.auth_token)):
abort(403)
response = Response(
prometheus_client.generate_latest(self.registry),
mimetype='text/plain; version=0.0.4; charset=utf-8',
headers={
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate',
}
)
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.data = gzip.compress(response.data)
response.headers['Content-Encoding'] = 'gzip'
response.headers['Content-Length'] = len(response.data)
response.headers['Vary'] = 'Accept-Encoding'
return response
def before_request(self, *args, **kwargs):
g._gds_metrics_start_time = monotonic()
def teardown_request(self, sender, response, *args, **kwargs):
if hasattr(g, "_gds_metrics_start_time"):
# There is not a guarantee that `g._gds_metrics_start_time` will exist
# as our `before_request` function will not run if the flask app has
# another `before_request` that runs before ours and throws an exception
resp_time = monotonic() - g._gds_metrics_start_time
HTTP_SERVER_REQUEST_DURATION_SECONDS.labels(
request.method,
request.host,
request.url_rule.rule if request.url_rule else 'No endpoint',
response.status_code
).observe(resp_time)
HTTP_SERVER_REQUESTS_TOTAL.labels(
request.method,
request.host,
request.url_rule.rule if request.url_rule else 'No endpoint',
response.status_code
).inc()
return response
def handle_exception(self, sender, exception, *args, **kwargs):
HTTP_SERVER_EXCEPTIONS_TOTAL.labels(type(exception)).inc()
| 35.188119 | 104 | 0.660664 |
7941322e93d57f799e62599bd06b93d01f90983a | 3,641 | py | Python | tests/unit/stage/test_stage.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/unit/stage/test_stage.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/unit/stage/test_stage.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | import os
import signal
import subprocess
import threading
import mock
import pytest
from dvc.dependency.repo import RepoDependency
from dvc.stage import Stage
from dvc.stage.exceptions import StageUpdateError
TEST_STAGE_DICT = {
"md5": "123456",
"cmd": "mycmd",
"outs": [{"path": "a", "md5": "123456789"}],
"deps": [{"path": "b", "md5": "987654321"}],
}
def test_stage_checksum():
stage = Stage(None, "path", cmd="mycmd")
with mock.patch.object(stage, "dumpd", return_value=TEST_STAGE_DICT):
assert stage.compute_md5() == "e9521a22111493406ea64a88cda63e0b"
def test_wdir_default_ignored():
stage = Stage(None, "path", cmd="mycmd")
d = dict(TEST_STAGE_DICT, wdir=".")
with mock.patch.object(stage, "dumpd", return_value=d):
assert stage.compute_md5() == "e9521a22111493406ea64a88cda63e0b"
def test_wdir_non_default_is_not_ignored():
stage = Stage(None, "path", cmd="mycmd")
d = dict(TEST_STAGE_DICT, wdir="..")
with mock.patch.object(stage, "dumpd", return_value=d):
assert stage.compute_md5() == "2ceba15e87f6848aa756502c1e6d24e9"
def test_meta_ignored():
stage = Stage(None, "path", cmd="mycmd")
d = dict(TEST_STAGE_DICT, meta={"author": "Suor"})
with mock.patch.object(stage, "dumpd", return_value=d):
assert stage.compute_md5() == "e9521a22111493406ea64a88cda63e0b"
def test_path_conversion(dvc):
stage = Stage(dvc, "path")
stage.wdir = os.path.join("..", "..")
assert stage.dumpd()["wdir"] == "../.."
def test_stage_update(mocker):
dep = RepoDependency({"url": "example.com"}, None, "dep_path")
mocker.patch.object(dep, "update", return_value=None)
stage = Stage(None, "path", deps=[dep])
reproduce = mocker.patch.object(stage, "reproduce")
is_repo_import = mocker.patch(
__name__ + ".Stage.is_repo_import", new_callable=mocker.PropertyMock
)
is_repo_import.return_value = True
stage.update()
    reproduce.assert_called_once_with()
is_repo_import.return_value = False
with pytest.raises(StageUpdateError):
stage.update()
@pytest.mark.skipif(
not isinstance(threading.current_thread(), threading._MainThread),
reason="Not running in the main thread.",
)
def test_stage_run_ignore_sigint(dvc, mocker):
proc = mocker.Mock()
communicate = mocker.Mock()
proc.configure_mock(returncode=0, communicate=communicate)
popen = mocker.patch.object(subprocess, "Popen", return_value=proc)
signal_mock = mocker.patch("signal.signal")
dvc.run(cmd="path", single_stage=True)
    popen.assert_called_once()
    communicate.assert_called_once_with()
signal_mock.assert_any_call(signal.SIGINT, signal.SIG_IGN)
assert signal.getsignal(signal.SIGINT) == signal.default_int_handler
def test_always_changed(dvc):
stage = Stage(dvc, "path", always_changed=True)
stage.save()
with dvc.lock:
assert stage.changed()
assert stage.status()["path"] == ["always changed"]
def test_external_outs(tmp_path_factory, dvc):
from dvc.stage import create_stage
from dvc.stage.exceptions import StageExternalOutputsError
tmp_path = tmp_path_factory.mktemp("external-outs")
foo = tmp_path / "foo"
foo.write_text("foo")
with pytest.raises(StageExternalOutputsError):
create_stage(Stage, dvc, "path.dvc", outs=[os.fspath(foo)])
with dvc.config.edit() as conf:
conf["remote"]["myremote"] = {"url": os.fspath(tmp_path)}
create_stage(Stage, dvc, "path.dvc", outs=["remote://myremote/foo"])
create_stage(Stage, dvc, "path.dvc", outs=[os.fspath(foo)], external=True)
| 30.090909 | 78 | 0.69047 |
7941344f5d6bc160a909b62ddee12555eab78be2 | 2,462 | py | Python | doc/source/conf.py | mateusz-blaszkowski/searchlight-ui | 27763ffc3d309697b3492f0f9c93b2dc3dfc6e49 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | mateusz-blaszkowski/searchlight-ui | 27763ffc3d309697b3492f0f9c93b2dc3dfc6e49 | [
"Apache-2.0"
] | 1 | 2016-04-20T23:50:10.000Z | 2016-04-20T23:50:10.000Z | doc/source/conf.py | mateusz-blaszkowski/searchlight-ui | 27763ffc3d309697b3492f0f9c93b2dc3dfc6e49 | [
"Apache-2.0"
] | 1 | 2016-04-05T23:33:23.000Z | 2016-04-05T23:33:23.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'searchlight-ui'
copyright = u'2016, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| 32.394737 | 79 | 0.696994 |
794134cc53beab1c7e0b35d18a88f1e99037ac30 | 9,311 | py | Python | examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py | TanayGahlot/mne-python | 857aa97c201451b82931c5eba50642975afc423d | [
"BSD-3-Clause"
] | null | null | null | examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py | TanayGahlot/mne-python | 857aa97c201451b82931c5eba50642975afc423d | [
"BSD-3-Clause"
] | null | null | null | examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py | TanayGahlot/mne-python | 857aa97c201451b82931c5eba50642975afc423d | [
"BSD-3-Clause"
] | null | null | null | """
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
for to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to one single channel of which we know
that it exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude with accounting for multiple comparisons by
performing a permutation clustering test using the ANOVA as
clustering function. The results final will be compared to
multiple comparisons using False Discovery Rate correction.
"""
# Authors: Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import f_threshold_twoway_rm, f_twoway_rm, fdr_correction
from mne.datasets import sample
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = raw.info['ch_names'][picks[0]]
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
# make sure all conditions have the same counts, as the ANOVA expects a
# fully balanced data matrix and does not tolerate imbalances gracefully
# (risk of type-I error)
epochs.equalize_event_counts(event_id, copy=False)
# Time vector
times = 1e3 * epochs.times # change unit to ms
# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.
decim = 2
frequencies = np.arange(7, 30, 3) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
n_cycles = frequencies / frequencies[0]
baseline_mask = times[::decim] < 0
# now create TFR representations for all conditions
epochs_power = []
for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
this_power = single_trial_power(condition, sfreq=sfreq, frequencies=frequencies,
n_cycles=n_cycles, use_fft=False,
decim=decim)
this_power = this_power[:, 0, :, :] # we only have one channel.
# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
this_power /= epochs_baseline[..., np.newaxis]
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
# we will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor_levels argument, which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_frequencies = len(frequencies)
n_times = len(times[::decim])
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_frequencies * n_times)
# so we have replications * conditions * observations:
print(data.shape)
# while the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# A1B1 A1B2 A2B1 A2B2
# trial 1 1.34 2.53 0.97 1.74
# trial ... .... .... .... ....
# trial 56 2.45 7.90 3.09 4.76
#
# Now we're ready to run our repeated measures ANOVA.
fvals, pvals = f_twoway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
import matplotlib.pyplot as plt
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect = np.ma.masked_array(effect, [sig > .05])
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
# Note. As we treat trials as subjects, the test only accounts for
# time locked responses despite the 'induced' approach.
# For analysis of induced power at the group level, averaged TFRs
# are required.
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. Also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
# A stat_fun must deal with a variable number of input arguments.
def stat_fun(*args):
    # Inside the clustering function each condition will be passed as a
    # flattened array, as required by the clustering procedure.
# The ANOVA however expects an input array of dimensions:
# subjects X conditions X observations (optional).
# The following expression catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
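    # Concretely: args arrives with shape
    # (n_conditions, n_replications, n_observations); the swap yields
    # (n_replications, n_conditions, n_observations), which is what
    # f_twoway_rm expects.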
return f_twoway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
pthresh = 0.00001 # set threshold rather high to save some time
f_thresh = f_threshold_twoway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None)
# Create new stats image with only significant clusters
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
                                np.invert(clusters[np.squeeze(good_clusters)]))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Time-locked response for \'modality by location\' (%s)\n'
' cluster-level corrected (p <= 0.05)' % ch_name)
plt.show()
# now using FDR
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Time-locked response for \'modality by location\' (%s)\n'
' FDR corrected (p <= 0.05)' % ch_name)
plt.show()
# Both cluster-level and FDR correction help to get rid of the putatively
# significant spots we saw in the naive f-images.
| 41.566964 | 84 | 0.683922 |
794134cff5d543463496cb7c77d3629f48dab77f | 937 | py | Python | eval.py | quangdaist123/catr | 3c6cb95b6be2f82ed45178bd6039bc49e1b973c7 | [
"Apache-2.0"
] | null | null | null | eval.py | quangdaist123/catr | 3c6cb95b6be2f82ed45178bd6039bc49e1b973c7 | [
"Apache-2.0"
] | null | null | null | eval.py | quangdaist123/catr | 3c6cb95b6be2f82ed45178bd6039bc49e1b973c7 | [
"Apache-2.0"
] | null | null | null | from nltk.translate.bleu_score import corpus_bleu
import json
truth_json = json.load(open("C:/Users/quang/Desktop/temp_test.json"))
pred_json = json.load(open("C:/Users/quang/Desktop/temp_test_res.json"))
references = []
candidates = []
for prediction in pred_json:
ref_temp = []
    # corpus_bleu expects tokenized word lists, not raw caption strings
    candidates.append(prediction["image_caption"].split())
for ground_truth in truth_json["annotations"]:
if int(prediction["image_id"]) == int(ground_truth["image_id"]):
            ref_temp.append(ground_truth["caption"].split())
references.append(ref_temp)
print('Cumulative 1-gram: %f' % corpus_bleu(references, candidates, weights=(1, 0, 0, 0)))
print('Cumulative 2-gram: %f' % corpus_bleu(references, candidates, weights=(0.5, 0.5, 0, 0)))
print('Cumulative 3-gram: %f' % corpus_bleu(references, candidates, weights=(0.33, 0.33, 0.33, 0)))
print('Cumulative 4-gram: %f' % corpus_bleu(references, candidates, weights=(0.25, 0.25, 0.25, 0.25))) | 46.85 | 102 | 0.707577 |
7941355a76edd9e4bff4988571bb3a5d0068a242 | 33,242 | py | Python | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.common.services.network import neutron
from rally_openstack.task import scenario
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NeutronBaseScenario(scenario.OpenStackScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
def __init__(self, *args, **kwargs):
super(NeutronBaseScenario, self).__init__(*args, **kwargs)
if hasattr(self, "_clients"):
self.neutron = neutron.NeutronService(
clients=self._clients,
name_generator=self.generate_random_name,
atomic_inst=self.atomic_actions()
)
class NeutronScenario(NeutronBaseScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
# TODO(rkiran): modify in case LBaaS-v2 requires
LB_METHOD = "ROUND_ROBIN"
LB_PROTOCOL = "HTTP"
LB_PROTOCOL_PORT = 80
HM_TYPE = "PING"
HM_MAX_RETRIES = 3
HM_DELAY = 20
HM_TIMEOUT = 10
def _get_network_id(self, network, **kwargs):
"""Get Neutron network ID for the network name.
:param network: str, network name/id
:param kwargs: dict, network options
:returns: str, Neutron network-id
"""
try:
return self.neutron.find_network(network)["id"]
except exceptions.GetResourceFailure:
raise exceptions.NotFoundException(
message="Network %s not found." % network)
@property
def _ext_gw_mode_enabled(self):
"""Determine if the ext-gw-mode extension is enabled.
Without this extension, we can't pass the enable_snat parameter.
"""
return self.neutron.supports_extension("ext-gw-mode", silent=True)
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args.pop("name", None)
return {"network": self.neutron.create_network(**network_create_args)}
def _list_networks(self, **kwargs):
"""Return user networks list.
:param kwargs: network list options
"""
return self.neutron.list_networks(**kwargs)
def _list_agents(self, **kwargs):
"""Fetches agents.
:param kwargs: neutron agent list options
:returns: user agents list
"""
return self.neutron.list_agents(**kwargs)
def _update_network(self, network, network_update_args):
"""Update the network.
This atomic function updates the network with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
network_update_args["name"] = self.generate_random_name()
return {"network": self.neutron.update_network(
network["network"]["id"], **network_update_args)}
def _show_network(self, network, **kwargs):
"""show network details.
:param network: Network object
:param kwargs: dict, POST /v2.0/networks show options
:returns: details of the network
"""
network = self.neutron.get_network(network["network"]["id"], **kwargs)
return {"network": network}
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.neutron.delete_network(network["id"])
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
subnet_create_args.pop("name", None)
subnet_create_args["network_id"] = network["network"]["id"]
subnet_create_args["start_cidr"] = start_cidr
return {"subnet": self.neutron.create_subnet(**subnet_create_args)}
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.neutron.list_subnets()
def _show_subnet(self, subnet, **kwargs):
"""show subnet details.
:param subnet: Subnet object
:param kwargs: Optional additional arguments for subnet show
:returns: details of the subnet
"""
return {"subnet": self.neutron.get_subnet(subnet["subnet"]["id"])}
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet.
This atomic function updates the subnet with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
subnet_update_args["name"] = self.generate_random_name()
return {"subnet": self.neutron.update_subnet(
subnet["subnet"]["id"], **subnet_update_args)}
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.neutron.delete_subnet(subnet["subnet"]["id"])
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args.pop("name", None)
if ("tenant_id" in router_create_args
and "project_id" not in router_create_args):
router_create_args["project_id"] = router_create_args.pop(
"tenant_id")
return {"router": self.neutron.create_router(
discover_external_gw=external_gw, **router_create_args)}
def _list_routers(self):
"""Returns user routers list."""
return self.neutron.list_routers()
def _show_router(self, router, **kwargs):
"""Show information of a given router.
:param router: ID or name of router to look up
        :param kwargs: dict, POST /v2.0/routers show options
:return: details of the router
"""
return {"router": self.neutron.get_router(
router["router"]["id"], **kwargs)}
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.neutron.delete_router(router["router"]["id"])
def _update_router(self, router, router_update_args):
"""Update the neutron router.
This atomic function updates the router with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
router_update_args["name"] = self.generate_random_name()
return {"router": self.neutron.update_router(
router["router"]["id"], **router_update_args)}
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
return {"port": self.neutron.create_port(
network_id=network["network"]["id"], **port_create_args)}
def _list_ports(self):
"""Return user ports list."""
return self.neutron.list_ports()
def _show_port(self, port, **params):
"""Return user port details.
:param port: dict, neutron port
:param params: neutron port show options
:returns: neutron port dict
"""
return {"port": self.neutron.get_port(port["port"]["id"], **params)}
def _update_port(self, port, port_update_args):
"""Update the neutron port.
This atomic function updates port with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
port_update_args["name"] = self.generate_random_name()
return {"port": self.neutron.update_port(port["port"]["id"],
**port_update_args)}
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.neutron.delete_port(port["port"]["id"])
@logging.log_deprecated_args(
"network_create_args is deprecated; use the network context instead",
"0.1.0", "network_create_args")
def _get_or_create_network(self, network_create_args=None):
"""Get a network from context, or create a new one.
This lets users either create networks with the 'network'
context, provide existing networks with the 'existing_network'
context, or let the scenario create a default network for
them. Running this without one of the network contexts is
deprecated.
:param network_create_args: Deprecated way to provide network
creation args; use the network
context instead.
:returns: Network dict
"""
if "networks" in self.context["tenant"]:
return {"network":
random.choice(self.context["tenant"]["networks"])}
else:
LOG.warning("Running this scenario without either the 'network' "
"or 'existing_network' context is deprecated")
return self._create_network(network_create_args or {})
def _create_subnets(self, network,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=1):
"""Create <count> new subnets in the given network.
:param network: network to create subnets in
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:returns: List of subnet dicts
"""
return [self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
for i in range(subnets_per_network)]
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
        :param network_create_args: dict, POST /v2.0/networks request options
        :param subnet_create_args: dict, POST /v2.0/subnets request options
        :param subnets_per_network: int, number of subnets for one network
        :param subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
subnet_create_args = dict(subnet_create_args or {})
subnet_create_args["start_cidr"] = subnet_cidr_start
net_topo = self.neutron.create_network_topology(
network_create_args=(network_create_args or {}),
subnet_create_args=subnet_create_args,
subnets_count=subnets_per_network
)
subnets = [{"subnet": s} for s in net_topo["subnets"]]
return {"network": net_topo["network"]}, subnets
def _create_network_structure(self, network_create_args=None,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=None,
router_create_args=None):
"""Create a network and a given number of subnets and routers.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:param router_create_args: dict, POST /v2.0/routers request options
:returns: tuple of (network, subnets, routers)
"""
subnet_create_args = dict(subnet_create_args or {})
subnet_create_args["start_cidr"] = subnet_cidr_start
net_topo = self.neutron.create_network_topology(
network_create_args=(network_create_args or {}),
router_create_args=(router_create_args or {}),
router_per_subnet=True,
subnet_create_args=subnet_create_args,
subnets_count=subnets_per_network
)
return ({"network": net_topo["network"]},
[{"subnet": s} for s in net_topo["subnets"]],
[{"router": r} for r in net_topo["routers"]])
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.neutron.add_interface_to_router(router_id=router["id"],
subnet_id=subnet["id"])
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.neutron.remove_interface_from_router(
router_id=router["id"], subnet_id=subnet["id"])
def _add_gateway_router(self, router, ext_net, enable_snat=None):
"""Set the external network gateway for a router.
:param router: dict, neutron router
:param ext_net: external network for the gateway
:param enable_snat: True if enable snat, None to avoid update
"""
self.neutron.add_gateway_to_router(
router_id=router["router"]["id"],
network_id=ext_net["network"]["id"],
enable_snat=enable_snat
)
def _remove_gateway_router(self, router):
"""Removes an external network gateway from the specified router.
:param router: dict, neutron router
"""
self.neutron.remove_gateway_from_router(router["router"]["id"])
@atomic.action_timer("neutron.create_pool")
def _create_lb_pool(self, subnet_id, **pool_create_args):
"""Create LB pool(v1)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:returns: dict, neutron lb pool
"""
args = {"lb_method": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"subnet_id": subnet_id}
args.update(pool_create_args)
return self.clients("neutron").create_pool({"pool": args})
def _create_v1_pools(self, networks, **pool_create_args):
"""Create LB pools(v1)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
pools = []
for net in networks:
subnets.extend(net.get("subnets", []))
for subnet_id in subnets:
pools.append(self._create_lb_pool(
subnet_id, **pool_create_args))
return pools
@atomic.action_timer("neutron.list_pools")
def _list_v1_pools(self, **kwargs):
"""Return user lb pool list(v1)."""
return self.clients("neutron").list_pools(**kwargs)
@atomic.action_timer("neutron.delete_pool")
def _delete_v1_pool(self, pool):
"""Delete neutron pool.
:param pool: Pool object
"""
self.clients("neutron").delete_pool(pool["id"])
@atomic.action_timer("neutron.update_pool")
def _update_v1_pool(self, pool, **pool_update_args):
"""Update pool.
This atomic function updates the pool with pool_update_args.
:param pool: Pool object
:param pool_update_args: dict, POST /lb/pools update options
:returns: updated neutron pool dict
"""
pool_update_args["name"] = self.generate_random_name()
body = {"pool": pool_update_args}
return self.clients("neutron").update_pool(pool["pool"]["id"], body)
def _create_v1_vip(self, pool, **vip_create_args):
"""Create VIP(v1)
        :param pool: dict, neutron lb-pool
        :param vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"pool_id": pool["pool"]["id"],
"subnet_id": pool["pool"]["subnet_id"]}
args.update(vip_create_args)
return self.clients("neutron").create_vip({"vip": args})
@atomic.action_timer("neutron.list_vips")
def _list_v1_vips(self, **kwargs):
"""Return user lb vip list(v1)."""
return self.clients("neutron").list_vips(**kwargs)
@atomic.action_timer("neutron.delete_vip")
def _delete_v1_vip(self, vip):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_vip(vip["id"])
@atomic.action_timer("neutron.update_vip")
def _update_v1_vip(self, vip, **vip_update_args):
"""Updates vip.
This atomic function updates vip name and admin state
:param vip: Vip object
:param vip_update_args: dict, POST /lb/vips update options
:returns: updated neutron vip dict
"""
vip_update_args["name"] = self.generate_random_name()
body = {"vip": vip_update_args}
return self.clients("neutron").update_vip(vip["vip"]["id"], body)
def _create_floatingip(self, floating_network, **floating_ip_args):
"""Create floating IP with floating_network.
:param floating_network: str, external network to create floating IP
:param floating_ip_args: dict, POST /floatingips create options
:returns: dict, neutron floating IP
"""
return {"floatingip": self.neutron.create_floatingip(
floating_network=floating_network, **floating_ip_args)}
def _list_floating_ips(self, **kwargs):
"""Return floating IPs list."""
return {"floatingips": self.neutron.list_floatingips(**kwargs)}
def _delete_floating_ip(self, floating_ip):
"""Delete floating IP.
        :param floating_ip: dict, floating IP object
"""
return self.neutron.delete_floatingip(floating_ip["id"])
def _associate_floating_ip(self, floatingip, port):
"""Associate floating IP with port.
:param floatingip: floating IP dict
:param port: port dict
:returns: updated floating IP dict
"""
return self.neutron.associate_floatingip(
port_id=port["id"],
floatingip_id=floatingip["id"])
def _dissociate_floating_ip(self, floatingip):
"""Dissociate floating IP from ports.
:param floatingip: floating IP dict
:returns: updated floating IP dict
"""
return self.neutron.dissociate_floatingip(
floatingip_id=floatingip["id"])
@atomic.action_timer("neutron.create_healthmonitor")
def _create_v1_healthmonitor(self, **healthmonitor_create_args):
"""Create LB healthmonitor.
This atomic function creates healthmonitor with the provided
healthmonitor_create_args.
:param healthmonitor_create_args: dict, POST /lb/healthmonitors
:returns: neutron healthmonitor dict
"""
args = {"type": self.HM_TYPE,
"delay": self.HM_DELAY,
"max_retries": self.HM_MAX_RETRIES,
"timeout": self.HM_TIMEOUT}
args.update(healthmonitor_create_args)
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
@atomic.action_timer("neutron.list_healthmonitors")
def _list_v1_healthmonitors(self, **kwargs):
"""List LB healthmonitors.
        This atomic function lists all healthmonitors.
:param kwargs: optional parameters
:returns: neutron lb healthmonitor list
"""
return self.clients("neutron").list_health_monitors(**kwargs)
@atomic.action_timer("neutron.delete_healthmonitor")
def _delete_v1_healthmonitor(self, healthmonitor):
"""Delete neutron healthmonitor.
:param healthmonitor: neutron healthmonitor dict
"""
self.clients("neutron").delete_health_monitor(healthmonitor["id"])
@atomic.action_timer("neutron.update_healthmonitor")
def _update_v1_healthmonitor(self, healthmonitor,
**healthmonitor_update_args):
"""Update neutron healthmonitor.
:param healthmonitor: neutron lb healthmonitor dict
:param healthmonitor_update_args: POST /lb/healthmonitors
update options
:returns: updated neutron lb healthmonitor dict
"""
body = {"health_monitor": healthmonitor_update_args}
return self.clients("neutron").update_health_monitor(
healthmonitor["health_monitor"]["id"], body)
def _create_security_group(self, **security_group_create_args):
"""Create Neutron security-group.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
:returns: dict, neutron security-group
"""
security_group_create_args["name"] = self.generate_random_name()
return {"security_group": self.neutron.create_security_group(
**security_group_create_args)}
def _delete_security_group(self, security_group):
"""Delete Neutron security group.
:param security_group: dict, neutron security_group
"""
return self.neutron.delete_security_group(
security_group["security_group"]["id"])
def _list_security_groups(self, **kwargs):
"""Return list of Neutron security groups."""
return {"security_groups": self.neutron.list_security_groups(**kwargs)}
def _show_security_group(self, security_group, **kwargs):
"""Show security group details.
:param security_group: dict, neutron security_group
:param kwargs: Optional additional arguments for security_group show
:returns: security_group details
"""
return {"security_group": self.neutron.get_security_group(
security_group["security_group"]["id"], **kwargs)}
def _update_security_group(self, security_group,
**security_group_update_args):
"""Update Neutron security-group.
:param security_group: dict, neutron security_group
:param security_group_update_args: dict, POST /v2.0/security-groups
update options
:returns: dict, updated neutron security-group
"""
security_group_update_args["name"] = self.generate_random_name()
return {"security_group": self.neutron.update_security_group(
security_group["security_group"]["id"],
**security_group_update_args)}
def update_loadbalancer_resource(self, lb):
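        # Polling helper for utils.wait_for_status: re-fetch the load balancer
        # and map a 404 onto GetResourceNotFound so waiting code can tell a
        # deleted resource apart from a transient API failure.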
try:
new_lb = self.clients("neutron").show_loadbalancer(lb["id"])
except Exception as e:
if getattr(e, "status_code", 400) == 404:
raise exceptions.GetResourceNotFound(resource=lb)
raise exceptions.GetResourceFailure(resource=lb, err=e)
return new_lb["loadbalancer"]
@atomic.action_timer("neutron.create_lbaasv2_loadbalancer")
def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args):
"""Create LB loadbalancer(v2)
:param subnet_id: str, neutron subnet-id
:param lb_create_args: dict, POST /lbaas/loadbalancers request options
:returns: dict, neutron lb
"""
args = {"name": self.generate_random_name(),
"vip_subnet_id": subnet_id}
args.update(lb_create_args)
neutronclient = self.clients("neutron")
lb = neutronclient.create_loadbalancer({"loadbalancer": args})
lb = lb["loadbalancer"]
lb = utils.wait_for_status(
lb,
ready_statuses=["ACTIVE"],
status_attr="provisioning_status",
update_resource=self.update_loadbalancer_resource,
timeout=CONF.openstack.neutron_create_loadbalancer_timeout,
check_interval=(
CONF.openstack.neutron_create_loadbalancer_poll_interval)
)
return lb
@atomic.action_timer("neutron.list_lbaasv2_loadbalancers")
def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args):
"""List LB loadbalancers(v2)
:param lb_list_args: dict, POST /lbaas/loadbalancers request options
:returns: dict, neutron lb loadbalancers(v2)
"""
return self.clients("neutron").list_loadbalancers(retrieve_all,
**lb_list_args)
@atomic.action_timer("neutron.create_bgpvpn")
def _create_bgpvpn(self, **kwargs):
"""Create Bgpvpn resource (POST /bgpvpn/bgpvpn)
:param kwargs: optional parameters to create BGP VPN
:returns dict, bgpvpn resource details
"""
kwargs["name"] = self.generate_random_name()
return self.admin_clients("neutron").create_bgpvpn({"bgpvpn": kwargs})
@atomic.action_timer("neutron.delete_bgpvpn")
def _delete_bgpvpn(self, bgpvpn):
"""Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id})
:param bgpvpn: dict, bgpvpn
:return dict, bgpvpn
"""
return self.admin_clients("neutron").delete_bgpvpn(
bgpvpn["bgpvpn"]["id"])
@atomic.action_timer("neutron.list_bgpvpns")
def _list_bgpvpns(self, **kwargs):
"""Return bgpvpns list.
:param kwargs: dict, GET /bgpvpn/bgpvpns request options
:returns: bgpvpns list
"""
return self.admin_clients("neutron").list_bgpvpns(
True, **kwargs)["bgpvpns"]
@atomic.action_timer("neutron.update_bgpvpn")
def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs):
"""Update a bgpvpn.
:param bgpvpn: dict, bgpvpn
        :param update_name: bool, whether or not to modify
BGP VPN name
:param **kwargs: dict, PUT /bgpvpn/bgpvpns update options
:return dict, updated bgpvpn
"""
if update_name or "name" in kwargs:
kwargs["name"] = self.generate_random_name()
return self.admin_clients("neutron").update_bgpvpn(
bgpvpn["bgpvpn"]["id"], {"bgpvpn": kwargs})
@atomic.action_timer("neutron.create_bgpvpn_network_assoc")
def _create_bgpvpn_network_assoc(self, bgpvpn, network):
"""Creates a new BGP VPN network association.
:param bgpvpn: dict, bgpvpn
:param network: dict, network
:return dict: network_association
"""
netassoc = {"network_id": network["id"]}
return self.clients("neutron").create_bgpvpn_network_assoc(
bgpvpn["bgpvpn"]["id"], {"network_association": netassoc})
@atomic.action_timer("neutron.delete_bgpvpn_network_assoc")
def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):
"""Delete the specified BGP VPN network association
:param bgpvpn: dict, bgpvpn
:param net_assoc: dict, network
:return dict: network_association
"""
return self.clients("neutron").delete_bgpvpn_network_assoc(
bgpvpn["bgpvpn"]["id"], net_assoc["network_association"]["id"])
@atomic.action_timer("neutron.create_bgpvpn_router_assoc")
def _create_bgpvpn_router_assoc(self, bgpvpn, router):
"""Creates a new BGP VPN router association.
:param bgpvpn: dict, bgpvpn
:param router: dict, router
:return dict: network_association
"""
router_assoc = {"router_id": router["id"]}
return self.clients("neutron").create_bgpvpn_router_assoc(
bgpvpn["bgpvpn"]["id"], {"router_association": router_assoc})
@atomic.action_timer("neutron.delete_bgpvpn_router_assoc")
def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):
"""Delete the specified BGP VPN router association
:param bgpvpn: dict, bgpvpn
:param router_assoc: dict, router
:return dict: router_association
"""
return self.clients("neutron").delete_bgpvpn_router_assoc(
bgpvpn["bgpvpn"]["id"], router_assoc["router_association"]["id"])
@atomic.action_timer("neutron.list_bgpvpn_network_assocs")
def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs):
"""List network association of bgpvpn
:param bgpvpn: dict, bgpvpn
:param **kwargs: dict, optional parameters
:return dict: network_association
"""
return self.clients("neutron").list_bgpvpn_network_assocs(
bgpvpn["bgpvpn"]["id"], **kwargs)
@atomic.action_timer("neutron.list_bgpvpn_router_assocs")
def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs):
"""List router association of bgpvpn
:param bgpvpn: dict, bgpvpn
:param **kwargs: dict, optional parameters
:return dict: router_association
"""
return self.clients("neutron").list_bgpvpn_router_assocs(
bgpvpn["bgpvpn"]["id"], **kwargs)
def _create_security_group_rule(self, security_group_id,
**security_group_rule_args):
"""Create Neutron security-group-rule.
:param security_group_id: id of neutron security_group
:param security_group_rule_args: dict, POST
/v2.0/security-group-rules request options
:returns: dict, neutron security-group-rule
"""
return {"security_group_rule": self.neutron.create_security_group_rule(
security_group_id, **security_group_rule_args
)}
def _list_security_group_rules(self, **kwargs):
"""List all security group rules.
:param kwargs: Optional additional arguments for roles list
:return: list of security group rules
"""
result = self.neutron.list_security_group_rules(**kwargs)
return {"security_group_rules": result}
def _show_security_group_rule(self, security_group_rule, **kwargs):
"""Show information of a given security group rule.
:param security_group_rule: id of security group rule
:param kwargs: Optional additional arguments for roles list
:return: details of security group rule
"""
return {"security_group_rule": self.neutron.get_security_group_rule(
security_group_rule, **kwargs)}
def _delete_security_group_rule(self, security_group_rule):
"""Delete a given security group rule.
:param security_group_rule: id of security group rule
"""
self.neutron.delete_security_group_rule(security_group_rule)
@atomic.action_timer("neutron.delete_trunk")
def _delete_trunk(self, trunk_port):
self.clients("neutron").delete_trunk(trunk_port["port_id"])
@atomic.action_timer("neutron.create_trunk")
def _create_trunk(self, trunk_payload):
trunk_payload["name"] = self.generate_random_name()
return self.clients("neutron").create_trunk({"trunk": trunk_payload})
@atomic.action_timer("neutron.list_trunks")
def _list_trunks(self, **kwargs):
return self.clients("neutron").list_trunks(**kwargs)["trunks"]
@atomic.action_timer("neutron.list_subports_by_trunk")
def _list_subports_by_trunk(self, trunk_id):
return self.clients("neutron").trunk_get_subports(trunk_id)
@atomic.action_timer("neutron._add_subports_to_trunk")
def _add_subports_to_trunk(self, trunk_id, subports):
return self.clients("neutron").trunk_add_subports(
trunk_id, {"sub_ports": subports})
def _list_ports_by_device_id(self, device_id):
return self.neutron.list_ports(device_id=device_id)
| 38.430058 | 79 | 0.641357 |
794135f7408600359a33164754d7110c831bcaa8 | 151 | py | Python | manager.py | TuringEmmy/Movies | 54597698edd8f9d88a240970c180649a83cfddab | [
"MIT"
] | 1 | 2018-10-02T02:33:37.000Z | 2018-10-02T02:33:37.000Z | manager.py | TuringEmmy/Movies | 54597698edd8f9d88a240970c180649a83cfddab | [
"MIT"
] | null | null | null | manager.py | TuringEmmy/Movies | 54597698edd8f9d88a240970c180649a83cfddab | [
"MIT"
] | null | null | null | # coding=utf-8
# author TuringEmmy
# emial [email protected]
from app import app
if __name__ == '__main__':
# print(app.url_map)
app.run() | 16.777778 | 28 | 0.688742 |
7941372f855aa71c2579b434b917618f320ed3eb | 4,620 | py | Python | experiment.py | JeyDi/BayesianMLOptimization | ba3ddf5bb9919a5043b4e982dea46425631696d3 | [
"Apache-2.0"
] | 1 | 2019-06-30T08:50:54.000Z | 2019-06-30T08:50:54.000Z | experiment.py | JeyDi/BayesianMLOptimization | ba3ddf5bb9919a5043b4e982dea46425631696d3 | [
"Apache-2.0"
] | null | null | null | experiment.py | JeyDi/BayesianMLOptimization | ba3ddf5bb9919a5043b4e982dea46425631696d3 | [
"Apache-2.0"
] | null | null | null | # import of the required libraries
import numpy as np
import timeit
from pyGPGO.covfunc import squaredExponential
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.surrogates.RandomForest import RandomForest
from pyGPGO.GPGO import GPGO
from pyGPGO.acquisition import Acquisition
from pyGPGO.covfunc import matern32
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
#Import Load Wine Dataset
ds = datasets.load_wine()
print("Dataframe Data Shape: ",ds.data.shape)
print("Dataframe Target Shape: ", ds.target.shape)
def compute_accuracy_SVC(C,gamma,coef0):
clf = svm.SVC(C=C,gamma=gamma,coef0=coef0)
scores = cross_val_score(clf, ds.data, ds.target, cv=10)
return (scores.mean())
np.random.seed(42)
initialPoints = 30
furtherEvaluations = 120
# defining the hyperparameter search space
param = { 'C': ('cont', [0.1,5]),
'gamma': ('cont', [0.1,10]),
'coef0':('cont',[0.1,10])
}
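# Each entry maps a hyperparameter to its type ('cont' = continuous variable)
# and its search bounds [lower, upper], the format pyGPGO expects.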
# creating a GP surrogate model with a Squared Exponential covariance function,
# aka kernel
sexp = squaredExponential()
sur_model_1 = GaussianProcess(sexp)
sur_model_2 = RandomForest()
# setting the acquisition function
acq_1 = Acquisition(mode="ExpectedImprovement")
acq_2 = Acquisition(mode="ProbabilityImprovement")
acq_3 = Acquisition(mode="UCB")
# creating an object Bayesian Optimization
gpgo_gaussian_model_1 = GPGO(sur_model_1,acq_1,compute_accuracy_SVC,param)
gpgo_gaussian_model_2 = GPGO(sur_model_1,acq_2,compute_accuracy_SVC,param)
gpgo_gaussian_model_3 = GPGO(sur_model_1,acq_3,compute_accuracy_SVC,param)
gpgo_random_forest_1 = GPGO(sur_model_2,acq_1,compute_accuracy_SVC,param)
gpgo_random_forest_2 = GPGO(sur_model_2,acq_2,compute_accuracy_SVC,param)
gpgo_random_forest_3 = GPGO(sur_model_2,acq_3,compute_accuracy_SVC,param)
#Run models
gaussianModel_1_start = timeit.default_timer()
gpgo_gaussian_model_1.run(max_iter=furtherEvaluations,init_evals=initialPoints)
gaussianModel_1_stop = timeit.default_timer()
gaussianModel_2_start = timeit.default_timer()
gpgo_gaussian_model_2.run(max_iter=furtherEvaluations,init_evals=initialPoints)
gaussianModel_2_stop = timeit.default_timer()
gaussianModel_3_start = timeit.default_timer()
gpgo_gaussian_model_3.run(max_iter=furtherEvaluations,init_evals=initialPoints)
gaussianModel_3_stop = timeit.default_timer()
randomForest_1_start = timeit.default_timer()
gpgo_random_forest_1.run(max_iter=furtherEvaluations,init_evals=initialPoints)
randomForest_1_stop = timeit.default_timer()
randomForest_2_start = timeit.default_timer()
gpgo_random_forest_2.run(max_iter=furtherEvaluations,init_evals=initialPoints)
randomForest_2_stop = timeit.default_timer()
randomForest_3_start = timeit.default_timer()
gpgo_random_forest_3.run(max_iter=furtherEvaluations,init_evals=initialPoints)
randomForest_3_stop = timeit.default_timer()
#Get the results
print("\n---Results---\n")
print("\n", gpgo_gaussian_model_1.getResult())
print('Gaussian Model 1 Time: ', gaussianModel_1_stop - gaussianModel_1_start)
print("\n", gpgo_gaussian_model_2.getResult())
print('Gaussian Model 2 Time: ', gaussianModel_2_stop - gaussianModel_2_start)
print("\n", gpgo_gaussian_model_3.getResult())
print('Gaussian Model 3 Time: ', gaussianModel_3_stop - gaussianModel_3_start)
print("\n", gpgo_random_forest_1.getResult())
print('Random Forest 1 Time: ', randomForest_1_stop - randomForest_1_start)
print("\n", gpgo_random_forest_2.getResult())
print('Random Forest 2 Time: ', randomForest_2_stop - randomForest_2_start)
print("\n",gpgo_random_forest_3.getResult())
print('Random Forest 3 Time: ', randomForest_3_stop - randomForest_3_start)
#------------------------------
#GRID SEARCH
xrange = list(param.values())[0][1]
yrange = list(param.values())[1][1]
zrange = list(param.values())[2][1]
C_values = np.linspace(xrange[0],xrange[1],5)
gamma_values = np.linspace(yrange[0],yrange[1],6)
def0 = np.linspace(zrange[0],zrange[1],5)
res = [0] * (len(C_values) * len(gamma_values) * len(def0))  # 5 * 6 * 5 = 150
count = 0
grid_search_start = timeit.default_timer()
for cc in C_values:
for gg in gamma_values:
for dd in def0:
res[count] = compute_accuracy_SVC( cc, gg, dd )
count = count+1
grid_search_stop = timeit.default_timer()
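# Note: the grid also uses 150 evaluations (5 * 6 * 5), matching the budget of
# 30 initial + 120 further evaluations given to each Bayesian optimizer above.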
print("\nGrid Search, Best on Grid:"+str(round(max(np.asarray(res)),2))+"%%")
print('Grid Search Time: ', grid_search_stop - grid_search_start)
print("\n\n---Finish---") | 35.267176 | 81 | 0.774459 |
794137329b37795e9b4cf779d0ca3888862a1e5d | 1,241 | py | Python | py/load_data.py | fedderw/polls | 70d997d9d91ac90501cb91f5e7aef536cb5df9dd | [
"MIT"
] | 1 | 2020-08-25T18:14:45.000Z | 2020-08-25T18:14:45.000Z | py/load_data.py | fedderw/polls | 70d997d9d91ac90501cb91f5e7aef536cb5df9dd | [
"MIT"
] | 94 | 2020-08-25T18:23:55.000Z | 2022-01-26T15:48:39.000Z | py/load_data.py | fedderw/polls | 70d997d9d91ac90501cb91f5e7aef536cb5df9dd | [
"MIT"
] | 3 | 2021-05-19T01:30:13.000Z | 2021-06-23T20:49:17.000Z | import pandas as pd
VARIABLE_MAPPING = {
"Poll ID": "poll_id",
"Question ID": "question_id",
"Cross-tab variable 1": "xtab1_var",
"Cross-tab value 1": "xtab1_val",
"Cross-tab variable 2": "xtab2_var",
"Cross-tab value 2": "xtab2_val",
"Sample size": "sample_size",
"Question text": "question_text",
"percentage": "pct",
"Response": "response",
"Favorability": "favorability",
"Date": "date",
"Pollster": "pollster",
"Notes": "notes",
}
def get_data(gid, f):
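    """Download one tab of the shared Google Sheet as CSV and save it locally.
    ``gid`` identifies the individual sheet tab in the export URL; ``f`` is
    the base name of the CSV file written under data/.
    """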
temp = (
pd.read_csv(
"https://docs.google.com/spreadsheets/d/"
+ "1ulqohI6YLYFsz3wm8f5j6BS_JVelu2Ea8SXXa-wwkhQ"
+ "/export?gid="
+ str(gid)
+ "&format=csv",
# Set first column as rownames in data frame
index_col=0,
)
.reset_index()
.rename(columns=VARIABLE_MAPPING)
)
# strip any whitespace from the text
temp.columns = temp.columns.str.strip()
temp.to_csv("data/" + f + ".csv", index=False)
get_data(0, "responses")
get_data(1080881848, "polls")
get_data(1983382452, "pollsters")
get_data(109990425, "questions")
get_data(1152857355, "favorability")
get_data(935642594, "xtab_order")
| 26.978261 | 60 | 0.601934 |
794139c3cb4d1db9fc8a5a1dbd3a676374f069c6 | 1,295 | py | Python | mkt/constants/submit.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/constants/submit.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/constants/submit.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | from tower import ugettext_lazy as _
APP_STEPS = [
('terms', _('Agreement')),
('manifest', _('Submit')),
('details', _('Details')),
('done', _('Done!')),
]
APP_STEPS_TITLE = dict(APP_STEPS)
# The sizes for the image assets for apps.
APP_IMAGE_SIZES = [
{'size': (32, 32),
'has_background': False,
'required': False,
'slug': 'featured_tile',
'name': 'Featured Tile',
'description': _("The icon shown when your app is featured at the top of "
"category landing pages.")},
{'size': (106, 106),
'has_background': False,
'required': False,
'slug': 'mobile_tile',
'name': 'Mobile Tile',
'description': _("The image used for the app's tile in the mobile "
"Marketplace.")},
{'size': (150, 130),
'has_background': False,
'required': False,
'slug': 'desktop_tile',
'name': 'Desktop Tile',
'description': _("The image used for the app's tile in the desktop "
"Marketplace.")},
]
# Preview sizes in the format (width, height, type)
APP_PREVIEW_MINIMUMS = (320, 480)
APP_PREVIEW_SIZES = [
(180, 270, 'mobile'),
(700, 1050, 'full'), # Because it's proportional, that's why.
]
MAX_PACKAGED_APP_SIZE = 50 * 1024 * 1024 # 50MB
| 28.777778 | 79 | 0.579923 |
79413a76bc7c641286e0e8dcbfbe6a80c043bfa7 | 7,316 | py | Python | mimesis/schema.py | gptubpkCsHKzjC8fKcRXUdK6SbECPM49P5Xu46U/mimesis | 675bd3a8702508c585d808aeb503c4397ada123d | [
"MIT"
] | 1 | 2021-12-21T12:03:54.000Z | 2021-12-21T12:03:54.000Z | mimesis/schema.py | gptubpkCsHKzjC8fKcRXUdK6SbECPM49P5Xu46U/mimesis | 675bd3a8702508c585d808aeb503c4397ada123d | [
"MIT"
] | null | null | null | mimesis/schema.py | gptubpkCsHKzjC8fKcRXUdK6SbECPM49P5Xu46U/mimesis | 675bd3a8702508c585d808aeb503c4397ada123d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Implements classes for generating data by schema."""
import warnings
from typing import Any, Callable, ClassVar, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.types import JSON, SchemaType, Seed
__all__ = ["BaseField", "Field", "Schema"]
class BaseField:
"""
BaseField is a class for generating data by the name of the method.
Instance of this object takes any string which represents the name
of any method of any supported data provider (:class:`~mimesis.Generic`)
and the ``**kwargs`` of the method.
See :class:`~mimesis.schema.BaseField.perform` for more details.
"""
class Meta:
base = True
def __init__(
self,
locale: Locale = Locale.DEFAULT,
seed: Seed = None,
providers: Optional[Sequence[Any]] = None,
) -> None:
"""Initialize field.
:param locale: Locale
:param seed: Seed for random.
"""
self._gen = Generic(locale, seed)
if providers:
self._gen.add_providers(*providers)
self._table = {} # type: ignore
def perform(
self,
name: Optional[str] = None,
key: Optional[Callable[[Any], Any]] = None,
**kwargs: Any
) -> Any:
"""Performs the value of the field by its name.
It takes any string which represents the name of any method of
any supported data provider and the ``**kwargs`` of this method.
.. note:: Some data providers have methods with the same names
and in such cases, you can explicitly define that the method
belongs to data-provider ``name='provider.name'`` otherwise
it will return the data from the first provider which
has a method ``name``.
You can apply a *key function* to the result returned by
        the method, by passing a parameter **key** with a callable
object which returns the final result.
:param name: Name of the method.
:param key: A key function (or any other callable object)
which will be applied to result.
:param kwargs: Kwargs of method.
:return: Value which represented by method.
:raises ValueError: if provider not
supported or if field not defined.
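        Illustrative usage (a sketch; output depends on locale and seed,
        and assumes the default mimesis providers)::
            >>> field = Field()
            >>> field.perform('full_name')
            >>> field.perform('person.full_name')  # explicit provider
            >>> field.perform('full_name', key=str.upper)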
"""
if name is None:
raise FieldError()
def tail_parser(tails: str, obj: Any) -> Any:
"""Return method from end of tail.
:param tails: Tail string
            :param obj: Object to search the tail from
            :return: the last tailed method
"""
provider_name, method_name = tails.split(".", 1)
if "." in method_name:
raise FieldError(name)
attr = getattr(obj, provider_name)
if attr is not None:
try:
return getattr(attr, method_name)
except AttributeError:
raise FieldError(name)
try:
if name not in self._table:
if "." not in name:
# Fix https://github.com/lk-geimfari/mimesis/issues/619
if name == self._gen.choice.Meta.name:
self._table[name] = self._gen.choice
else:
for provider in dir(self._gen):
provider = getattr(self._gen, provider)
if name in dir(provider):
self._table[name] = getattr(provider, name)
else:
self._table[name] = tail_parser(name, self._gen)
result = self._table[name](**kwargs)
if key and callable(key):
return key(result)
return result
except KeyError:
raise FieldError(name)
def __str__(self) -> str:
return "{} <{}>".format(self.__class__.__name__, self._gen.locale)
class Field(BaseField):
"""Greedy field.
    The field which evaluates immediately.
Example:
>>> _ = Field()
>>> _('username')
Dogtag_1836
"""
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self.perform(*args, **kwargs)
class Schema:
"""Class which return list of filled schemas."""
_MIN_ITERATIONS_VALUE: ClassVar[int] = 1
__slots__ = ("_schema",)
def __init__(self, schema: SchemaType) -> None:
"""Initialize schema.
:param schema: A schema (must be a callable object).
"""
if schema and callable(schema):
self._schema = schema
else:
raise SchemaError()
def create(self, iterations: int = 1) -> List[JSON]:
"""Creates a list of a fulfilled schemas.
.. note::
This method evaluates immediately, so be careful on creating
large datasets otherwise you're risking running out of memory.
If you need a lazy version of this method, see
:meth:`iterator`
:param iterations: Number of iterations.
:return: List of fulfilled schemas.
"""
if iterations < self._MIN_ITERATIONS_VALUE:
raise ValueError("The number of iterations must be greater than 0.")
return [self._schema() for _ in range(iterations)]
def loop(self) -> Iterator[JSON]:
"""Fulfills a schema **infinitely** in a lazy way.
        This method can be useful when you have some dynamic
        conditions on which the interruption of the generation depends.
        If you accept all the risks below and want to suppress
        the warnings, then use :py:class:`warnings.catch_warnings`.
.. note::
Since data `mimesis` provides are limited, frequent calls of
this method can cause data duplication.
.. note::
Before using this method, ask yourself: **Do I really need this**?
In most cases, the answer is: Nah, :meth:`iterator` is enough.
.. warning::
Do not use this method without interrupt conditions, otherwise,
you're risking running out of memory.
.. warning::
**Never** (seriously) call `list()`, `tuple()` or any other callable which tries to
evaluate the whole lazy object on this method — infinite called infinite
for a reason.
:return: An infinite iterator with fulfilled schemas.
"""
warnings.warn(
"You're iterating over the infinite object! "
"The schema.loop() can cause a serious memory leak."
"Please, see: https://mimesis.name/api.html#mimesis.schema.Schema.loop"
)
while True:
yield self._schema()
def iterator(self, iterations: int = 1) -> Iterator[JSON]:
"""Fulfills schema in a lazy way.
:param iterations: Number of iterations.
        :return: Iterator over fulfilled schemas.
"""
if iterations < self._MIN_ITERATIONS_VALUE:
raise ValueError("The number of iterations must be greater than 0.")
for item in range(iterations):
yield self._schema()
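# Illustrative usage sketch (non-authoritative; 'uuid', 'full_name' and
# 'email' are standard mimesis provider methods, output is locale/seed
# dependent):
#     _ = Field(Locale.EN)
#     schema = Schema(schema=lambda: {"id": _("uuid"), "name": _("full_name"),
#                                     "email": _("email")})
#     schema.create(iterations=2)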
| 32.515556 | 95 | 0.585429 |
79413b0913242ef44aae9921eb9da2987dd13b22 | 1,792 | py | Python | Source/Server/mqtt.py | HgN37/HGN_LVTN | 1ccdca5bb41665cbd70b360f8cac82037b4a8518 | [
"MIT"
] | null | null | null | Source/Server/mqtt.py | HgN37/HGN_LVTN | 1ccdca5bb41665cbd70b360f8cac82037b4a8518 | [
"MIT"
] | null | null | null | Source/Server/mqtt.py | HgN37/HGN_LVTN | 1ccdca5bb41665cbd70b360f8cac82037b4a8518 | [
"MIT"
] | null | null | null | import paho.mqtt.client as mqtt
import json
import ast
class MQTT():
''' Communication with server via WiFi or Ethernet '''
payload_queue = []
def __init__(self, broker, port, topicin, topicout):
self.topicin = topicin
self.topicout = topicout
def on_connect(client, userdata, flags, rc):
print('Connected with code ' + str(rc))
client.subscribe(self.topicin)
print('Connected to ' + broker)
print('Subscribed to ' + self.topicin)
def on_message(client, userdata, msg):
# print(msg.topic + ': ' + msg.payload.decode('utf-8'))
# print('Get msg')
self.payload_queue.append(msg.payload.decode('utf-8'))
def on_disconnect(client, userdata, rc):
print('Disconnect from ' + broker)
print('Try to reconnect')
client.reconnect()
self.client = mqtt.Client()
self.client.on_connect = on_connect
self.client.on_message = on_message
self.client.on_disconnect = on_disconnect
self.client.connect(broker, port, 60)
def send(self, payload):
self.client.publish(self.topicout, payload)
def get(self):
next_msg = ''
if len(self.payload_queue):
next_msg = self.payload_queue[0]
del self.payload_queue[0]
return next_msg
def run(self):
self.client.loop()
def send_frame(self, addr, func, dev1, dev2, *data):
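        # Build and publish a JSON control frame; each positional value in
        # *data is stored under the DATA key, keyed "1", "2", ...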
tx = {}
tx['ADDR'] = addr
tx['FUNC'] = func
tx['DEV1'] = dev1
tx['DEV2'] = dev2
tx['DATA'] = {}
for i in range(len(data)):
tx['DATA'][str(i+1)] = data[i]
self.send(json.dumps(tx))
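# NOTE: rasp_get_id() used below is not defined in this module; the original
# project presumably provides it elsewhere. A hypothetical stand-in so the
# script runs self-contained (assumption: the hostname is a usable ID):
def rasp_get_id():
    import socket
    return socket.gethostname()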
def main():
tpin = rasp_get_id() + '/m2s'
tpout = rasp_get_id() + '/s2m'
mqtt = MQTT('iot.eclipse.org', 1883, tpin, tpout)
msg = ''
while True:
msg = mqtt.get()
if (msg != ''):
print(msg)
mqtt.send_frame('USER1', 'CONTROL', 'LED', 'NONE', 'ON')
msg = ''
mqtt.run()
if __name__ == '__main__':
main()
| 24.547945 | 59 | 0.654018 |
79413b42c5ea6b69dc7650fcafd6f3caa4c9de62 | 1,733 | py | Python | validate_export.py | salman1993/twitter-customer-support-collector | ed1f8bc8c8401da57cd3c0ddc430b2c83779e0f6 | [
"MIT"
] | 2 | 2017-11-28T14:48:03.000Z | 2019-02-28T09:17:41.000Z | validate_export.py | salman1993/twitter-customer-support-collector | ed1f8bc8c8401da57cd3c0ddc430b2c83779e0f6 | [
"MIT"
] | 2 | 2017-10-05T07:56:29.000Z | 2017-11-30T01:30:01.000Z | validate_export.py | salman1993/twitter-customer-support-collector | ed1f8bc8c8401da57cd3c0ddc430b2c83779e0f6 | [
"MIT"
] | 2 | 2017-11-28T20:14:57.000Z | 2020-01-30T14:00:46.000Z | """ Validates exported dataset. """
import csv
import logging
import os
import sys
from collections import namedtuple
import toolz
def validate_export(export_path: str):
logging.info(f'Validating export at "{export_path}"...')
logging.info("Reading exported dataset...")
with open(export_path) as infile:
reader = csv.reader(infile)
Tweet = namedtuple('Tweet', next(reader))
tweets = {int(line[0]): Tweet(*line) for line in reader if len(line) > 0}
logging.info(f"Read {len(tweets)} tweets.")
all_author_ids = set(t.author_id for t in tweets.values())
logging.info(f"Found {len(all_author_ids)} different authors.")
orphans = [t for t in tweets.values()
if len(t.response_tweet_id) == 0 and len(t.in_response_to_tweet_id) == 0]
logging.info(f"Found {len(orphans)} orphan tweets.")
first_requests = [
t for t in tweets.values() if t.in_response_to_tweet_id == ''
]
logging.info(f"Found {len(first_requests)} conversation starts.")
replies = list(toolz.concat(
[tweets[int(tid)] for tid in t.response_tweet_id.split(',') if int(tid) in tweets]
        for t in first_requests if t.response_tweet_id != ''
))
    # csv fields are strings, so compare against the literal 'True'
    non_cs_replies = [t for t in replies if t.inbound != 'True']
logging.info(f"Found {len(non_cs_replies)} non-inbound response tweets out of {len(replies)}.")
if __name__ == '__main__':
export_path = 'twcs.csv' if len(sys.argv) < 2 else sys.argv[1]
logging.basicConfig(
format='%(levelname)s:%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s',
datefmt='%Y-%m-%d,%H:%M:%S',
level=getattr(logging, os.environ.get('LOG_LEVEL', 'INFO')))
validate_export(export_path)
| 35.367347 | 99 | 0.660704 |
79413c5ed0215231083aa12c68aead7d23464ea7 | 21,423 | py | Python | astropy/io/fits/hdu/groups.py | zonca/astropy | 522ad1db0e5e419ba4bc57aead3ec7fb8e4fc50f | [
"BSD-3-Clause"
] | null | null | null | astropy/io/fits/hdu/groups.py | zonca/astropy | 522ad1db0e5e419ba4bc57aead3ec7fb8e4fc50f | [
"BSD-3-Clause"
] | 10 | 2017-03-15T16:14:43.000Z | 2018-11-22T14:40:54.000Z | astropy/io/fits/hdu/groups.py | tstickel/astropy | 351fa20305886a907ec0829dd5fa53f30a5ba6ff | [
"BSD-3-Clause"
] | 1 | 2020-01-23T00:41:10.000Z | 2020-01-23T00:41:10.000Z | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import numpy as np
from .base import DTYPE2BITPIX
from .image import PrimaryHDU
from .table import _TableLikeHDU
from astropy.io.fits.column import Column, ColDefs, FITS2NUMPY
from astropy.io.fits.fitsrec import FITS_rec, FITS_record
from astropy.io.fits.util import _is_int, _is_pseudo_unsigned, _unsigned_zero
from astropy.utils import lazyproperty
class Group(FITS_record):
"""
One group of the random group data.
"""
def __init__(self, input, row=0, start=None, end=None, step=None,
base=None):
super().__init__(input, row, start, end, step, base)
@property
def parnames(self):
return self.array.parnames
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self.array._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter value.
"""
if _is_int(parname):
result = self.array[self.row][parname]
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.array[self.row][indx[0]]
# if more than one group parameter have the same name
else:
result = self.array[self.row][indx[0]].astype('f8')
for i in indx[1:]:
result += self.array[self.row][i]
return result
def setpar(self, parname, value):
"""
Set the group parameter value.
"""
# TODO: It would be nice if, instead of requiring a multi-part value to
# be an array, there were an *option* to automatically split the value
# into multiple columns if it doesn't already fit in the array data
# type.
if _is_int(parname):
self.array[self.row][parname] = value
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
self.array[self.row][indx[0]] = value
            # if more than one group parameter has the same name, the
            # value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and \
len(indx) == len(value):
for i in range(len(indx)):
self.array[self.row][indx[i]] = value[i]
else:
raise ValueError('Parameter value must be a sequence with '
'{} arrays/numbers.'.format(len(indx)))
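# Usage sketch (hypothetical parameter names): when a group parameter is
# split across duplicate names, setpar() takes one value per slot and par()
# returns their float64 sum, e.g. for parnames ['XX', 'XX', 'YY']:
#     group.setpar('XX', [part1, part2])  # one array/number per 'XX' slot
#     group.par('XX')                     # part1 + part2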
class GroupData(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(cls, input=None, bitpix=None, pardata=None, parnames=[],
bscale=None, bzero=None, parbscales=None, parbzeros=None):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of arrays
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = ['PAR{}'.format(idx + 1) for idx in range(npars)]
if len(parnames) != npars:
raise ValueError('The number of parameter data arrays does '
'not match the number of parameters.')
unique_parnames = _unique_parnames(parnames + ['DATA'])
if bitpix is None:
bitpix = DTYPE2BITPIX[input.dtype.name]
fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = '{}{}'.format(str(input.shape[1:]), format)
formats = ','.join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [Column(name=unique_parnames[idx], format=fits_fmt,
bscale=parbscales[idx], bzero=parbzeros[idx])
for idx in range(npars)]
cols.append(Column(name=unique_parnames[-1], format=fits_fmt,
bscale=bscale, bzero=bzero))
coldefs = ColDefs(cols)
self = FITS_rec.__new__(cls,
np.rec.array(None,
formats=formats,
names=coldefs.names,
shape=gcount))
# By default the data field will just be 'DATA', but it may be
# uniquified if 'DATA' is already used by one of the group names
self._data_field = unique_parnames[-1]
self._coldefs = coldefs
self.parnames = parnames
for idx, name in enumerate(unique_parnames[:-1]):
column = coldefs[idx]
# Note: _get_scale_factors is used here and in other cases
# below to determine whether the column has non-default
# scale/zero factors.
# TODO: Find a better way to do this than using this interface
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(name, pardata[idx])
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
column = coldefs[self._data_field]
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(self._data_field, input)
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super().__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
"""
The raw group data represented as a multi-dimensional `numpy.ndarray`
array.
"""
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
result = self.field(parname)
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.field(indx[0])
            # if more than one group parameter has the same name
else:
result = self.field(indx[0]).astype('f8')
for i in indx[1:]:
result += self.field(i)
return result
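# A minimal construction sketch (shapes and names are illustrative, not
# required by the API):
#     import numpy as np
#     imdata = np.arange(100.0).reshape(10, 1, 1, 2, 5)   # 10 groups of data
#     pdata1 = np.arange(10) + 0.1                        # one value per group
#     gdata = GroupData(imdata, bitpix=-32,
#                       parnames=['abc', 'xyz'], pardata=[pdata1, 42])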
class GroupsHDU(PrimaryHDU, _TableLikeHDU):
"""
FITS Random Groups HDU class.
See the :ref:`random-groups` section in the Astropy documentation for more
details on working with this type of HDU.
"""
_bitpix2tform = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'}
_data_type = GroupData
_data_field = 'DATA'
"""
The name of the table record array field that will contain the group data
for each group; 'DATA' by default, but may be preceded by any number of
underscores if 'DATA' is already a parameter name
"""
def __init__(self, data=None, header=None):
super().__init__(data=data, header=header)
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
self._header['NAXIS'] = 1
self._header.set('NAXIS1', 0, after='NAXIS')
@classmethod
def match_header(cls, header):
keyword = header.cards[0].keyword
return (keyword == 'SIMPLE' and 'GROUPS' in header and
header['GROUPS'] is True)
@lazyproperty
def data(self):
"""
The data of a random group FITS file will be like a binary table's
data.
"""
data = self._get_tbdata()
data._coldefs = self.columns
data.parnames = self.parnames
del self.columns
return data
@lazyproperty
def parnames(self):
"""The names of the group parameters as described by the header."""
pcount = self._header['PCOUNT']
# The FITS standard doesn't really say what to do if a parname is
# missing, so for now just assume that won't happen
return [self._header['PTYPE' + str(idx + 1)] for idx in range(pcount)]
@lazyproperty
def columns(self):
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
format = self._bitpix2tform[self._header['BITPIX']]
pcount = self._header['PCOUNT']
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
bscales.append(self._header.get('PSCAL' + str(idx + 1), None))
bzeros.append(self._header.get('PZERO' + str(idx + 1), None))
parnames.append(self._header['PTYPE' + str(idx + 1)])
formats = [format] * len(parnames)
dim = [None] * len(parnames)
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
parnames.append('DATA')
bscales.append(self._header.get('BSCALE'))
        bzeros.append(self._header.get('BZERO'))
data_shape = self.shape[:-1]
formats.append(str(int(np.prod(data_shape))) + format)
dim.append(data_shape)
parnames = _unique_parnames(parnames)
self._data_field = parnames[-1]
cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero,
dim=dim)
for name, fmt, bscale, bzero, dim in
zip(parnames, formats, bscales, bzeros, dim)]
coldefs = ColDefs(cols)
return coldefs
@property
def _nrows(self):
if not self._data_loaded:
# The number of 'groups' equates to the number of rows in the table
# representation of the data
return self._header.get('GCOUNT', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
return 0
@property
def is_image(self):
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for idx in range(1, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
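        # Worked example: BITPIX=-32, GCOUNT=10, PCOUNT=2 and
        # NAXIS2..NAXIS5 = (1, 1, 2, 5) give 32 * 10 * (2 + 10) // 8 = 480.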
return size
def update_header(self):
old_naxis = self._header.get('NAXIS', 0)
if self._data_loaded:
if isinstance(self.data, GroupData):
self._axes = list(self.data.data.shape)[1:]
self._axes.reverse()
self._axes = [0] + self._axes
field0 = self.data.dtype.names[0]
field0_code = self.data.dtype.fields[field0][0].name
elif self.data is None:
self._axes = [0]
field0_code = 'uint8' # For lack of a better default
else:
raise ValueError('incorrect array type')
self._header['BITPIX'] = DTYPE2BITPIX[field0_code]
self._header['NAXIS'] = len(self._axes)
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set('NAXIS' + str(idx + 1), axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
if self._has_data and isinstance(self.data, GroupData):
self._header.set('GROUPS', True,
after='NAXIS' + str(len(self._axes)))
self._header.set('PCOUNT', len(self.data.parnames), after='GROUPS')
self._header.set('GCOUNT', len(self.data), after='PCOUNT')
column = self.data._coldefs[self._data_field]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set('BSCALE', column.bscale)
if zero:
self._header.set('BZERO', column.bzero)
for idx, name in enumerate(self.data.parnames):
self._header.set('PTYPE' + str(idx + 1), name)
column = self.data._coldefs[idx]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set('PSCAL' + str(idx + 1), column.bscale)
if zero:
self._header.set('PZERO' + str(idx + 1), column.bzero)
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
we have to get the data's byte order a different way...
TODO: Might be nice to store some indication of the data's byte order
as an attribute or function so that we don't have to do this.
"""
size = 0
if self.data is not None:
self.data._scale_back()
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_unsigned(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _unsigned_zero(self.data.dtype),
dtype=f'>i{self.data.dtype.itemsize}')
should_swap = False
else:
output = self.data
fname = self.data.dtype.names[0]
byteorder = self.data.dtype.fields[fname][0].str[0]
should_swap = (byteorder in swap_types)
if not fileobj.simulateonly:
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify locations and values of mandatory keywords.
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 1 <= v <= 999), 1,
option, errs)
self.req_cards('NAXIS1', 3, lambda v: (_is_int(v) and v == 0), 0,
option, errs)
after = self._header['NAXIS'] + 3
pos = lambda x: x >= after
self.req_cards('GCOUNT', pos, _is_int, 1, option, errs)
self.req_cards('PCOUNT', pos, _is_int, 0, option, errs)
self.req_cards('GROUPS', pos, lambda v: (v is True), True, option,
errs)
return errs
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
# TODO: Maybe check this on a per-field basis instead of assuming
# that all fields have the same byte order?
byteorder = \
self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]
if byteorder != '>':
if self.data.flags.writeable:
byteswapped = True
d = self.data.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = self.data.byteswap(False)
d.dtype = d.dtype.newbyteorder('>')
byteswapped = False
else:
byteswapped = False
d = self.data
byte_data = d.view(type=np.ndarray, dtype=np.ubyte)
cs = self._compute_checksum(byte_data)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped:
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _summary(self):
summary = super()._summary()
name, ver, classname, length, shape, format, gcount = summary
# Drop the first axis from the shape
if shape:
shape = shape[1:]
if shape and all(shape):
# Update the format
format = self.columns[0].dtype.name
# Update the GCOUNT report
gcount = f'{self._gcount} Groups {self._pcount} Parameters'
return (name, ver, classname, length, shape, format, gcount)
def _par_indices(names):
"""
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique
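# e.g. _par_indices(['PAR1', 'par1', 'DATA']) -> {'PAR1': [0, 1], 'DATA': [2]}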
def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = '_' + name
name_upper = '_' + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names
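# e.g. _unique_parnames(['PAR1', 'par1', 'DATA']) -> ['PAR1', '_par1', 'DATA']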
| 34.386838 | 79 | 0.545255 |
79413d7c6869695a73f64d30744aca8e883f5501 | 131,135 | py | Python | src/python/packages/amwg/amwg.py | susburrows/uvcmetrics | 5a3c1266f3e5e97398a7671b01fa2816fb307c38 | [
"X11",
"MIT"
] | null | null | null | src/python/packages/amwg/amwg.py | susburrows/uvcmetrics | 5a3c1266f3e5e97398a7671b01fa2816fb307c38 | [
"X11",
"MIT"
] | null | null | null | src/python/packages/amwg/amwg.py | susburrows/uvcmetrics | 5a3c1266f3e5e97398a7671b01fa2816fb307c38 | [
"X11",
"MIT"
] | null | null | null | #!/usr/local/uvcdat/1.3.1/bin/python
# Top-level definition of AMWG Diagnostics.
# AMWG = Atmospheric Model Working Group
import pdb
from metrics.packages.diagnostic_groups import *
from metrics.computation.reductions import *
from metrics.computation.plotspec import *
from metrics.frontend.uvcdat import *
from metrics.common.id import *
from metrics.packages.amwg.derivations import *
from unidata import udunits
import cdutil.times, numpy
from numbers import Number
from pprint import pprint
seasonsyr=cdutil.times.Seasons('JFMAMJJASOND')
class AMWG(BasicDiagnosticGroup):
"""This class defines features unique to the AMWG Diagnostics."""
def __init__(self):
pass
def list_variables( self, filetable1, filetable2=None, diagnostic_set_name="" ):
if diagnostic_set_name!="":
            # I added str() where diagnostic_set_name is set, but just to be sure:
            # spent 2 days debugging a QT Str failing to compare to a "regular" python str
dset = self.list_diagnostic_sets().get( str(diagnostic_set_name), None )
if dset is None:
return self._list_variables( filetable1, filetable2 )
else: # Note that dset is a class not an object.
return dset._list_variables( filetable1, filetable2 )
else:
return self._list_variables( filetable1, filetable2 )
@staticmethod
def _list_variables( filetable1, filetable2=None, diagnostic_set_name="" ):
return BasicDiagnosticGroup._list_variables( filetable1, filetable2, diagnostic_set_name )
@staticmethod
def _all_variables( filetable1, filetable2, diagnostic_set_name ):
return BasicDiagnosticGroup._all_variables( filetable1, filetable2, diagnostic_set_name )
def list_variables_with_levelaxis( self, filetable1, filetable2=None, diagnostic_set="" ):
"""like list_variables, but only returns variables which have a level axis
"""
return self._list_variables_with_levelaxis( filetable1, filetable2, diagnostic_set )
@staticmethod
def _list_variables_with_levelaxis( filetable1, filetable2=None, diagnostic_set_name="" ):
"""like _list_variables, but only returns variables which have a level axis
"""
if filetable1 is None: return []
vars1 = filetable1.list_variables_with_levelaxis()
if not isinstance( filetable2, basic_filetable ): return vars1
vars2 = filetable2.list_variables_with_levelaxis()
varset = set(vars1).intersection(set(vars2))
vars = list(varset)
vars.sort()
return vars
def list_diagnostic_sets( self ):
psets = amwg_plot_spec.__subclasses__()
plot_sets = psets
for cl in psets:
plot_sets = plot_sets + cl.__subclasses__()
return { aps.name:aps for aps in plot_sets if
hasattr(aps,'name') and aps.name.find('dummy')<0 }
class amwg_plot_spec(plot_spec):
package = AMWG # Note that this is a class not an object; also not a string.
# Standard variables are derived variables which are as general-interest as most dataset
# variables (which soon become reduced variables). So it makes sense for all plot sets
# (for the physical realm) to share them. We use the derived_var class here to
    # contain their information, i.e. inputs and how to compute. But, if one be used, another
# derived_var object will have to be built using the full variable ids, including season
# and filetable information.
# standard_variables is a dict. The key is a variable name and the value is a list of
# derived_var objects, each of which gives a way to compute the variable. The first on the
# list is the preferred method. Of course, if the variable be already available as data,
# then that is preferred over any computation.
standard_variables = {
'PRECT':[derived_var(
vid='PRECT', inputs=['PRECC','PRECL'], outputs=['PRECT'],
func=(lambda a,b,units="mm/day": aplusb(a,b,units) ))],
'AODVIS':[derived_var(
vid='AODVIS', inputs=['AOD_550'], outputs=['AODVIS'],
func=(lambda x: setunits(x,'')) )],
# AOD normally has no units, but sometimes the units attribute is set anyway.
'TREFHT':[derived_var(
vid='TREFHT', inputs=['TREFHT_LAND'], outputs=['TREFHT'],
func=(lambda x: x) )],
'RESTOM':[derived_var(
vid='RESTOM', inputs=['FSNT','FLNT'], outputs=['RESTOM'],
func=aminusb )], # RESTOM = net radiative flux
'CLISCCP':[
derived_var(
# old style vid='CLISCCP', inputs=['FISCCP1_COSP','cosp_prs','cosp_tau'], outputs=['CLISCCP'],
# old style func=uncompress_fisccp1 )
vid='CLISCCP', inputs=['FISCCP1_COSP'], outputs=['CLISCCP'],
func=(lambda x: x) )
],
'CLDTOT_ISCCP':[
derived_var( vid='CLDTOT_ISCCP', inputs=['CLDTOT_ISCCPCOSP'], outputs=['CLDTOT_ISCCP'],
func=(lambda x:x) ) ],
'CLDHGH_ISCCP':[
derived_var( vid='CLDHGH_ISCCP', inputs=['CLDHGH_ISCCPCOSP'], outputs=['CLDHGH_ISCCP'],
func=(lambda x:x) ) ],
'CLDMED_ISCCP':[
derived_var( vid='CLDMED_ISCCP', inputs=['CLDMED_ISCCPCOSP'], outputs=['CLDMED_ISCCP'],
func=(lambda x:x) ) ],
'CLDLOW_ISCCP':[
derived_var( vid='CLDLOW_ISCCP', inputs=['CLDLOW_ISCCPCOSP'], outputs=['CLDLOW_ISCCP'],
func=(lambda x:x) ) ],
'CLMISR':[
derived_var( vid='CLMISR', inputs=['CLD_MISR'], outputs=['CLMISR'],
func=(lambda x:x) ) ],
# Note: CLDTOT is different from CLDTOT_CAL, CLDTOT_ISCCPCOSP, etc. But translating
# from one to the other might be better than returning nothing. Also, I'm not so sure that
# reduce_prs_tau is producing the right answers, but that's a problem for later.
#1-ISCCP
'CLDTOT_TAU1.3_ISCCP':[
derived_var(
vid='CLDTOT_TAU1.3_ISCCP', inputs=['CLISCCP'], outputs=['CLDTOT_TAU1.3_ISCCP'],
func=(lambda clisccp: reduce_height_thickness( clisccp, None,None, 1.3,379) ) )
],
#2-ISCCP
'CLDTOT_TAU1.3-9.4_ISCCP':[
derived_var(
vid='CLDTOT_TAU1.3-9.4_ISCCP', inputs=['CLISCCP'], outputs=['CLDTOT_TAU1.3-9.4_ISCCP'],
func=(lambda clisccp: reduce_height_thickness( clisccp, None,None, 1.3,9.4) ) )
],
#3-ISCCP
'CLDTOT_TAU9.4_ISCCP':[
derived_var(
vid='CLDTOT_TAU9.4_ISCCP', inputs=['CLISCCP'], outputs=['CLDTOT_TAU9.4_ISCCP'],
func=(lambda clisccp: reduce_height_thickness( clisccp, None,None, 9.4,379) ) )
],
#1-MODIS
'CLDTOT_TAU1.3_MODIS':[
derived_var(
vid='CLDTOT_TAU1.3_MODIS', inputs=['CLMODIS'], outputs=['CLDTOT_TAU1.3_MODIS'],
func=(lambda clmodis: reduce_height_thickness( clmodis, None,None, 1.3,379 ) ) )
],
#2-MODIS
'CLDTOT_TAU1.3-9.4_MODIS':[
derived_var(
vid='CLDTOT_TAU1.3-9.4_MODIS', inputs=['CLMODIS'], outputs=['CLDTOT_TAU1.3-9.4_MODIS'],
func=(lambda clmodis: reduce_height_thickness( clmodis, None,None, 1.3,9.4 ) ) )
],
#3-MODIS
'CLDTOT_TAU9.4_MODIS':[
derived_var(
vid='CLDTOT_TAU9.4_MODIS', inputs=['CLMODIS'], outputs=['CLDTOT_TAU9.4_MODIS'],
func=(lambda clmodis: reduce_height_thickness( clmodis, None,None, 9.4,379 ) ) )
],
#4-MODIS
'CLDHGH_TAU1.3_MODIS':[
derived_var(
vid='CLDHGH_TAU1.3_MODIS', inputs=['CLMODIS'], outputs=['CLDHGH_TAU1.3_MODIS'],
func=(lambda clmodis: reduce_height_thickness( clmodis, 0,440, 1.3,379 ) ) )
],
#5-MODIS
'CLDHGH_TAU1.3-9.4_MODIS':[
derived_var(
vid='CLDHGH_TAU1.3-9.4_MODIS', inputs=['CLMODIS'], outputs=['CLDHGH_TAU1.3-9.4_MODIS'],
#func=(lambda clmodis: reduce_prs_tau( clmodis( modis_prs=(0,440), modis_tau=(1.3,9.4) ))) )
func=(lambda clmodis: reduce_height_thickness(
clmodis, 0,440, 1.3,9.4) ) )
],
#6-MODIS
'CLDHGH_TAU9.4_MODIS':[
derived_var(
vid='CLDHGH_TAU9.4_MODIS', inputs=['CLMODIS'], outputs=['CLDHGH_TAU9.4_MODIS'],
func=(lambda clmodis: reduce_height_thickness( clmodis, 0,440, 9.4,379) ) )
],
#1-MISR
'CLDTOT_TAU1.3_MISR':[
derived_var(
vid='CLDTOT_TAU1.3_MISR', inputs=['CLMISR'], outputs=['CLDTOT_TAU1.3_MISR'],
func=(lambda clmisr: reduce_height_thickness( clmisr, None,None, 1.3,379) ) )
],
#2-MISR
'CLDTOT_TAU1.3-9.4_MISR':[
derived_var(
vid='CLDTOT_TAU1.3-9.4_MISR', inputs=['CLMISR'], outputs=['CLDTOT_TAU1.3-9.4_MISR'],
func=(lambda clmisr: reduce_height_thickness( clmisr, None,None, 1.3,9.4) ) )
],
#3-MISR
'CLDTOT_TAU9.4_MISR':[
derived_var(
vid='CLDTOT_TAU9.4_MISR', inputs=['CLMISR'], outputs=['CLDTOT_TAU9.4_MISR'],
func=(lambda clmisr: reduce_height_thickness( clmisr, None,None, 9.4,379) ) )
],
#4-MISR
'CLDLOW_TAU1.3_MISR':[
derived_var(
vid='CLDLOW_TAU1.3_MISR', inputs=['CLMISR'], outputs=['CLDLOW_TAU1.3_MISR'],
func=(lambda clmisr, h0=0,h1=3,t0=1.3,t1=379: reduce_height_thickness(
clmisr, h0,h1, t0,t1) ) )
],
#5-MISR
'CLDLOW_TAU1.3-9.4_MISR':[
derived_var(
vid='CLDLOW_TAU1.3-9.4_MISR', inputs=['CLMISR'], outputs=['CLDLOW_TAU1.3-9.4_MISR'],
func=(lambda clmisr, h0=0,h1=3, t0=1.3,t1=9.4: reduce_height_thickness( clmisr, h0,h1, t0,t1) ) )
#func=(lambda clmisr, h0=0,h1=6, t0=2,t1=4: reduce_height_thickness( clmisr, h0,h1, t0,t1) ) )
],
#6-MISR
'CLDLOW_TAU9.4_MISR':[
derived_var(
vid='CLDLOW_TAU9.4_MISR', inputs=['CLMISR'], outputs=['CLDLOW_TAU9.4_MISR'],
func=(lambda clmisr, h0=0,h1=3, t0=9.4,t1=379: reduce_height_thickness(
clmisr, h0,h1, t0,t1) ) )
],
'TGCLDLWP':[derived_var(
vid='TGCLDLWP', inputs=['TGCLDLWP_OCEAN'], outputs=['TGCLDLWP'],
func=(lambda x: x) ) ]
}
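    # Shape of an entry (illustrative lookup):
    #     svd = amwg_plot_spec.standard_variables['PRECT'][0]
    #     svd.inputs()   # -> ['PRECC', 'PRECL']
    # stdvar2var() below uses exactly this information to decide which
    # reduced variables to build and which function combines them.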
@staticmethod
def _list_variables( filetable1, filetable2=None ):
return amwg_plot_spec.package._list_variables( filetable1, filetable2, "amwg_plot_spec" )
@staticmethod
def _all_variables( filetable1, filetable2=None ):
return amwg_plot_spec.package._all_variables( filetable1, filetable2, "amwg_plot_spec" )
@classmethod
def stdvar2var( cls, varnom, filetable, season, reduction_function, recurse=True ):
"""From a variable name, a filetable, and a season, this finds the variable name in
standard_variables. If it's there, this method generates a variable as an instance
of reduced_variable or derived_var, which represents the variable and how to compute it
from the data described by the filetable.
Inputs are the variable name (e.g. FLUT, TREFHT), a filetable, a season, and (important!)
a reduction function which reduces data variables to reduced variables prior to computing
the variable specified by varnom.
If successful, this will return (i) a variable id for varnom, including filetable and
season; it is the id of the first item in the returned list of derived variables.
(ii) a list of reduced_variables needed for computing varnom. For
each such reduced variable rv, it may be placed in a dictionary using rv.id() as its key.
(iii) a list of derived variables - normally just the one representing varnom, but
in more complicated situations (which haven't been implemented yet) it may be longer.
For a member of the list dv, dv.id() is a suitable dictionary key.
        If unsuccessful, this will return None,[],[].
"""
if filetable is None:
return None,[],[]
#if varnom not in amwg_plot_spec.standard_variables:
if varnom not in cls.standard_variables:
return None,[],[]
#print "jfp varnom=",varnom
computable = False
rvs = []
dvs = []
for svd in cls.standard_variables[varnom]: # loop over ways to compute varnom
invarnoms = svd.inputs()
#print "jfp first round, invarnoms=",invarnoms
#print "jfp filetable variables=",filetable.list_variables()
if len( set(invarnoms) - set(filetable.list_variables_incl_axes()) )<=0:
func = svd._func
computable = True
break
if computable:
for ivn in invarnoms:
#print "jfp computing reduced variable from input variableid=ivn=",ivn
rv = reduced_variable( variableid=ivn, filetable=filetable, season=season,
reduction_function=reduction_function )
#print "jfp adding reduced variable rv=",rv
rvs.append(rv)
#print "jfp1 rvs ids=",[rv.id() for rv in rvs]
if not computable and recurse==True:
#print "jfp second round"
# Maybe the input variables are themselves computed. We'll only do this one
# level of recursion before giving up. This is enough to do a real computation
# plus some variable renamings via standard_variables.
# Once we have a real system for handling name synonyms, this loop can probably
            # be dispensed with. If we will never have such a system, then the above loop
# can be dispensed with.
for svd in cls.standard_variables[varnom]: # loop over ways to compute varnom
invarnoms = svd.inputs()
for invar in invarnoms:
if invar in filetable.list_variables_incl_axes():
rv = reduced_variable( variableid=invar, filetable=filetable, season=season,
reduction_function=reduction_function )
rvs.append(rv)
else:
if invar not in cls.standard_variables:
break
dummy,irvs,idvs =\
cls.stdvar2var( invar, filetable, season, reduction_function, recurse=False )
rvs += irvs
dvs += idvs
func = svd._func
computable = True
break
#print "jfp4 rvs ids=",[rv.id() for rv in rvs]
if len(rvs)<=0:
print "WARNING, no inputs found for",varnom,"in filetable",filetable.id()
print "need inputs",svd.inputs()
return None,[],[]
if not computable:
print "DEBUG: standard variable",varnom,"is not computable"
print "need inputs",svd.inputs()
print "found inputs",[rv.id() for rv in rvs]+[drv.id() for drv in dvs]
return None,[],[]
seasonid = season.seasons[0]
vid = dv.dict_id( varnom, '', seasonid, filetable )
#print "jfp stdvar is making a new derived_var, vid=",vid,"inputs=",[rv.id() for rv in rvs]
newdv = derived_var( vid=vid, inputs=[rv.id() for rv in rvs], func=func )
dvs.append(newdv)
return newdv.id(), rvs, dvs
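# Typical call from a plot set (a sketch mirroring vars_stdvar_normal_contours
# in amwg_plot_set5and6 below; `region` stands in for self.region):
#     varid, rvs, dvs = self.stdvar2var(
#         'PRECT', filetable, self.season,
#         lambda x, vid: reduce2latlon_seasonal(x, self.season, region, vid))
#     for rvar in rvs: self.reduced_variables[rvar.id()] = rvar
#     for dvar in dvs: self.derived_variables[dvar.id()] = dvar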
# plot set classes in other files:
from metrics.packages.amwg.amwg1 import *
# plot set classes we need which I haven't done yet:
class amwg_plot_set4a(amwg_plot_spec):
pass
class amwg_plot_set7(amwg_plot_spec):
pass
class amwg_plot_set8(amwg_plot_spec):
pass
class amwg_plot_set10(amwg_plot_spec):
pass
#class amwg_plot_set11(amwg_plot_spec):
# pass
class amwg_plot_set12(amwg_plot_spec):
pass
class amwg_plot_set14(amwg_plot_spec):
pass
class amwg_plot_set15(amwg_plot_spec):
pass
class amwg_plot_set2(amwg_plot_spec):
"""represents one plot from AMWG Diagnostics Plot Set 2
Each such plot is a page consisting of two to four plots. The horizontal
axis is latitude and the vertical axis is heat or fresh-water transport.
Both model and obs data is plotted, sometimes in the same plot.
The data presented is averaged over everything but latitude.
"""
name = '2 - Line Plots of Annual Implied Northward Transport'
number = '2'
def __init__( self, filetable1, filetable2, varid, seasonid=None, region=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string identifying the derived variable to be plotted, e.g. 'Ocean_Heat'.
The seasonid argument will be ignored."""
plot_spec.__init__(self,seasonid)
        self.season = cdutil.times.Seasons(self._seasonid) # note that self._seasonid can differ from seasonid
self.plottype='Yxvsx'
vars = self._list_variables(filetable1,filetable2)
if varid not in vars:
print "In amwg_plot_set2 __init__, ignoring varid input, will compute Ocean_Heat"
varid = vars[0]
print "Warning: amwg_plot_set2 only uses NCEP obs, and will ignore any other obs specification."
# TO DO: Although model vs NCEP obs is all that NCAR does, there's no reason why we
# TO DO: shouldn't support something more general, at least model vs model.
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
@staticmethod
    def _list_variables( filetable1=None, filetable2=None ):
return ['Ocean_Heat']
@staticmethod
    def _all_variables( filetable1, filetable2=None ):
return { vn:basic_plot_variable for vn in amwg_plot_set2._list_variables( filetable1, filetable2 ) }
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
# CAM variables needed for heat transport: (SOME ARE SUPERFLUOUS <<<<<<)
# FSNS, FLNS, FLUT, FSNTOA, FLNT, FSNT, SHFLX, LHFLX,
self.reduced_variables = {
'FSNS_1': reduced_variable(
variableid='FSNS', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid:x) ),
'FSNS_ANN_latlon_1': reduced_variable(
variableid='FSNS',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'FLNS_1': reduced_variable(
variableid='FLNS', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid:x) ),
'FLNS_ANN_latlon_1': reduced_variable(
variableid='FLNS',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'FLUT_ANN_latlon_1': reduced_variable(
variableid='FLUT',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'FSNTOA_ANN_latlon_1': reduced_variable(
variableid='FSNTOA',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'FLNT_1': reduced_variable(
variableid='FLNT',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
'FLNT_ANN_latlon_1': reduced_variable(
variableid='FLNT',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'FSNT_1': reduced_variable(
variableid='FSNT', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid:x) ),
'FSNT_ANN_latlon_1': reduced_variable(
variableid='FSNT',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'QFLX_1': reduced_variable(
variableid='QFLX',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
'SHFLX_1': reduced_variable(
variableid='SHFLX', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid:x) ),
'SHFLX_ANN_latlon_1': reduced_variable(
variableid='SHFLX',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'LHFLX_ANN_latlon_1': reduced_variable(
variableid='LHFLX',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon ),
'OCNFRAC_ANN_latlon_1': reduced_variable(
variableid='OCNFRAC',
filetable=filetable1, season=self.season,
reduction_function=reduce2latlon )
}
self.derived_variables = {
'CAM_HEAT_TRANSPORT_ALL_1': derived_var(
vid='CAM_HEAT_TRANSPORT_ALL_1',
inputs=['FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1', 'FLUT_ANN_latlon_1',
'FSNTOA_ANN_latlon_1', 'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1', 'OCNFRAC_ANN_latlon_1' ],
outputs=['atlantic_heat_transport','pacific_heat_transport',
'indian_heat_transport', 'global_heat_transport' ],
func=oceanic_heat_transport ),
'NCEP_OBS_HEAT_TRANSPORT_ALL_2': derived_var(
vid='NCEP_OBS_HEAT_TRANSPORT_ALL_2',
inputs=[],
outputs=('latitude', ['atlantic_heat_transport','pacific_heat_transport',
'indian_heat_transport', 'global_heat_transport' ]),
func=(lambda: ncep_ocean_heat_transport(filetable2) ) )
}
ft1src = filetable1.source()
try:
ft2src = filetable2.source()
except:
ft2src = ''
self.single_plotspecs = {
'CAM_NCEP_HEAT_TRANSPORT_GLOBAL': plotspec(
vid='CAM_NCEP_HEAT_TRANSPORT_GLOBAL',
# x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
# y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
# y1func=(lambda y: y[3]),
# x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
# y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
# y2func=(lambda y: y[1][3]),
zvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
zfunc=(lambda y: y[3]),
z2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
z2func=(lambda z: z[1][3]),
plottype = self.plottype,
title = 'CAM & NCEP HEAT_TRANSPORT GLOBAL',
source = ft1src ),
'CAM_NCEP_HEAT_TRANSPORT_PACIFIC': plotspec(
vid='CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
# x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
# y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
# y1func=(lambda y: y[0]),
# x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
# y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
# y2func=(lambda y: y[1][0]),
zvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
zfunc=(lambda y: y[0]),
z2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
z2func=(lambda y: y[1][0]),
plottype = self.plottype,
title = 'CAM & NCEP HEAT_TRANSPORT PACIFIC',
source = ft1src ),
'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC': plotspec(
vid='CAM_NCEP_HEAT_TRANSPORT_ATLANTIC',
# x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
# y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
# y1func=(lambda y: y[0]),
# x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
# y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
# y2func=(lambda y: y[1][1]),
zvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
zfunc=(lambda y: y[1]),
z2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
z2func=(lambda y: y[1][1]),
plottype = self.plottype ,
title = 'CAM & NCEP HEAT_TRANSPORT ATLANTIC',
source = ft1src ),
'CAM_NCEP_HEAT_TRANSPORT_INDIAN': plotspec(
vid='CAM_NCEP_HEAT_TRANSPORT_INDIAN',
# x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
# y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
# y1func=(lambda y: y[0]),
# x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
# y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
# y2func=(lambda y: y[1][2]),
zvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
zfunc=(lambda y: y[2]),
z2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
z2func=(lambda y: y[1][2]),
plottype = self.plottype,
title = 'CAM & NCEP HEAT_TRANSPORT INDIAN',
source = ft1src ),
}
self.composite_plotspecs = {
'CAM_NCEP_HEAT_TRANSPORT_ALL':
['CAM_NCEP_HEAT_TRANSPORT_GLOBAL','CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC','CAM_NCEP_HEAT_TRANSPORT_INDIAN']
}
self.computation_planned = True
def _results(self,newgrid=0):
results = plot_spec._results(self,newgrid)
if results is None: return None
psv = self.plotspec_values
if not('CAM_NCEP_HEAT_TRANSPORT_GLOBAL' in psv.keys()) or\
psv['CAM_NCEP_HEAT_TRANSPORT_GLOBAL'] is None:
return None
psv['CAM_NCEP_HEAT_TRANSPORT_GLOBAL'].synchronize_many_values(
[ psv['CAM_NCEP_HEAT_TRANSPORT_PACIFIC'], psv['CAM_NCEP_HEAT_TRANSPORT_ATLANTIC'],
psv['CAM_NCEP_HEAT_TRANSPORT_INDIAN'] ],
suffix_length=0 )
psv['CAM_NCEP_HEAT_TRANSPORT_GLOBAL'].finalize()
psv['CAM_NCEP_HEAT_TRANSPORT_PACIFIC'].finalize()
psv['CAM_NCEP_HEAT_TRANSPORT_ATLANTIC'].finalize()
psv['CAM_NCEP_HEAT_TRANSPORT_INDIAN'].finalize()
return self.plotspec_values['CAM_NCEP_HEAT_TRANSPORT_ALL']
class amwg_plot_set3(amwg_plot_spec,basic_id):
"""represents one plot from AMWG Diagnostics Plot Set 3.
Each such plot is a pair of plots: a 2-line plot comparing model with obs, and
a 1-line plot of the model-obs difference. A plot's x-axis is latitude, and
its y-axis is the specified variable. The data presented is a climatological mean - i.e.,
time-averaged with times restricted to the specified season, DJF, JJA, or ANN."""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
name = '3 - Line Plots of Zonal Means'
number = '3'
def __init__( self, filetable1, filetable2, varnom, seasonid=None, regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varnom is a string, e.g. 'TREFHT'. Seasonid is a string, e.g. 'DJF'."""
basic_id.__init__(self,varnom,seasonid)
plot_spec.__init__(self,seasonid)
        self.season = cdutil.times.Seasons(self._seasonid) # note that self._seasonid can differ from seasonid
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varnom, seasonid )
def plan_computation( self, filetable1, filetable2, varnom, seasonid ):
zvar = reduced_variable(
variableid=varnom,
filetable=filetable1, season=self.season, region=self.region,
reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,self.season,self.region,vid=vid)) )
self.reduced_variables[zvar._strid] = zvar
#self.reduced_variables[varnom+'_1'] = zvar
#zvar._vid = varnom+'_1' # _vid is deprecated
z2var = reduced_variable(
variableid=varnom,
filetable=filetable2, season=self.season, region=self.region,
reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,self.season,self.region,vid=vid)) )
self.reduced_variables[z2var._strid] = z2var
#self.reduced_variables[varnom+'_2'] = z2var
#z2var._vid = varnom+'_2' # _vid is deprecated
self.plot_a = basic_two_line_plot( zvar, z2var )
ft1id,ft2id = filetable_ids(filetable1,filetable2)
vid = '_'.join([self._id[0],self._id[1],ft1id,ft2id,'diff'])
# ... e.g. CLT_DJF_ft1_ft2_diff
self.plot_b = one_line_diff_plot( zvar, z2var, vid )
self.computation_planned = True
def _results(self,newgrid=0):
# At the moment this is very specific to plot set 3. Maybe later I'll use a
# more general method, to something like what's in plot_data.py, maybe not.
# later this may be something more specific to the needs of the UV-CDAT GUI
results = plot_spec._results(self,newgrid)
if results is None: return None
zvar = self.plot_a.zvars[0]
z2var = self.plot_a.z2vars[0]
#zval = zvar.reduce()
zval = self.variable_values[zvar._strid]
#zval = self.variable_values[zvar._vid] # _vid is deprecated
if zval is None: return None
zunam = zvar._filetable._strid # part of y1 distinguishing it from y2, e.g. ft_1
zval.id = '_'.join([self._id[0],self._id[1],zunam])
z2val = self.variable_values[z2var._strid]
if z2val is None:
z2unam = ''
zdiffval = None
else:
z2unam = z2var._filetable._strid # part of y2 distinguishing it from y1, e.g. ft_2
z2val.id = '_'.join([self._id[0],self._id[1],z2unam])
zdiffval = apply( self.plot_b.zfunc, [zval,z2val] )
zdiffval.id = '_'.join([self._id[0],self._id[1],
zvar._filetable._strid, z2var._filetable._strid, 'diff'])
# ... e.g. CLT_DJF_set3_CAM456_NCEP_diff
ft1src = zvar._filetable.source()
try:
ft2src = z2var._filetable.source()
except:
ft2src = ''
plot_a_val = uvc_plotspec(
[v for v in [zval,z2val] if v is not None],'Yxvsx', labels=[zunam,z2unam],
#title=' '.join([self._id[0],self._id[1],self._id[2],zunam,'and',z2unam]),
title = ' '.join([self._id[0],self._id[1],self._id[2]]),
source = ','.join([ft1src,ft2src] ))
plot_b_val = uvc_plotspec(
[v for v in [zdiffval] if v is not None],'Yxvsx', labels=['difference'],
title=' '.join([self._id[0],self._id[1],self._id[2],'difference']),
source = ','.join([ft1src,ft2src] ))
# no, we don't want same range for values & difference! plot_a_val.synchronize_ranges(plot_b_val)
plot_a_val.finalize()
plot_b_val.finalize()
return [ plot_a_val, plot_b_val ]
class amwg_plot_set4(amwg_plot_spec):
"""represents one plot from AMWG Diagnostics Plot Set 4.
Each such plot is a set of three contour plots: one each for model output, observations, and
the difference between the two. A plot's x-axis is latitude and its y-axis is the level,
measured as pressure. The model and obs plots should have contours at the same values of
their variable. The data presented is a climatological mean - i.e.,
time-averaged with times restricted to the specified season, DJF, JJA, or ANN."""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
name = '4 - Vertical Contour Plots Zonal Means'
number = '4'
def __init__( self, filetable1, filetable2, varid, seasonid=None, regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string, e.g. 'TREFHT'. Seasonid is a string, e.g. 'DJF'.
At the moment we assume that data from filetable1 has CAM hybrid levels,
and data from filetable2 has pressure levels."""
plot_spec.__init__(self,seasonid)
self.plottype = 'Isofill'
        self.season = cdutil.times.Seasons(self._seasonid) # note that self._seasonid can differ from seasonid
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
ft1id,ft2id = filetable_ids(filetable1,filetable2)
self.plot1_id = '_'.join([ft1id,varid,seasonid,'contour'])
self.plot2_id = '_'.join([ft2id,varid,seasonid,'contour'])
self.plot3_id = '_'.join([ft1id+'-'+ft2id,varid,seasonid,'contour'])
self.plotall_id = '_'.join([ft1id,ft2id,varid,seasonid])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
@staticmethod
def _list_variables( filetable1, filetable2=None ):
allvars = amwg_plot_set4._all_variables( filetable1, filetable2 )
listvars = allvars.keys()
listvars.sort()
# print "amwg plot set 4 listvars=",listvars
return listvars
@staticmethod
def _all_variables( filetable1, filetable2=None ):
allvars = {}
for varname in amwg_plot_spec.package._list_variables_with_levelaxis(
filetable1, filetable2, "amwg_plot_spec" ):
allvars[varname] = basic_level_variable
return allvars
def reduced_variables_press_lev( self, filetable, varid, seasonid, ftno=None ):
return reduced_variables_press_lev( filetable, varid, seasonid, region=self.region )
def reduced_variables_hybrid_lev( self, filetable, varid, seasonid, ftno=None ):
return reduced_variables_hybrid_lev( filetable, varid, seasonid, region=self.region )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
ft1_hyam = filetable1.find_files('hyam')
if filetable2 is None:
ft2_hyam = None
else:
ft2_hyam = filetable2.find_files('hyam')
hybrid1 = ft1_hyam is not None and ft1_hyam!=[] # true iff filetable1 uses hybrid level coordinates
hybrid2 = ft2_hyam is not None and ft2_hyam!=[] # true iff filetable2 uses hybrid level coordinates
if hybrid1:
reduced_variables_1 = self.reduced_variables_hybrid_lev( filetable1, varid, seasonid )
else:
reduced_variables_1 = self.reduced_variables_press_lev( filetable1, varid, seasonid )
if hybrid2:
reduced_variables_2 = self.reduced_variables_hybrid_lev( filetable2, varid, seasonid )
else:
reduced_variables_2 = self.reduced_variables_press_lev( filetable2, varid, seasonid )
reduced_variables_1.update( reduced_variables_2 )
self.reduced_variables = reduced_variables_1
self.derived_variables = {}
if hybrid1:
# >>>> actually last arg of the derived var should identify the coarsest level, not nec. 2
vid1=dv.dict_id(varid,'levlat',seasonid,filetable1)
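            # verticalize converts from CAM hybrid level coordinates to
            # pressure; on hybrid levels the pressure at level k follows the
            # usual CAM relation p(k) = hyam(k)*P0 + hybm(k)*PS, which is why
            # hyam, hybm and PS accompany the variable as inputs below.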
self.derived_variables[vid1] = derived_var(
vid=vid1, inputs=[rv.dict_id(varid,seasonid,filetable1), rv.dict_id('hyam',seasonid,filetable1),
rv.dict_id('hybm',seasonid,filetable1), rv.dict_id('PS',seasonid,filetable1),
rv.dict_id(varid,seasonid,filetable2) ],
func=verticalize )
else:
vid1 = rv.dict_id(varid,seasonid,filetable1)
if hybrid2:
# >>>> actually last arg of the derived var should identify the coarsest level, not nec. 2
vid2=dv.dict_id(varid,'levlat',seasonid,filetable2)
self.derived_variables[vid2] = derived_var(
vid=vid2, inputs=[rv.dict_id(varid,seasonid,filetable2),
rv.dict_id('hyam',seasonid,filetable2),
rv.dict_id('hybm',seasonid,filetable2),
rv.dict_id('PS',seasonid,filetable2),
rv.dict_id(varid,seasonid,filetable2) ],
func=verticalize )
else:
vid2 = rv.dict_id(varid,seasonid,filetable2)
ft1src = filetable1.source()
try:
ft2src = filetable2.source()
except:
ft2src = ''
self.single_plotspecs = {
self.plot1_id: plotspec(
vid = ps.dict_idid(vid1), zvars=[vid1], zfunc=(lambda z: z),
plottype = self.plottype,
title = ' '.join([varid,seasonid,'(1)']),
source = ft1src ),
self.plot2_id: plotspec(
vid = ps.dict_idid(vid2), zvars=[vid2], zfunc=(lambda z: z),
plottype = self.plottype,
title = ' '.join([varid,seasonid,'(2)']),
source = ft2src ),
self.plot3_id: plotspec(
vid = ps.dict_id(varid,'diff',seasonid,filetable1,filetable2), zvars=[vid1,vid2],
zfunc=aminusb_2ax, plottype = self.plottype,
title = ' '.join([varid,seasonid,'(1)-(2)']),
source = ', '.join([ft1src,ft2src]) )
}
self.composite_plotspecs = {
self.plotall_id: [self.plot1_id, self.plot2_id, self.plot3_id ]
}
self.computation_planned = True
def _results(self,newgrid=0):
results = plot_spec._results(self,newgrid)
if results is None:
print "WARNING, AMWG plot set 4 found nothing to plot"
return None
psv = self.plotspec_values
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
else:
print "WARNING not synchronizing ranges for",self.plot1_id,"and",self.plot2_id
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize(flip_y=True)
return self.plotspec_values[self.plotall_id]
class amwg_plot_set5and6(amwg_plot_spec):
"""represents one plot from AMWG Diagnostics Plot Sets 5 and 6 <actually only the contours, set 5>
NCAR has the same menu for both plot sets, and we want to ease the transition from NCAR
diagnostics to these; so both plot sets will be done together here as well.
    Each such plot is a set of three contour plots: one each for model output, observations, and
the difference between the two. A plot's x-axis is longitude and its y-axis is the latitude;
normally a world map will be overlaid.
"""
def __init__( self, filetable1, filetable2, varid, seasonid=None, regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string identifying the variable to be plotted, e.g. 'TREFHT'.
seasonid is a string such as 'DJF'."""
plot_spec.__init__(self, seasonid, regionid)
self.plottype = 'Isofill'
        self.season = cdutil.times.Seasons(self._seasonid) # note that self._seasonid can differ from seasonid
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
self.varid = varid
ft1id,ft2id = filetable_ids(filetable1,filetable2)
self.reduced_variables = {}
self.derived_variables = {}
self.plot1_id = ft1id+'_'+varid+'_'+seasonid
self.plot2_id = ft2id+'_'+varid+'_'+seasonid
self.plot3_id = ft1id+' - '+ft2id+'_'+varid+'_'+seasonid
self.plot1var_id = ft1id+'_'+varid+'_var_'+seasonid
self.plotall_id = ft1id+'_'+ft2id+'_'+varid+'_'+seasonid
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid, aux )
@staticmethod
def _list_variables( filetable1, filetable2=None ):
allvars = amwg_plot_set5and6._all_variables( filetable1, filetable2 )
listvars = allvars.keys()
listvars.sort()
return listvars
@staticmethod
def _all_variables( filetable1, filetable2=None, use_standard_vars=True ):
allvars = amwg_plot_spec.package._all_variables( filetable1, filetable2, "amwg_plot_spec" )
for varname in amwg_plot_spec.package._list_variables_with_levelaxis(
filetable1, filetable2, "amwg_plot_spec" ):
allvars[varname] = level_variable_for_amwg_set5
if use_standard_vars:
for varname in amwg_plot_spec.standard_variables.keys():
allvars[varname] = basic_plot_variable
return allvars
def plan_computation( self, filetable1, filetable2, varid, seasonid, aux=None ):
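        # Dispatch note: a numeric aux (e.g. aux=500) is read as a pressure
        # level in mbar and routes to the single-level branch; anything else
        # falls through to the ordinary lat-lon contour plot.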
if isinstance(aux,Number):
return self.plan_computation_level_surface( filetable1, filetable2, varid, seasonid, aux )
else:
return self.plan_computation_normal_contours( filetable1, filetable2, varid, seasonid, aux )
def plan_computation_normal_contours( self, filetable1, filetable2, varnom, seasonid, aux=None ):
"""Set up for a lat-lon contour plot, as in plot set 5. Data is averaged over all other
axes."""
if varnom in filetable1.list_variables():
vid1,vid1var = self.vars_normal_contours(
filetable1, varnom, seasonid, aux=None )
elif varnom in self.standard_variables.keys():
vid1,vid1var = self.vars_stdvar_normal_contours(
filetable1, varnom, seasonid, aux=None )
else:
print "ERROR, variable",varnom,"not found in and cannot be computed from",filetable1
return None
if filetable2 is not None and varnom in filetable2.list_variables():
vid2,vid2var = self.vars_normal_contours(
filetable2, varnom, seasonid, aux=None )
elif varnom in self.standard_variables.keys():
vid2,vid2var = self.vars_stdvar_normal_contours(
filetable2, varnom, seasonid, aux=None )
else:
vid2,vid2var = None,None
self.single_plotspecs = {}
ft1src = filetable1.source()
try:
ft2src = filetable2.source()
except:
ft2src = ''
all_plotnames = []
if filetable1 is not None:
if vid1 is not None:
self.single_plotspecs[self.plot1_id] = plotspec(
vid = ps.dict_idid(vid1),
zvars = [vid1], zfunc = (lambda z: z),
plottype = self.plottype,
#title = ' '.join([varnom,seasonid,filetable1._strid]) )
title = ' '.join([varnom,seasonid,'(1)']),
source = ft1src )
all_plotnames.append(self.plot1_id)
if vid1var is not None:
self.single_plotspecs[self.plot1var_id] = plotspec(
vid = ps.dict_idid(vid1var),
zvars = [vid1var], zfunc = (lambda z: z),
plottype = self.plottype,
#title = ' '.join([varnom,seasonid,filetable1._strid,'variance']) )
title = ' '.join([varnom,seasonid,'1 variance']),
source = ft1src )
all_plotnames.append(self.plot1var_id)
if filetable2 is not None and vid2 is not None:
self.single_plotspecs[self.plot2_id] = plotspec(
vid = ps.dict_idid(vid2),
zvars = [vid2], zfunc = (lambda z: z),
plottype = self.plottype,
#title = ' '.join([varnom,seasonid,filetable2._strid]) )
title = ' '.join([varnom,seasonid,'(2)']),
source = ft2src )
all_plotnames.append(self.plot2_id)
if filetable1 is not None and filetable2 is not None and vid1 is not None and vid2 is not None:
self.single_plotspecs[self.plot3_id] = plotspec(
vid = ps.dict_id(varnom,'diff',seasonid,filetable1,filetable2),
zvars = [vid1,vid2], zfunc = aminusb_2ax,
plottype = self.plottype,
#title = ' '.join([varnom,seasonid,filetable1._strid,'-',filetable2._strid]) )
title = ' '.join([varnom,seasonid,'(1)-(2)']),
source = ', '.join([ft1src,ft2src]) )
all_plotnames.append(self.plot3_id)
if len(all_plotnames)>0:
self.composite_plotspecs = {
self.plotall_id: all_plotnames
}
else:
self.composite_plotspecs = {}
self.computation_planned = True
def vars_normal_contours( self, filetable, varnom, seasonid, aux=None ):
reduced_varlis = [
reduced_variable(
variableid=varnom, filetable=filetable, season=self.season,
reduction_function=(lambda x,vid: reduce2latlon_seasonal( x, self.season, self.region, vid) ) ),
reduced_variable(
# variance, for when there are variance climatology files
variableid=varnom+'_var', filetable=filetable, season=self.season,
reduction_function=(lambda x,vid: reduce2latlon_seasonal( x, self.season, self.region, vid ) ) )
]
for v in reduced_varlis:
self.reduced_variables[v.id()] = v
vid = rv.dict_id( varnom, seasonid, filetable )
vidvar = rv.dict_id( varnom+'_var', seasonid, filetable ) # variance
return vid, vidvar
def vars_stdvar_normal_contours( self, filetable, varnom, seasonid, aux=None ):
"""Set up for a lat-lon contour plot, as in plot set 5. Data is averaged over all other
axes. The variable given by varnom is *not* a data variable suitable for reduction. It is
a standard_variable. Its inputs will be reduced, then it will be set up as a derived_var.
"""
varid,rvs,dvs = self.stdvar2var(
varnom, filetable, self.season,\
(lambda x,vid:
reduce2latlon_seasonal(x, self.season, self.region, vid, exclude_axes=[
'isccp_prs','isccp_tau','cosp_prs','cosp_tau',
'modis_prs','modis_tau','cosp_tau_modis',
'misr_cth','misr_tau','cosp_htmisr']) ))
# ... isccp_prs, isccp_tau etc. are used for cloud variables and need special treatment
if varid is None:
return None,None
for rv in rvs:
self.reduced_variables[rv.id()] = rv
for dv in dvs:
self.derived_variables[dv.id()] = dv
# This is the former code, which was moved to stdvar2var so other classes may use it:
#if varnom not in self.standard_variables:
# return None,None
#computable = False
#for svd in self.standard_variables[varnom]: # loop over ways to compute varnom
# invarnoms = svd._inputs
# if len( set(invarnoms) - set(filetable.list_variables()) )<=0:
# func = svd._func
# computable = True
# break
#if not computable:
# return None,None
#rvs = []
#for ivn in invarnoms:
# rv = reduced_variable(
# variableid=ivn, filetable=filetable, season=self.season,
# reduction_function=(lambda x,vid: reduce2latlon_seasonal( x, self.season, vid ) ))
# self.reduced_variables[rv.id()] = rv
# rvs.append(rv.id())
#varid = dv.dict_id( varnom, '', seasonid, filetable )
#self.derived_variables[varid] = derived_var( vid=varid, inputs=rvs, func=func )
return varid, None
def plan_computation_level_surface( self, filetable1, filetable2, varid, seasonid, aux ):
"""Set up for a lat-lon contour plot, averaged in other directions - except that if the
variable to be plotted depend on level, it is not averaged over level. Instead, the value
at a single specified pressure level, aux, is used. The units of aux are millbars."""
# In calling reduce_time_seasonal, I am assuming that no variable has axes other than
# (time, lev,lat,lon).
# If there were another axis, then we'd need a new function which reduces it as well.
if not isinstance(aux,Number): return None
pselect = udunits(aux,'mbar')
# self.reduced_variables = {
# varid+'_1': reduced_variable( # var=var(time,lev,lat,lon)
# variableid=varid, filetable=filetable1, reduced_var_id=varid+'_1', season=self.season,
# reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, vid ) ) ),
# 'hyam_1': reduced_variable( # hyam=hyam(lev)
# variableid='hyam', filetable=filetable1, reduced_var_id='hyam_1',season=self.season,
# reduction_function=(lambda x,vid=None: x) ),
# 'hybm_1': reduced_variable( # hybm=hybm(lev)
# variableid='hybm', filetable=filetable1, reduced_var_id='hybm_1',season=self.season,
# reduction_function=(lambda x,vid=None: x) ),
# 'PS_1': reduced_variable( # ps=ps(time,lat,lon)
# variableid='PS', filetable=filetable1, reduced_var_id='PS_1', season=self.season,
# reduction_function=(lambda x,vid=None: reduce_time_seasonal( x, self.season, vid ) ) ) }
reduced_varlis = [
reduced_variable( # var=var(time,lev,lat,lon)
variableid=varid, filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, self.region, vid ) ) ),
reduced_variable( # hyam=hyam(lev)
variableid='hyam', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid=None: select_region( x, self.region)) ),
reduced_variable( # hybm=hybm(lev)
variableid='hybm', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid=None: select_region( x, self.region)) ),
reduced_variable( # ps=ps(time,lat,lon)
variableid='PS', filetable=filetable1, season=self.season,
reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, self.region, vid ) ) ) ]
# vid1 = varid+'_p_1'
# vidl1 = varid+'_lp_1'
vid1 = dv.dict_id( varid, 'p', seasonid, filetable1)
vidl1 = dv.dict_id(varid, 'lp', seasonid, filetable1)
self.derived_variables = {
vid1: derived_var( vid=vid1, inputs =
[rv.dict_id(varid,seasonid,filetable1), rv.dict_id('hyam',seasonid,filetable1),
rv.dict_id('hybm',seasonid,filetable1), rv.dict_id('PS',seasonid,filetable1) ],
#was vid1: derived_var( vid=vid1, inputs=[ varid+'_1', 'hyam_1', 'hybm_1', 'PS_1' ],
func=verticalize ),
vidl1: derived_var( vid=vidl1, inputs=[vid1], func=(lambda z: select_lev(z,pselect))) }
ft1src = filetable1.source()
self.single_plotspecs = {
self.plot1_id: plotspec(
# was vid = varid+'_1',
# was zvars = [vid1], zfunc = (lambda z: select_lev( z, pselect ) ),
vid = ps.dict_idid(vidl1),
zvars = [vidl1], zfunc = (lambda z: z),
plottype = self.plottype,
#title = ' '.join([varid,seasonid,filetable1._strid,'at',str(pselect)]) ) }
title = ' '.join([varid,seasonid,'at',str(pselect),'(1)']),
source = ft1src ) }
if filetable2 is None:
self.reduced_variables = { v.id():v for v in reduced_varlis }
self.composite_plotspecs = {
self.plotall_id: [ self.plot1_id ]
}
self.computation_planned = True
return
if 'hyam' in filetable2.list_variables() and 'hybm' in filetable2.list_variables():
# hybrid levels in use, convert to pressure levels
reduced_varlis += [
reduced_variable( # var=var(time,lev,lat,lon)
variableid=varid, filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, self.region, vid ) ) ),
reduced_variable( # hyam=hyam(lev)
variableid='hyam', filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid=None: select_region( x, self.region)) ),
reduced_variable( # hybm=hybm(lev)
variableid='hybm', filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid=None: select_region( x, self.region)) ),
reduced_variable( # ps=ps(time,lat,lon)
variableid='PS', filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, self.region, vid ) ) )
]
#vid2 = varid+'_p_2'
#vidl2 = varid+'_lp_2'
vid2 = dv.dict_id( varid, 'p', seasonid, filetable2 )
            vidl2 = dv.dict_id( varid, 'lp', seasonid, filetable2 )
self.derived_variables[vid2] = derived_var( vid=vid2, inputs=[
rv.dict_id(varid,seasonid,filetable2), rv.dict_id('hyam',seasonid,filetable2),
rv.dict_id('hybm',seasonid,filetable2), rv.dict_id('PS',seasonid,filetable2) ],
func=verticalize )
self.derived_variables[vidl2] = derived_var( vid=vidl2, inputs=[vid2],
func=(lambda z: select_lev(z,pselect) ) )
else:
# no hybrid levels, assume pressure levels.
#vid2 = varid+'_2'
#vidl2 = varid+'_lp_2'
vid2 = rv.dict_id(varid,seasonid,filetable2)
vidl2 = dv.dict_id( varid, 'lp', seasonid, filetable2 )
reduced_varlis += [
reduced_variable( # var=var(time,lev,lat,lon)
variableid=varid, filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid: reduce_time_seasonal( x, self.season, self.region, vid ) ) )
]
self.derived_variables[vidl2] = derived_var( vid=vidl2, inputs=[vid2],
func=(lambda z: select_lev(z,pselect) ) )
self.reduced_variables = { v.id():v for v in reduced_varlis }
try:
ft2src = filetable2.source()
except:
ft2src = ''
self.single_plotspecs[self.plot2_id] = plotspec(
#was vid = varid+'_2',
vid = ps.dict_idid(vidl2),
zvars = [vidl2], zfunc = (lambda z: z),
plottype = self.plottype,
#title = ' '.join([varid,seasonid,filetable2._strid,'at',str(pselect)]) )
title = ' '.join([varid,seasonid,'at',str(pselect),'(2)']),
source = ft2src )
self.single_plotspecs[self.plot3_id] = plotspec(
#was vid = varid+'_diff',
vid = ps.dict_id(varid,'diff',seasonid,filetable1,filetable2),
zvars = [vidl1,vidl2], zfunc = aminusb_2ax,
plottype = self.plottype,
#title = ' '.join([varid,seasonid,filetable1._strid,'-',filetable2._strid,'at',str(pselect)]) )
title = ' '.join([varid,seasonid,'at',str(pselect),'(1)-(2)']),
source = ', '.join([ft1src,ft2src]) )
self.composite_plotspecs = {
self.plotall_id: [ self.plot1_id, self.plot2_id, self.plot3_id ]
}
self.computation_planned = True
def _results(self,newgrid=0):
results = plot_spec._results(self,newgrid)
if results is None: return None
psv = self.plotspec_values
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
else:
print "WARNING not synchronizing ranges for",self.plot1_id,"and",self.plot2_id
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set5(amwg_plot_set5and6):
"""represents one plot from AMWG Diagnostics Plot Set 5
Each contour plot is a set of three contour plots: one each for model output, observations, and
the difference between the two. A plot's x-axis is longitude and its y-axis is the latitude;
normally a world map will be overlaid. """
name = '5 - Horizontal Contour Plots of Seasonal Means'
number = '5'
class amwg_plot_set6old(amwg_plot_set5and6):
"""represents one plot from AMWG Diagnostics Plot Set 6
Each contour plot is a set of three contour plots: one each for model output, observations, and
the difference between the two. A plot's x-axis is longitude and its y-axis is the latitude;
normally a world map will be overlaid. """
#name = '6old - Horizontal Contour Plots of Seasonal Means'
#number = '6old'
class amwg_plot_set6(amwg_plot_spec):
"""represents one plot from AMWG Diagnostics Plot Set 6
This is a vector+contour plot - the contour plot shows magnitudes and the vector plot shows both
directions and magnitudes. Unlike NCAR's diagnostics, our AMWG plot set 6 uses a different
menu from set 5.
Each compound plot is a set of three simple plots: one each for model output, observations, and
the difference between the two. A plot's x-axis is longitude and its y-axis is the latitude;
normally a world map will be overlaid.
"""
    name = "6 - (Experimental, doesn't work with GUI) Horizontal Vector Plots of Seasonal Means"
number = '6'
standard_variables = { 'STRESS':[['STRESS_MAG','TAUX','TAUY'],['TAUX','TAUY']] }
# ...built-in variables. The key is the name, as the user specifies it.
# The value is a lists of lists of the required data variables. If the dict item is, for
# example, V:[[a,b,c],[d,e]] then V can be computed either as V(a,b,c) or as V(d,e).
# The first in the list (e.g. [a,b,c]) is to be preferred.
#... If this works, I'll make it universal, defaulting to {}. For plot set 6, the first
# data variable will be used for the contour plot, and the other two for the vector plot.
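    # Illustrative sketch (it mirrors STRESS_setup below) of how a recipe is chosen;
    # the filetable name here is an assumption for the example:
    #     for dvars in amwg_plot_set6.standard_variables['STRESS']:
    #         if filetable.has_variables(dvars):
    #             break   # first satisfiable recipe wins, e.g. ['TAUX','TAUY']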
def __init__( self, filetable1, filetable2, varid, seasonid=None, regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string identifying the variable to be plotted, e.g. 'STRESS'.
seasonid is a string such as 'DJF'."""
plot_spec.__init__(self,seasonid)
# self.plottype = ['Isofill','Vector'] <<<< later we'll add contour plots
self.plottype = 'Vector'
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
self.varid = varid
ft1id,ft2id = filetable_ids(filetable1,filetable2)
self.plot1_id = ft1id+'_'+varid+'_'+seasonid
self.plot2_id = ft2id+'_'+varid+'_'+seasonid
self.plot3_id = ft1id+' - '+ft2id+'_'+varid+'_'+seasonid
self.plotall_id = ft1id+'_'+ft2id+'_'+varid+'_'+seasonid
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid, aux )
@staticmethod
def _list_variables( filetable1, filetable2=None ):
return amwg_plot_set6.standard_variables.keys()
@staticmethod
def _all_variables( filetable1, filetable2=None ):
return { vn:basic_plot_variable for vn in amwg_plot_set6._list_variables( filetable1, filetable2 ) }
def plan_computation( self, filetable1, filetable2, varid, seasonid, aux=None ):
if aux is None:
return self.plan_computation_normal_contours( filetable1, filetable2, varid, seasonid, aux )
else:
print "ERROR plot set 6 does not support auxiliary variable aux=",aux
return None
def STRESS_setup( self, filetable, varid, seasonid ):
"""sets up reduced & derived variables for the STRESS (ocean wind stress) variable.
Updates self.derived variables.
Returns several variable names and lists of variable names.
"""
vars = None
if filetable is not None:
for dvars in self.standard_variables[varid]: # e.g. dvars=['STRESS_MAG','TAUX','TAUY']
if filetable.has_variables(dvars):
vars = dvars # e.g. ['STRESS_MAG','TAUX','TAUY']
break
if vars==['STRESS_MAG','TAUX','TAUY']:
rvars = vars # variable names which may become reduced variables
dvars = [] # variable names which will become derived variables
var_cont = vars[0] # for contour plot
vars_vec = ( vars[1], vars[2] ) # for vector plot
vid_cont = rv.dict_id( var_cont, seasonid, filetable )
# BTW, I'll use STRESS_MAG from a climo file (as it is in obs and cam35 e.g.)
# but I don't think it is correct, because of its nonlinearity.
elif vars==['TAUX','TAUY']:
rvars = vars # variable names which may become reduced variables
dvars = ['STRESS_MAG'] # variable names which will become derived variables
var_cont = dv.dict_id( 'STRESS_MAG', '', seasonid, filetable )
vars_vec = ( vars[0], vars[1] ) # for vector plot
vid_cont = var_cont
else:
rvars = []
dvars = []
var_cont = ''
vars_vec = ['','']
vid_cont = ''
return vars, rvars, dvars, var_cont, vars_vec, vid_cont
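    # For example (hedged, following the branches above): with a filetable that
    # provides only TAUX and TAUY, STRESS_setup would return roughly
    #     vars  = ['TAUX','TAUY']        # found in the filetable
    #     rvars = ['TAUX','TAUY']        # will become reduced variables
    #     dvars = ['STRESS_MAG']         # will be derived from TAUX,TAUY
    #     vid_cont = var_cont = dv.dict_id('STRESS_MAG','',seasonid,filetable)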
def STRESS_rvs( self, filetable, rvars, seasonid, vardict ):
"""returns a list of reduced variables needed for the STRESS variable computation,
        and originating from the specified filetable. Also returned is a partial list of derived
variables which will be needed. The input rvars, a list, names the variables needed."""
if filetable is None:
return [],[]
reduced_vars = []
needed_derivedvars = []
for var in rvars:
if var in ['TAUX','TAUY'] and filetable.filefmt.find('CAM')>=0:
# We'll cheat a bit and change the sign as well as reducing dimensionality.
# The issue is that sign conventions differ in CAM output and the obs files.
if filetable.has_variables(['OCNFRAC']):
# Applying the ocean mask will get a derived variable with variableid=var.
reduced_vars.append( reduced_variable(
variableid=var, filetable=filetable, season=self.season,
#reduction_function=(lambda x,vid=var+'_nomask':
reduction_function=(lambda x,vid=None:
minusb(reduce2latlon_seasonal( x, self.season, self.region, vid)) ) ))
needed_derivedvars.append(var)
reduced_vars.append( reduced_variable(
variableid='OCNFRAC', filetable=filetable, season=self.season,
reduction_function=(lambda x,vid=None:
reduce2latlon_seasonal( x, self.season, self.region, vid) ) ))
elif filetable.has_variables(['ORO']):
# Applying the ocean mask will get a derived variable with variableid=var.
reduced_vars.append( reduced_variable(
variableid=var, filetable=filetable, season=self.season,
reduction_function=(lambda x,vid=var+'_nomask':
minusb(reduce2latlon_seasonal( x, self.season, self.region, vid)) ) ))
needed_derivedvars.append(var)
reduced_vars.append( reduced_variable(
variableid='ORO', filetable=filetable, season=self.season,
reduction_function=(lambda x,vid=None:
reduce2latlon_seasonal( x, self.season, self.region, vid) ) ))
else:
# No ocean mask available. Go on without applying one. But still apply minusb
# because this is a CAM file.
reduced_vars.append( reduced_variable(
variableid=var, filetable=filetable, season=self.season,
reduction_function=(lambda x,vid=None:
minusb(reduce2latlon_seasonal( x, self.season, self.region, vid)) ) ))
else:
# No ocean mask available and it's not a CAM file; just do an ordinary reduction.
reduced_vars.append( reduced_variable(
variableid=var, filetable=filetable, season=self.season,
reduction_function=(lambda x,vid=None:
reduce2latlon_seasonal( x, self.season, self.region, vid ) ) ))
vardict[var] = rv.dict_id( var, seasonid, filetable )
return reduced_vars, needed_derivedvars
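    # Hedged note on the sign fix above: minusb is assumed to negate its input
    # elementwise (as its name suggests), so a CAM stress component tau becomes
    #     tau_reduced = -reduce2latlon_seasonal(tau, season, region)
    # which matches the sign convention of the obs files.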
def STRESS_dvs( self, filetable, dvars, seasonid, vardict, vid_cont, vars_vec ):
"""Updates self.derived_vars and returns with derived variables needed for the STRESS
        variable computation and originating from the specified filetable.
        dvars, a list, names the variables needed.
        Also, a list of the new derived variables is returned."""
if filetable is None:
vardict[','] = None
return []
derived_vars = []
for var in dvars:
if var in ['TAUX','TAUY']:
#tau = rv.dict_id(var+'_nomask',seasonid,filetable)
tau = rv.dict_id(var,seasonid,filetable)
vid = dv.dict_id( var, '', seasonid, filetable )
if filetable.has_variables(['OCNFRAC']):
# Applying the ocean mask will get a derived variable with variableid=var.
ocn_frac = rv.dict_id('OCNFRAC',seasonid,filetable)
new_derived_var = derived_var( vid=vid, inputs=[tau,ocn_frac], outputs=[var],
func=mask_OCNFRAC )
derived_vars.append( new_derived_var )
vardict[var] = vid
self.derived_variables[vid] = new_derived_var
elif filetable.has_variables(['ORO']):
# Applying the ocean mask will get a derived variable with variableid=var.
oro = rv.dict_id('ORO',seasonid,filetable)
new_derived_var = derived_var( vid=vid, inputs=[tau,oro], outputs=[var],
func=mask_ORO )
derived_vars.append( new_derived_var )
vardict[var] = vid
self.derived_variables[vid] = new_derived_var
else:
# No ocean mask available. Go on without applying one.
pass
else:
pass
vecid = ','.join(vars_vec)
if filetable.has_variables(['OCNFRAC']) or filetable.has_variables(['ORO']):
# TAUX,TAUY are masked as derived variables
vardict[vecid] = dv.dict_id( vecid, '', seasonid, filetable )
else:
vardict[vecid] = rv.dict_id( vecid, seasonid, filetable )
if tuple(vid_cont) and vid_cont[0]=='dv': # need to compute STRESS_MAG from TAUX,TAUY
if filetable.filefmt.find('CAM')>=0: # TAUX,TAUY are derived variables
tau_x = dv.dict_id('TAUX','',seasonid,filetable)
tau_y = dv.dict_id('TAUY','',seasonid,filetable)
            else: # TAUX,TAUY are reduced variables
tau_x = rv.dict_id('TAUX',seasonid,filetable)
tau_y = rv.dict_id('TAUY',seasonid,filetable)
new_derived_var = derived_var( vid=vid_cont, inputs=[tau_x,tau_y], func=abnorm )
derived_vars.append( new_derived_var )
vardict['STRESS_MAG'] = vid_cont
self.derived_variables[vid_cont] = new_derived_var
return derived_vars
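    # Hedged sketch: abnorm is assumed to form the vector magnitude of the two
    # reduced stress components, roughly
    #     STRESS_MAG = sqrt( TAUX**2 + TAUY**2 )
    # evaluated pointwise on the lat-lon grid.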
def plan_computation_normal_contours( self, filetable1, filetable2, varid, seasonid, aux=None ):
"""Set up for a lat-lon contour plot, as in plot set 5. Data is averaged over all other
axes."""
self.derived_variables = {}
vars_vec1 = {}
vars_vec2 = {}
try:
if varid=='STRESS' or varid=='SURF_STRESS':
vars1,rvars1,dvars1,var_cont1,vars_vec1,vid_cont1 =\
self.STRESS_setup( filetable1, varid, seasonid )
vars2,rvars2,dvars2,var_cont2,vars_vec2,vid_cont2 =\
self.STRESS_setup( filetable2, varid, seasonid )
if vars1 is None and vars2 is None:
raise Exception("cannot find standard variables in data 2")
else:
print "ERROR, AMWG plot set 6 does not yet support",varid
return None
except Exception as e:
print "ERROR cannot find suitable standard_variables in data for varid=",varid
print "exception is",e
return None
reduced_varlis = []
vardict1 = {'':'nameless_variable'}
vardict2 = {'':'nameless_variable'}
new_reducedvars, needed_derivedvars = self.STRESS_rvs( filetable1, rvars1, seasonid, vardict1 )
reduced_varlis += new_reducedvars
self.reduced_variables = { v.id():v for v in reduced_varlis }
self.STRESS_dvs( filetable1, needed_derivedvars, seasonid, vardict1, vid_cont1, vars_vec1 )
new_reducedvars, needed_derivedvars = self.STRESS_rvs( filetable2, rvars2, seasonid, vardict2 )
reduced_varlis += new_reducedvars
self.STRESS_dvs( filetable2, needed_derivedvars, seasonid, vardict2, vid_cont2, vars_vec2 )
self.reduced_variables = { v.id():v for v in reduced_varlis }
self.single_plotspecs = {}
ft1src = filetable1.source()
try:
ft2src = filetable2.source()
except:
ft2src = ''
vid_vec1 = vardict1[','.join([vars_vec1[0],vars_vec1[1]])]
vid_vec11 = vardict1[vars_vec1[0]]
vid_vec12 = vardict1[vars_vec1[1]]
vid_vec2 = vardict2[','.join([vars_vec2[0],vars_vec2[1]])]
vid_vec21 = vardict2[vars_vec2[0]]
vid_vec22 = vardict2[vars_vec2[1]]
        plot_type_temp = ['Isofill','Vector'] # can't use self.plottype yet because it isn't supported elsewhere as a list or tuple <<<<<
if vars1 is not None:
# Draw two plots, contour and vector, over one another to get a single plot.
# Only one needs title,source.
title = ' '.join([varid,seasonid,'(1)'])
contplot = plotspec(
vid = ps.dict_idid(vid_cont1), zvars = [vid_cont1], zfunc = (lambda z: z),
plottype = plot_type_temp[0],
title = title, source=ft1src )
vecplot = plotspec(
vid = ps.dict_idid(vid_vec1), zvars=[vid_vec11,vid_vec12], zfunc = (lambda z,w: (z,w)),
plottype = plot_type_temp[1],
title = title, source=ft1src )
#self.single_plotspecs[self.plot1_id] = [contplot,vecplot]
self.single_plotspecs[self.plot1_id+'c'] = contplot
self.single_plotspecs[self.plot1_id+'v'] = vecplot
if vars2 is not None:
# Draw two plots, contour and vector, over one another to get a single plot.
# Only one needs title,source.
title = ' '.join([varid,seasonid,'(2)'])
contplot = plotspec(
vid = ps.dict_idid(vid_cont2), zvars = [vid_cont2], zfunc = (lambda z: z),
plottype = plot_type_temp[0],
title = title, source=ft2src )
vecplot = plotspec(
vid = ps.dict_idid(vid_vec2), zvars=[vid_vec21,vid_vec22], zfunc = (lambda z,w: (z,w)),
plottype = plot_type_temp[1],
title = title, source=ft2src )
self.single_plotspecs[self.plot2_id+'c'] = contplot
self.single_plotspecs[self.plot2_id+'v'] = vecplot
if vars1 is not None and vars2 is not None:
title = ' '.join([varid,seasonid,'(1)-(2)'])
source = ', '.join([ft1src,ft2src])
contplot = plotspec(
vid = ps.dict_id(var_cont1,'diff',seasonid,filetable1,filetable2),
                zvars = [vid_cont1,vid_cont2], zfunc = aminusb_2ax, # This is the difference of magnitudes; should be the magnitude of the difference!
plottype = plot_type_temp[0], title=title, source=source )
vecplot = plotspec(
vid = ps.dict_id(vid_vec2,'diff',seasonid,filetable1,filetable2),
zvars = [vid_vec11,vid_vec12,vid_vec21,vid_vec22],
zfunc = (lambda z1,w1,z2,w2: (aminusb_2ax(z1,z2),aminusb_2ax(w1,w2))),
plottype = plot_type_temp[1],
title = title, source = source )
self.single_plotspecs[self.plot3_id+'c'] = contplot
self.single_plotspecs[self.plot3_id+'v'] = vecplot
# initially we're not plotting the contour part of the plots....
#for pln,pl in self.single_plotspecs.iteritems(): #jfp
# print "dbg single plot",pln,pl.plottype
# print "dbg ",pl.zvars
self.composite_plotspecs = {
self.plot1_id: ( self.plot1_id+'c', self.plot1_id+'v' ),
#self.plot1_id: [ self.plot1_id+'v' ],
self.plot2_id: ( self.plot2_id+'c', self.plot2_id+'v' ),
self.plot3_id: ( self.plot3_id+'c', self.plot3_id+'v' ),
self.plotall_id: [self.plot1_id, self.plot2_id, self.plot3_id]
}
self.computation_planned = True
def _results(self,newgrid=0):
results = plot_spec._results(self,newgrid)
if results is None: return None
psv = self.plotspec_values
# >>>> synchronize_ranges is a bit more complicated because plot1_id,plot2_id aren't
# >>>> here the names of single_plotspecs members, and should be for synchronize_ranges.
# >>>> And the result of one sync of 2 plots will apply to 4 plots, not just those 2.
# >>>> So for now, don't do it...
#if self.plot1_id in psv and self.plot2_id in psv and\
# psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
# psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
#else:
# print "WARNING not synchronizing ranges for",self.plot1_id,"and",self.plot2_id
print "WARNING not synchronizing ranges for AMWG plot set 6"
for key,val in psv.items():
if type(val) is not list and type(val) is not tuple: val=[val]
for v in val:
if v is None: continue
if type(v) is tuple:
continue # finalize has already been called for this, it comes from plotall_id but also has its own entry
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set7(amwg_plot_spec):
"""This represents one plot from AMWG Diagnostics Plot Set 7
Each graphic is a set of three polar contour plots: model output, observations, and
the difference between the two. A plot's x-axis is longitude and its y-axis is the latitude;
normally a world map will be overlaid using stereographic projection. The user selects the
hemisphere.
"""
name = '7 - Polar Contour and Vector Plots of Seasonal Means'
number = '7'
def __init__( self, filetable1, filetable2, varid, seasonid=None, region=None, aux=slice(0,None) ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string identifying the variable to be plotted, e.g. 'TREFHT'.
seasonid is a string such as 'DJF'."""
plot_spec.__init__(self,seasonid)
self.plottype = 'Isofill_polar'
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
self.varid = varid
ft1id,ft2id = filetable_ids(filetable1, filetable2)
self.plot1_id = ft1id+'_'+varid+'_'+seasonid
self.plot2_id = ft2id+'_'+varid+'_'+seasonid
self.plot3_id = ft1id+' - '+ft2id+'_'+varid+'_'+seasonid
self.plotall_id = ft1id+'_'+ft2id+'_'+varid+'_'+seasonid
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid, region, aux )
@staticmethod
def _list_variables( filetable1, filetable2=None ):
allvars = amwg_plot_set5and6._all_variables( filetable1, filetable2 )
listvars = allvars.keys()
listvars.sort()
return listvars
@staticmethod
def _all_variables( filetable1, filetable2=None ):
allvars = amwg_plot_spec.package._all_variables( filetable1, filetable2, "amwg_plot_spec" )
for varname in amwg_plot_spec.package._list_variables_with_levelaxis(
filetable1, filetable2, "amwg_plot_spec" ):
allvars[varname] = basic_pole_variable
return allvars
def plan_computation( self, filetable1, filetable2, varid, seasonid, region=None, aux=slice(0,None) ):
"""Set up for a lat-lon polar contour plot. Data is averaged over all other axes."""
reduced_varlis = [
reduced_variable(
variableid=varid, filetable=filetable1, season=self.season,
reduction_function=(lambda x, vid, region=None: reduce2latlon_seasonal( x(latitude=aux, longitude=(0, 360)), self.season, region, vid=vid ) ) ),
reduced_variable(
variableid=varid, filetable=filetable2, season=self.season,
reduction_function=(lambda x,vid, region=None: reduce2latlon_seasonal( x(latitude=aux, longitude=(0, 360)), self.season, region, vid=vid ) ) )
]
self.reduced_variables = { v.id():v for v in reduced_varlis }
vid1 = rv.dict_id( varid, seasonid, filetable1 )
vid2 = rv.dict_id( varid, seasonid, filetable2 )
self.derived_variables = {}
self.single_plotspecs = {
self.plot1_id: plotspec(
vid = ps.dict_idid(vid1),
zvars = [vid1], zfunc = (lambda z: z),
plottype = self.plottype ),
self.plot2_id: plotspec(
vid = ps.dict_idid(vid2),
zvars = [vid2], zfunc = (lambda z: z),
plottype = self.plottype ),
self.plot3_id: plotspec(
vid = ps.dict_id(varid,'diff',seasonid,filetable1,filetable2),
zvars = [vid1,vid2], zfunc = aminusb_2ax,
plottype = self.plottype )
}
self.composite_plotspecs = {
self.plotall_id: [ self.plot1_id, self.plot2_id, self.plot3_id]
}
self.computation_planned = True
#pdb.set_trace()
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self,newgrid)
if results is None: return None
psv = self.plotspec_values
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
else:
print "WARNING not synchronizing ranges for",self.plot1_id,"and",self.plot2_id
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set8(amwg_plot_spec):
"""This class represents one plot from AMWG Diagnostics Plot Set 8.
    Each such plot is a set of three contour plots: one each for the model output,
    the observations, and the difference between the two. A plot's x-axis is time and its y-axis is latitude.
    The data presented is a climatological zonal mean throughout the year.
    To generate plots use Dataset 1 in the AMWG diagnostics menu, set path to the directory.
    Repeat this for observation 1 and then apply. If only Dataset 1 is specified, a plot
    of the model zonal mean is displayed.
"""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
name = '8 - Annual Cycle Contour Plots of Zonal Means '
number = '8'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', regionid=None, aux=None ):
"""filetable1, should be a directory filetable for each model.
varid is a string, e.g. 'TREFHT'. The zonal mean is computed for each month. """
self.season = seasonid
self.FT1 = (filetable1 != None)
self.FT2 = (filetable2 != None)
self.CONTINUE = self.FT1
if not self.CONTINUE:
print "user must specify a file table"
return None
self.filetables = [filetable1]
if self.FT2:
self.filetables +=[filetable2]
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
plot_spec.__init__(self, seasonid)
self.plottype = 'Isofill'
self._seasonid = seasonid
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.plot1_id = '_'.join([ft1id, varid, 'composite', 'contour'])
if self.FT2:
self.plot2_id = '_'.join([ft2id, varid, 'composite', 'contour'])
self.plot3_id = '_'.join([ft1id+'-'+ft2id, varid, seasonid, 'contour'])
self.plotall_id = '_'.join([ft1id,ft2id, varid, seasonid])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#setup the reduced variables
self.reduced_variables = {}
vidAll = {}
for FT in self.filetables:
#pdb.set_trace()
VIDs = []
for i in range(1, 13):
month = cdutil.times.getMonthString(i)
#pdb.set_trace()
#create identifiers
VID = rv.dict_id(varid, month, FT)
RV = reduced_variable(variableid = varid,
filetable = FT,
season = cdutil.times.Seasons(VID[2]),
reduction_function = (lambda x, vid=id2str(VID), month=VID[2]:
reduce2lat_seasonal(x, cdutil.times.Seasons(month), self.region, vid=vid)))
self.reduced_variables[RV.id()] = RV
VIDs += [VID]
vidAll[FT] = VIDs
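        # At this point (hedged illustration) there is one reduced variable per
        # month and filetable, e.g. roughly
        #     vidAll[filetable1] = [ rv.dict_id(varid,'JAN',filetable1), ...,
        #                            rv.dict_id(varid,'DEC',filetable1) ]
        # join_data below is assumed to stack the 12 zonal means into a single
        # month-by-latitude array suitable for contouring.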
#print self.reduced_variables.keys()
vidModel = dv.dict_id(varid, 'ZonalMean model', self._seasonid, filetable1)
if self.FT2:
vidObs = dv.dict_id(varid, 'ZonalMean obs', self._seasonid, filetable2)
vidDiff = dv.dict_id(varid, 'ZonalMean difference', self._seasonid, filetable1)
else:
vidObs = None
vidDiff = None
self.derived_variables = {}
#create the derived variables which is the composite of the months
#print vidAll[filetable1]
self.derived_variables[vidModel] = derived_var(vid=id2str(vidModel), inputs=vidAll[filetable1], func=join_data)
if self.FT2:
#print vidAll[filetable2]
self.derived_variables[vidObs] = derived_var(vid=id2str(vidObs), inputs=vidAll[filetable2], func=join_data)
#create the derived variable which is the difference of the composites
self.derived_variables[vidDiff] = derived_var(vid=id2str(vidDiff), inputs=[vidModel, vidObs], func=aminusb_ax2)
#create composite plots np.transpose zfunc = (lambda x: x), zfunc = (lambda z:z),
self.single_plotspecs = {
self.plot1_id: plotspec(vid = ps.dict_idid(vidModel),
zvars = [vidModel],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )}
if self.FT2:
self.single_plotspecs[self.plot2_id] = \
plotspec(vid = ps.dict_idid(vidObs),
zvars=[vidObs],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )
self.single_plotspecs[self.plot3_id] = \
plotspec(vid = ps.dict_idid(vidDiff),
zvars = [vidDiff],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )
self.composite_plotspecs = { self.plotall_id: self.single_plotspecs.keys() }
self.computation_planned = True
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 8 found nothing to plot"
return None
psv = self.plotspec_values
if self.FT2:
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set9(amwg_plot_spec):
"""This class represents one plot from AMWG Diagnostics Plot Set 9.
    Each such plot is a set of three contour plots: two for the model output and
    one for the difference between the two. A plot's x-axis is latitude and its y-axis is longitude.
    Both model plots should have contours at the same values of their variable. The data
    presented is a climatological mean - i.e., seasonal-average of the specified season, DJF, JJA, etc.
    To generate plots use Dataset 1 in the AMWG diagnostics menu, set path to the directory,
and enter the file name. Repeat this for dataset 2 and then apply.
"""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
name = '9 - Horizontal Contour Plots of DJF-JJA Differences'
number = '9'
def __init__( self, filetable1, filetable2, varid, seasonid='DJF-JJA', regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for each model.
        varid is a string, e.g. 'TREFHT'. The seasonal difference is specified by
        seasonid, a string, e.g. 'DJF-JJA'. """
import string
#the following is for future case of setting 2 seasons
if "-" in seasonid:
_seasons = string.split(seasonid, '-')
if len(_seasons) == 2:
self._s1, self._s2 = _seasons
else:
self._s1 = 'DJF'
self._s2 = 'JJA'
seasonid = 'DJF-JJA'
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
plot_spec.__init__(self, seasonid)
self.plottype = 'Isofill'
self._seasonid = seasonid
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.plot1_id = '_'.join([ft1id, varid, self._s1, 'contour'])
self.plot2_id = '_'.join([ft2id, varid, self._s2, 'contour'])
self.plot3_id = '_'.join([ft1id+'-'+ft2id, varid, seasonid, 'contour'])
self.plotall_id = '_'.join([ft1id,ft2id, varid, seasonid])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#check if there is data to process
ft1_valid = False
ft2_valid = False
if filetable1 is not None and filetable2 is not None:
ft1 = filetable1.find_files(varid)
ft2 = filetable2.find_files(varid)
            ft1_valid = ft1 is not None and ft1!=[] # true iff filetable1 has data for varid
            ft2_valid = ft2 is not None and ft2!=[] # true iff filetable2 has data for varid
else:
print "ERROR: user must specify 2 data files"
return None
if not ft1_valid or not ft2_valid:
return None
#generate identifiers
vid1 = rv.dict_id(varid, self._s1, filetable1)
vid2 = rv.dict_id(varid, self._s2, filetable2)
vid3 = dv.dict_id(varid, 'SeasonalDifference', self._seasonid, filetable1)#, ft2=filetable2)
#setup the reduced variables
vid1_season = cdutil.times.Seasons(self._s1)
if vid1_season is None:
vid1_season = seasonsyr
vid2_season = cdutil.times.Seasons(self._s2)
if vid2_season is None:
vid2_season = seasonsyr
rv_1 = reduced_variable(variableid=varid, filetable=filetable1, season=vid1_season,
reduction_function=( lambda x, vid=vid1: reduce2latlon_seasonal(x, vid1_season, self.region, vid=vid)) )
rv_2 = reduced_variable(variableid=varid, filetable=filetable2, season=vid2_season,
reduction_function=( lambda x, vid=vid2: reduce2latlon_seasonal(x, vid2_season, self.region, vid=vid)) )
self.reduced_variables = {rv_1.id(): rv_1, rv_2.id(): rv_2}
#create the derived variable which is the difference
self.derived_variables = {}
self.derived_variables[vid3] = derived_var(vid=vid3, inputs=[vid1, vid2], func=aminusb_2ax)
self.single_plotspecs = {
self.plot1_id: plotspec(
vid = ps.dict_idid(vid1),
zvars=[vid1],
zfunc = (lambda z: z),
plottype = self.plottype ),
self.plot2_id: plotspec(
vid = ps.dict_idid(vid2),
zvars=[vid2],
zfunc = (lambda z: z),
plottype = self.plottype ),
self.plot3_id: plotspec(
vid = ps.dict_idid(vid3),
zvars = [vid3],
zfunc = (lambda x: x),
plottype = self.plottype )
}
self.composite_plotspecs = { self.plotall_id: self.single_plotspecs.keys() }
self.computation_planned = True
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 9 found nothing to plot"
return None
psv = self.plotspec_values
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set10(amwg_plot_spec, basic_id):
"""represents one plot from AMWG Diagnostics Plot Set 10.
The plot is a plot of 2 curves comparing model with obs. The x-axis is month of the year and
its y-axis is the specified variable. The data presented is a climatological mean - i.e.,
time-averaged with times restricted to the specified month."""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
name = '10 - Annual Line Plots of Global Means'
number = '10'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', regionid=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varid is a string, e.g. 'TREFHT'. Seasonid is a string, e.g. 'DJF'."""
basic_id.__init__(self, varid, seasonid)
plot_spec.__init__(self, seasonid)
self.plottype = 'Yxvsx'
self.season = cdutil.times.Seasons(self._seasonid)
if regionid=="Global" or regionid=="global" or regionid is None:
self._regionid="Global"
else:
self._regionid=regionid
self.region = interpret_region(regionid)
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.plot_id = '_'.join([ft1id, ft2id, varid, self.plottype])
self.computation_planned = False
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.reduced_variables = {}
vidAll = {}
for FT in [filetable1, filetable2]:
VIDs = []
for i in range(1, 13):
month = cdutil.times.getMonthString(i)
#pdb.set_trace()
#create identifiers
VID = rv.dict_id(varid, month, FT) #cdutil.times.getMonthIndex(VID[2])[0]-1
RV = reduced_variable(variableid = varid,
filetable = FT,
season = cdutil.times.Seasons(month),
reduction_function = (lambda x, vid=id2str(VID), month = VID[2]:
reduce2scalar_seasonal_zonal(x, cdutil.times.Seasons(month), self.region, vid=vid)))
VID = id2str(VID)
self.reduced_variables[VID] = RV
VIDs += [VID]
vidAll[FT] = VIDs
#print self.reduced_variables.keys()
#create the identifiers
vidModel = dv.dict_id(varid, 'model', "", filetable1)
vidObs = dv.dict_id(varid, 'obs', "", filetable2)
self.vidModel = id2str(vidModel)
self.vidObs = id2str(vidObs)
#create the derived variables which is the composite of the months
#pdb.set_trace()
model = derived_var(vid=self.vidModel, inputs=vidAll[filetable1], func=join_1d_data)
obs = derived_var(vid=self.vidObs, inputs=vidAll[filetable2], func=join_1d_data)
self.derived_variables = {self.vidModel: model, self.vidObs: obs}
#create the plot spec
self.single_plotspecs = {}
self.single_plotspecs[self.plot_id] = plotspec(self.plot_id,
zvars = [self.vidModel],
zfunc = (lambda y: y),
z2vars = [self.vidObs ],
z2func = (lambda z: z),
plottype = self.plottype)
self.computation_planned = True
def _results(self,newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None: return None
psv = self.plotspec_values
#print self.plotspec_values.keys()
model = self.single_plotspecs[self.plot_id].zvars[0]
obs = self.single_plotspecs[self.plot_id].z2vars[0]
modelVal = self.variable_values[model]
obsVal = self.variable_values[obs]
plot_val = uvc_plotspec([modelVal, obsVal],
self.plottype,
title=self.plot_id)
plot_val.finalize()
return [ plot_val]
class amwg_plot_set11(amwg_plot_spec):
name = '11 - Pacific annual cycle, Scatter plots:incomplete'
number = '11'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', region=None, aux=None ):
"""filetable1, filetable2 should be filetables for each model.
varid is a string, e.g. 'TREFHT'. The seasonal difference is Seasonid
It is is a string, e.g. 'DJF-JJA'. """
import string
print 'plot set 11'
plot_spec.__init__(self, seasonid)
self.plottype = 'Scatter'
self._seasonid = seasonid
self.season = cdutil.times.Seasons(self._seasonid)
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.datatype = ['model', 'obs']
self.filetables = [filetable1, filetable2]
self.filetable_ids = [ft1id, ft2id]
self.seasons = ['ANN', 'DJF', 'JJA']
self.vars = ['LWCF', 'SWCF']
self.plot_ids = []
vars_id = '_'.join(self.vars)
for dt in self.datatype:
for season in self.seasons:
plot_id = '_'.join([dt, season])
self.plot_ids += [plot_id]
self.plotall_id = '_'.join(self.datatype + ['Warm', 'Pool'])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#check if there is data to process
ft1_valid = False
ft2_valid = False
if filetable1 is not None and filetable2 is not None:
ft1 = filetable1.find_files(self.vars[0])
ft2 = filetable2.find_files(self.vars[1])
            ft1_valid = ft1 is not None and ft1!=[] # true iff filetable1 has data for LWCF
            ft2_valid = ft2 is not None and ft2!=[] # true iff filetable2 has data for SWCF
else:
print "ERROR: user must specify 2 data files"
return None
if not ft1_valid or not ft2_valid:
return None
VIDs = []
for ft in self.filetables:
for season in self.seasons:
for var in self.vars:
VID = rv.dict_id(var, season, ft)
VID = id2str(VID)
#print VID
RV = reduced_variable( variableid=var,
filetable=ft,
season=cdutil.times.Seasons(season),
reduction_function=( lambda x, vid=VID:x) )
self.reduced_variables[VID] = RV
VIDs += [VID]
        #setup the reduced variable pairs
self.rv_pairs = []
i = 0
while i <= 10:
#print VIDs[i], VIDs[i+1]
self.rv_pairs += [(VIDs[i], VIDs[i+1])] #( self.reduced_variables[VIDs[i]], self.reduced_variables[VIDs[i+1]] )]
i += 2
self.single_plotspecs = {}
self.composite_plotspecs[self.plotall_id] = []
title = self.vars[0] + ' vs ' + self.vars[1]
for i, plot_id in enumerate(self.plot_ids):
#zvars, z2vars = self.reduced_variables[VIDs[i]], self.reduced_variables[VIDs[i+1]]
xVID, yVID = self.rv_pairs[i]
#print xVID, yVID z2rangevars=[-120., 0.], zrangevars=[0., 120.], z2vars = [yVID],
self.single_plotspecs[plot_id] = plotspec(vid = plot_id,
zvars=[xVID],
zfunc = (lambda x: x),
zrangevars={'xrange':[0., 120.]},
z2vars = [yVID],
z2func = (lambda x: x),
z2rangevars={'yrange':[-120., 0.]},
plottype = 'Scatter',
title = title,
overplotline = True)
#self.composite_plotspecs[plot_id] = ( plot_id+'scatter', plot_id+'line' )
#self.composite_plotspecs[self.plotall_id] += [plot_id]
self.composite_plotspecs = { self.plotall_id: self.single_plotspecs.keys() }
self.computation_planned = True
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 11 found nothing to plot"
return None
psv = self.plotspec_values
#pdb.set_trace()
#if self.plot_ids[0] in psv and self.plot_ids[0] is not None:
# for plot_id in self.plot_ids[1:]:
# if plot_id in psv and plot_id is not None:
# psv[plot_id].synchronize_ranges(psv[self.plot_ids[0]])
for key,val in psv.items():
#if type(val) is not list: val=[val]
if type(val) is not list and type(val) is not tuple: val=[val]
for v in val:
if v is None: continue
if type(v) is tuple:
continue # finalize has already been called for this, it comes from plotall_id but also has its own entry
v.finalize()
return self.plotspec_values[self.plotall_id]
class amwg_plot_set12(amwg_plot_spec):
name = '12 - Vertical Profiles at 17 selected raobs stations:incomplete'
number = '12'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', region=None, aux=None ):
"""filetable1, filetable2 should be filetables for each model.
varid is a string, e.g. 'TREFHT'. The seasonal difference is Seasonid
It is is a string, e.g. 'DJF-JJA'. """
import string
print 'plot set 12'
plot_spec.__init__(self, seasonid)
self.plottype = 'Scatter'
self._seasonid = seasonid
self.season = cdutil.times.Seasons(self._seasonid)
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.datatype = ['model', 'obs']
self.filetables = [filetable1, filetable2]
self.filetable_ids = [ft1id, ft2id]
self.months = ['JAN', 'APR', 'JUL', 'AUG']
self.plot_ids = []
for month in self.months:
plot_id = '_'.join(['month', month])
self.plot_ids += [plot_id]
#print self.plot_ids
self.plotall_id = '_'.join(self.datatype + ['Warm', 'Pool'])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#check if there is data to process
ft1_valid = False
ft2_valid = False
if filetable1 is not None and filetable2 is not None:
ft1 = filetable1.find_files(varid)
ft2 = filetable2.find_files(varid)
            ft1_valid = ft1 is not None and ft1!=[] # true iff filetable1 has data for varid
            ft2_valid = ft2 is not None and ft2!=[] # true iff filetable2 has data for varid
else:
print "ERROR: user must specify 2 data files"
return None
if not ft1_valid or not ft2_valid:
return None
VIDs = {}
for dt, ft in zip(self.datatype, self.filetables):
VIDs[dt] = []
for month in self.months:
#for var in self.vars:
VID = rv.dict_id(varid, month, ft)
#print VID, VID[2]
RF = (lambda x, vid=VID, month=VID[2]: reduce2level(x, seasons=month, vid=vid) )
RV = reduced_variable( variableid=varid,
filetable=ft,
season=cdutil.times.Seasons(seasonid),
reduction_function=RF )
VID = id2str(VID)
self.reduced_variables[VID] = RV
VIDs[dt] += [VID]
self.single_plotspecs = {}
title = 'PS vs ' + varid
for i, plot_id in enumerate(self.plot_ids):
VIDobs = VIDs['obs'][i]
VIDmodel = VIDs['model'][i]
#print xVID, yVID
self.single_plotspecs[plot_id+'_obs'] = plotspec(vid = plot_id+'_obs',
zvars = [VIDobs],
zfunc = (lambda z: z),
zrangevars={'yrange':[0., 1000.]},
plottype='Scatter',
title = title)
self.single_plotspecs[plot_id+'_model'] = plotspec(vid = plot_id+'_model',
zvars = [VIDmodel],
zfunc = (lambda z: z),
plottype = "Yxvsx",
title = title)
self.composite_plotspecs = {}
plotall_id = []
for plot_id in self.plot_ids:
self.composite_plotspecs[plot_id] = ( plot_id+'_obs', plot_id+'_model' )
plotall_id += [plot_id]
self.composite_plotspecs[self.plotall_id] = plotall_id
self.computation_planned = True
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 12 found nothing to plot"
return None
psv = self.plotspec_values
for key,val in psv.items():
if type(val) is not list and type(val) is not tuple: val=[val]
for v in val:
if v is None: continue
if type(v) is tuple:
continue # finalize has already been called for this, it comes from plotall_id but also has its own entry
v.finalize(flip_y=True)
#self.presentation.yticlabels1 = self.vars[1]
return self.plotspec_values[self.plotall_id]
class amwg_plot_set13(amwg_plot_spec):
"""represents one plot from AMWG Diagnostics Plot Set 13, Cloud Simulator Histograms.
Each such plot is a histogram with a numerical value laid over a box.
    At present, the histogram is used to show values of CLISCCP, cloud occurrence in percent,
for each position in the vertical axis, (pressure) level, and each position in the horizontal
axis, optical thickness.
The data presented is a climatological mean - i.e., time-averaged with times restricted to
the specified season, DJF, JJA, or ANN. And it's space-averaged with lat-lon restricted to
the specified region."""
#Often data comes from COSP = CFMIP Observation Simulator Package
name = '13 - Cloud Simulator Histograms'
number = '13'
standard_variables = { # Note: shadows amwg_plot_spec.standard_variables
'CLISCCP':[derived_var(
vid='CLISCCP', inputs=['FISCCP1','isccp_prs','isccp_tau'], outputs=['CLISCCP'],
func=uncompress_fisccp1 )]
}
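    # Hedged note: uncompress_fisccp1 is assumed to expand the packed FISCCP1
    # field onto its (isccp_prs, isccp_tau) axes, so that the derived CLISCCP
    # has exactly the two non-reduced axes the histogram below requires.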
def __init__( self, filetable1, filetable2, varnom, seasonid=None, region=None, aux=None ):
"""filetable1, filetable2 should be filetables for model and obs.
varnom is a string. The variable described may depend on time,lat,lon and will be averaged
in those dimensions. But it also should have two other axes which will be used for the
histogram.
Seasonid is a string, e.g. 'DJF'.
Region is an instance of the class rectregion (region.py).
"""
plot_spec.__init__(self,seasonid)
region = interpret_region(region)
self.reduced_variables = {}
self.derived_variables = {}
self.plottype = 'Boxfill'
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
ft1id,ft2id = filetable_ids(filetable1,filetable2)
self.plot1_id = '_'.join([ft1id,varnom,seasonid,str(region),'histo'])
self.plot2_id = '_'.join([ft2id,varnom,seasonid,str(region),'histo'])
self.plot3_id = '_'.join([ft1id+'-'+ft2id,varnom,seasonid,str(region),'histo'])
self.plotall_id = '_'.join([ft1id,ft2id,varnom,seasonid])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varnom, seasonid, region )
@staticmethod
def _list_variables( filetable1, filetable2=None ):
allvars = amwg_plot_set13._all_variables( filetable1, filetable2 )
listvars = allvars.keys()
listvars.sort()
print "amwg plot set 13 listvars=",listvars
return listvars
@classmethod
def _all_variables( cls, filetable1, filetable2=None ):
allvars = {}
# First, make a dictionary varid:varaxisnames.
        # Each variable will appear many times, but getting every occurrence is the simplest
# way to ensure that we get every variable.
vars1 = {}
vars2 = {}
for row in filetable1._table:
vars1[row.variableid] = row.varaxisnames
if filetable2 is not None:
for row in filetable2._table:
vars2[row.variableid] = row.varaxisnames
# Now start with variables common to both filetables. Keep only the ones with 2 axes
# other than time,lat,lon. That's because we're going to average over time,lat,lon
# and display a histogram dependent on (exactly) two remaining axes.
for varname in amwg_plot_spec.package._list_variables(
filetable1, filetable2, "amwg_plot_spec" ):
varaxisnames1 = vars1[varname]
#otheraxes1 = list(set(varaxisnames1) - set(['time','lat','lon']))
otheraxes1 = list(set(varaxisnames1) -
set(filetable1.lataxes+filetable1.lonaxes+['time']))
if len(otheraxes1)!=2:
continue
if filetable2 is not None:
varaxisnames2 = vars2[varname]
#otheraxes2 = list(set(varaxisnames2) - set(['time','lat','lon']))
                otheraxes2 = list(set(varaxisnames2) -
                                  set(filetable2.lataxes+filetable2.lonaxes+['time']))
if len(otheraxes2)!=2:
continue
allvars[varname] = basic_plot_variable
# Finally, add in the standard variables. Note that there is no check on whether
# we have the inputs needed to compute them.
for varname in set(cls.standard_variables.keys())-set(allvars.keys()):
allvars[varname] = basic_plot_variable
return allvars
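    # For example (hedged): CLISCCP(time,isccp_prs,isccp_tau,lat,lon) passes the
    # filter above with two leftover axes, while T(time,lev,lat,lon) is rejected
    # because only the level axis remains after removing time,lat,lon.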
def var_from_data( self, filetable, varnom, seasonid, region ):
"""defines the reduced variable for varnom when available in the specified filetable"""
rv = reduced_variable(
variableid=varnom, filetable=filetable, season=self.season, region=region,
reduction_function =\
(lambda x,vid,season=self.season,region=region:
reduce_time_space_seasonal_regional( x,season=season,region=region,vid=vid ))
)
self.reduced_variables[ rv.id() ] = rv
return rv.id()
def var_from_std( self, filetable, varnom, seasonid, region ):
"""defines the derived variable for varnom when computable as a standard variable using data
in the specified filetable"""
varid,rvs,dvs = self.stdvar2var(
varnom, filetable, self.season,\
(lambda x,vid,season=self.season,region=region:
reduce_time_space_seasonal_regional(x, season=season, region=region, vid=vid) ))
for rv in rvs:
self.reduced_variables[ rv.id() ] = rv
for dv in dvs:
self.derived_variables[ dv.id() ] = dv
return varid
def plan_computation( self, filetable1, filetable2, varnom, seasonid, region ):
region = interpret_region( region )
if varnom in filetable1.list_variables_incl_axes():
vid1 = self.var_from_data( filetable1, varnom, seasonid, region )
elif varnom in self.standard_variables.keys():
vid1 = self.var_from_std( filetable1, varnom, seasonid, region )
else:
print "ERROR variable",varnom,"cannot be read or computed from data in the filetable",filetable1
return None
if filetable2 is None:
vid2 = None
elif varnom in filetable2.list_variables_incl_axes():
vid2 = self.var_from_data( filetable2, varnom, seasonid, region )
elif varnom in self.standard_variables.keys():
vid2 = self.var_from_std( filetable2, varnom, seasonid, region )
else:
vid2 = None
ft1src = filetable1.source()
try:
ft2src = filetable2.source()
except:
ft2src = ''
#vid1 = rv.dict_id( varnom,seasonid, filetable1, region=region)
#vid2 = rv.dict_id( varnom,seasonid, filetable2, region=region)
self.single_plotspecs = {
self.plot1_id: plotspec(
vid = ps.dict_idid(vid1), zvars=[vid1], zfunc=(lambda z: z),
plottype = self.plottype,
title = ' '.join([varnom,seasonid,str(region),'(1)']),
source = ft1src ),
self.plot2_id: plotspec(
vid = ps.dict_idid(vid2), zvars=[vid2], zfunc=(lambda z: z),
plottype = self.plottype,
title = ' '.join([varnom,seasonid,str(region),'(2)']),
source = ft2src ),
self.plot3_id: plotspec(
vid = ps.dict_id(varnom,'diff',seasonid,filetable1,filetable2,region=region), zvars=[vid1,vid2],
zfunc=aminusb_2ax, plottype = self.plottype,
title = ' '.join([varnom,seasonid,str(region),'(1)-(2)']),
source = ', '.join([ft1src,ft2src]) )
}
self.composite_plotspecs = {
self.plotall_id: [self.plot1_id, self.plot2_id, self.plot3_id ]
}
self.computation_planned = True
def _results(self,newgrid=0):
results = plot_spec._results(self,newgrid)
if results is None:
print "WARNING, AMWG plot set 13 found nothing to plot"
return None
psv = self.plotspec_values
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
else:
print "WARNING not synchronizing ranges for",self.plot1_id,"and",self.plot2_id
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize(flip_y=True)
return self.plotspec_values[self.plotall_id]
def centered_RMS_difference(mv1, mv2):
#pdb.set_trace()
mv1_mean = mv1.mean()
#kludge for mismatch in dimensions
mv2_mean = mv2[0,:,:].mean()
x = aminusb_2ax(mv1-mv1_mean, mv2[0,:,:]-mv2_mean)
rms_diff = MV2.sqrt((x**2).mean())
return MV2.array([rms_diff])
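# Hedged sketch of the statistic above: with the means removed, the centered
# RMS difference between fields a and b is
#     E' = sqrt( mean( ((a - mean(a)) - (b - mean(b)))**2 ) )
# i.e. the residual used in Taylor diagrams. Illustrative check, assuming
# aminusb_2ax reduces to plain subtraction on identical grids:
#     a = MV2.array([[0.,2.],[0.,2.]])        # mean 1 -> centered [[-1,1],[-1,1]]
#     b = MV2.array([[[1.,1.],[1.,1.]]])      # centered field of zeros
#     centered_RMS_difference(a, b)           # -> [ 1.]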
def join_scalar_data(*args ):
""" This function joins the results of several reduced variables into a
single derived variable. It is used to produce a line plot of months
versus zonal mean.
"""
import cdms2, cdutil, numpy
#pdb.set_trace()
nargs = len(args)
M = []
for arg in args:
M += [arg[0]]
M = numpy.array(M)
M.shape = (2, nargs/2)
M = MV2.array(M)
#print M
#M.info()
return M
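# Hedged example: join_scalar_data packs the first element of each single-value
# argument into a 2 x nargs/2 matrix, so that
#     join_scalar_data(MV2.array([1.]), MV2.array([2.]),
#                      MV2.array([3.]), MV2.array([4.]))
# yields the MV2 array
#     [[ 1.  2.]
#      [ 3.  4.]]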
class xxxamwg_plot_set14(amwg_plot_spec):
#name = '14 - Taylor diagrams: incomplete'
#number = '14'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', region=None, aux=None ):
"""filetable1, filetable2 should be filetables for each model.
varid is a string, e.g. 'TREFHT'. The seasonal difference is Seasonid
It is is a string, e.g. 'DJF-JJA'. """
import string
plot_spec.__init__(self, seasonid)
self.plottype = 'Taylor'
self._seasonid = seasonid
self.season = cdutil.times.Seasons(self._seasonid)
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.datatype = ['model', 'obs']
self.filetables = [filetable1, filetable2]
self.filetable_ids = [ft1id, ft2id]
self.vars = [varid]
self.plot_ids = []
vars_id = '_'.join(self.vars)
#for dt in self.datatype:
plot_id = 'Taylor'
self.plot_ids += [plot_id]
#print self.plot_ids
#self.plotall_id = '_'.join(self.datatype + ['Warm', 'Pool'])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#check if there is data to process
ft1_valid = False
ft2_valid = False
#if filetable1 is not None and filetable2 is not None:
# ft1 = filetable1.find_files(self.vars[0])
# ft2 = filetable2.find_files(self.vars[1])
# ft1_valid = ft1 is not None and ft1!=[] # true iff filetable1 uses hybrid level coordinates
# ft2_valid = ft2 is not None and ft2!=[] # true iff filetable2 uses hybrid level coordinates
#else:
# print "ERROR: user must specify 2 data files"
# return None
#if not ft1_valid or not ft2_valid:
# return None
RVs = {}
for dt, ft in zip(self.datatype, self.filetables):
for var in self.vars:
#rv for the data
VID_data = rv.dict_id(var, 'data', ft)
VID_data = id2str(VID_data)
#print VID_data
RV = reduced_variable( variableid=var,
filetable=ft,
season=cdutil.times.Seasons(seasonid),
reduction_function=( lambda x, vid=VID_data:x ) )
self.reduced_variables[VID_data] = RV
#rv for its variance
VID_var = rv.dict_id(var, 'variance', ft)
VID_var = id2str(VID_var)
#print VID_var
RV = reduced_variable( variableid=var,
filetable=ft,
season=cdutil.times.Seasons(seasonid),
reduction_function=( lambda x, vid=VID_var:MV2.array([x.var()]) ) )
self.reduced_variables[VID_var] = RV
RVs[(var, dt)] = (VID_data, VID_var)
#generate derived variables for centered RMS difference
nvars = len(self.vars)
DVs = {}
for var in self.vars:
Vobs = RVs[var, 'obs'][0]
Vmodel = RVs[var, 'model'][0]
DV = var+'_RMS_CD'
#print Vobs
#print Vmodel
#print DV
DVs['RMS_CD', var] = DV
self.derived_variables[DV] = derived_var(vid=DV, inputs=[Vobs, Vmodel], func=centered_RMS_difference)
pairs = []
for var in self.vars:
for dt in self.datatype:
pairs += [RVs[var, dt][1], DVs['RMS_CD', var]]
#print pairs
#correlation coefficient
self.derived_variables['TaylorData'] = derived_var(vid='TaylorData', inputs=pairs, func=join_scalar_data)
#self.derived_variables['modelData'] = derived_var(vid='modelData', inputs=RVs['model']+DVs['RMS_CD'], func=join_scalar_data)
self.single_plotspecs = {}
title = "Taylor diagram"
self.single_plotspecs['Taylor'] = plotspec(vid = 'Taylor',
zvars = ['TaylorData'],
zfunc = (lambda x: x),
plottype = self.plottype,
title = title)
#self.composite_plotspecs = { self.plotall_id: self.single_plotspecs.keys() }
self.computation_planned = True
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 12 found nothing to plot"
return None
psv = self.plotspec_values
#pdb.set_trace()
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
#self.presentation.xticlabels1 = self.vars[0]
#self.presentation.yticlabels1 = self.vars[1]
return self.plotspec_values
class xxxamwg_plot_set15(amwg_plot_spec):
"""This class represents one plot from AMWG Diagnostics Plot Set 8.
Each such plot is a set of three contour plots: two for the model output and
the difference between the two. A plot's x-axis is time and its y-axis is latitude.
The data presented is a climatological zonal mean throughout the year.
To generate plots use Dataset 1 in the AMWG diagnostics menu, set path to the directory.
Repeat this for observation 1 and then apply. If only Dataset 1 is specified a plot
of the model zonal mean is diaplayed.
"""
# N.B. In plot_data.py, the plotspec contained keys identifying reduced variables.
# Here, the plotspec contains the variables themselves.
#name = '15 - ARM Sites Annual Cycle Contour Plots:incomplete'
#number = '15'
def __init__( self, filetable1, filetable2, varid, seasonid='ANN', region=None, aux=None ):
"""filetable1, should be a directory filetable for each model.
varid is a string, e.g. 'TREFHT'. The zonal mean is computed for each month. """
self.season = seasonid
self.FT1 = (filetable1 != None)
self.FT2 = (filetable2 != None)
self.CONTINUE = self.FT1
if not self.CONTINUE:
print "user must specify a file table"
return None
self.filetables = [filetable1]
if self.FT2:
self.filetables +=[filetable2]
self.datatype = ['model', 'obs']
self.vars = [varid, 'P']
plot_spec.__init__(self, seasonid)
self.plottype = 'Isofill'
self._seasonid = seasonid
        self.season = cdutil.times.Seasons(self._seasonid)  # note that self._seasonid can differ from seasonid
ft1id, ft2id = filetable_ids(filetable1, filetable2)
self.plot1_id = '_'.join([ft1id, varid, 'composite', 'contour'])
if self.FT2:
self.plot2_id = '_'.join([ft2id, varid, 'composite', 'contour'])
self.plot3_id = '_'.join([ft1id+'-'+ft2id, varid, seasonid, 'contour'])
self.plotall_id = '_'.join([ft1id,ft2id, varid, seasonid])
if not self.computation_planned:
self.plan_computation( filetable1, filetable2, varid, seasonid )
def plan_computation( self, filetable1, filetable2, varid, seasonid ):
self.computation_planned = False
#setup the reduced variables
self.reduced_variables = {}
vidAll = {}
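        # For each filetable, build twelve monthly zonal-mean reduced
        # variables and remember their ids; they are joined below into one
        # composite (month x latitude) derived variable per filetable.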
for FT in self.filetables:
#pdb.set_trace()
VIDs = []
for i in range(1, 13):
month = cdutil.times.getMonthString(i)
#pdb.set_trace()
#create identifiers
VID = rv.dict_id(varid, month, FT)
RF = (lambda x, varid, vid=id2str(VID), month=VID[2]:reduced_variables_press_lev(x, varid, month, vid=vid))
RV = reduced_variable(variableid = varid,
filetable = FT,
season = cdutil.times.Seasons(VID[2]),
reduction_function = RF)
self.reduced_variables[id2str(VID)] = RV
VIDs += [VID]
vidAll[FT] = VIDs
        print(self.reduced_variables.keys())
vidModel = dv.dict_id(varid, 'ZonalMean model', self._seasonid, filetable1)
if self.FT2:
vidObs = dv.dict_id(varid, 'ZonalMean obs', self._seasonid, filetable2)
vidDiff = dv.dict_id(varid, 'ZonalMean difference', self._seasonid, filetable1)
else:
vidObs = None
vidDiff = None
#vidModel = id2str(vidModel)
#vidObs = id2str(vidObs)
#vidDiff = id2str(vidDiff)
self.derived_variables = {}
        #create the derived variables, which are the composites of the months
#print vidAll[filetable1]
self.derived_variables[vidModel] = derived_var(vid=id2str(vidModel), inputs=vidAll[filetable1], func=join_data)
if self.FT2:
#print vidAll[filetable2]
self.derived_variables[vidObs] = derived_var(vid=id2str(vidObs), inputs=vidAll[filetable2], func=join_data)
#create the derived variable which is the difference of the composites
self.derived_variables[vidDiff] = derived_var(vid=id2str(vidDiff), inputs=[vidModel, vidObs], func=aminusb_ax2)
        print(self.derived_variables.keys())
        #create composite plots; the data are transposed (MV2.transpose) before plotting
self.single_plotspecs = {
self.plot1_id: plotspec(vid = ps.dict_idid(vidModel),
zvars = [vidModel],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )}
if self.FT2:
self.single_plotspecs[self.plot2_id] = \
plotspec(vid = ps.dict_idid(vidObs),
zvars=[vidObs],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )
self.single_plotspecs[self.plot3_id] = \
plotspec(vid = ps.dict_idid(vidDiff),
zvars = [vidDiff],
zfunc = (lambda x: MV2.transpose(x)),
plottype = self.plottype )
        print(self.single_plotspecs.keys())
self.composite_plotspecs = { self.plotall_id: self.single_plotspecs.keys() }
self.computation_planned = True
        #pdb.set_trace()
def _results(self, newgrid=0):
#pdb.set_trace()
results = plot_spec._results(self, newgrid)
if results is None:
print "WARNING, AMWG plot set 15 found nothing to plot"
return None
psv = self.plotspec_values
if self.FT2:
if self.plot1_id in psv and self.plot2_id in psv and\
psv[self.plot1_id] is not None and psv[self.plot2_id] is not None:
psv[self.plot1_id].synchronize_ranges(psv[self.plot2_id])
for key,val in psv.items():
if type(val) is not list: val=[val]
for v in val:
if v is None: continue
v.finalize()
return self.plotspec_values[self.plotall_id]
| 51.689003 | 160 | 0.588264 |
79413e700ce3aa19b9d6f56157bd3b288b10c269 | 7,547 | py | Python | slither/core/analysis.py | AlexanderFabisch/slither | c527e0412cf89197f907a42699a554f26cb2af59 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2021-04-23T09:06:51.000Z | 2021-08-19T07:18:32.000Z | slither/core/analysis.py | AlexanderFabisch/slither | c527e0412cf89197f907a42699a554f26cb2af59 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2021-04-01T08:55:46.000Z | 2022-02-20T18:00:53.000Z | slither/core/analysis.py | AlexanderFabisch/slither | c527e0412cf89197f907a42699a554f26cb2af59 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from collections import deque
import numpy as np
from scipy.signal import medfilt
from .config import config
def check_coords(coords):
"""Filter non-finite GPS coordinates.
Parameters
----------
coords : array, shape (n_steps, 2)
Latitudes and longitudes
Returns
-------
filtered_coords : array, shape (n_valid_steps, 2)
All finite GPS coordinates
"""
coords_sum = coords[:, 0] + coords[:, 1]
valid_coords = np.isfinite(coords_sum)
return coords[valid_coords]
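# A minimal usage sketch (hypothetical coordinates, for illustration only):
# >>> check_coords(np.array([[1.0, 2.0], [np.nan, 3.0]]))
# array([[1., 2.]])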
def is_outlier(points, thresh=3.5):
"""Check an array for outliers.
Parameters
----------
points : array-like, shape (n_samples, n_dims)
An array of observations
thresh : float, optional
The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns
-------
mask : array-like, shape (n_samples,)
A boolean array that indicates whether the corresponding sample is an
outlier
    References
    ----------
http://stackoverflow.com/a/22357811/915743
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
points = np.asarray(points)
if points.ndim == 1:
points = points[:, np.newaxis]
nonzero = np.unique(np.nonzero(points)[0])
median = np.median(points[nonzero], axis=0)
diff = np.sum((points - median) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff[nonzero])
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
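# Illustrative helper (a hedged sketch, not part of the original API;
# the sample values are made up):
def _example_is_outlier():
    """Flag the single extreme value in a small hypothetical sample."""
    points = np.array([1.0, 1.1, 0.9, 50.0])
    return is_outlier(points)  # -> array([False, False, False,  True])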
def filtered_heartrates(path, filter_width):
"""Apply median filter to heartrates of a path.
Parameters
----------
path : dict
Path with at least the entry 'heartrates'
filter_width : int
Width of the median filter
Returns
-------
heartrates : array, shape (n_steps,)
Filtered heartrates
"""
return medfilt(path["heartrates"], filter_width)
def filter_median_average(timeseries, filter_width):
"""Apply median and average filter to time series.
Parameters
----------
timeseries : array-like, shape (n_steps,)
Time series
filter_width : int
Width of the two filters
Returns
-------
timeseries : array, shape (n_steps,)
Filtered time series
"""
timeseries = medfilt(timeseries, filter_width)
timeseries = np.convolve(
timeseries, np.ones(filter_width) / filter_width,
mode="same")
return timeseries
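# A hedged usage sketch (values illustrative; both filters zero-pad, so
# samples near the edges are attenuated):
# >>> filter_median_average(np.array([1., 1., 5., 1., 1.]), 3)
# array([0.666..., 1., 1., 1., 0.666...])  # spike removed by the median step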
def elevation_summary(altitudes, total_distance_in_m):
"""Overall elevation statistics.
Parameters
----------
altitudes : array, shape (n_steps,)
Altitudes
total_distance_in_m : float
Total distance in meters
Returns
-------
gain : float
Total elevation gain
loss : float
Total elevation loss
slope_in_percent : float
Average slope in percent, ignoring elevation loss
"""
altitude_diffs = np.diff(altitudes)
gain = sum(altitude_diffs[altitude_diffs > 0])
loss = -sum(altitude_diffs[altitude_diffs < 0])
slope_in_percent = 100.0 * gain / total_distance_in_m
return gain, loss, slope_in_percent
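# Illustrative helper (a hedged sketch; altitudes and distance are made up):
def _example_elevation_summary():
    """25 m gained and 5 m lost over 1 km gives a 2.5 % average slope."""
    altitudes = np.array([100.0, 110.0, 105.0, 120.0])
    return elevation_summary(altitudes, 1000.0)  # -> (25.0, 5.0, 2.5)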
def get_paces(path, sport):
"""Generate pace table of an activity.
Parameters
----------
path : dict
A path that has at least the entries 'timestamps' and 'velocities'.
sport : str
Sport
Returns
-------
paces : list
Each entry is a tuple of the traveled distance in meters and
corresponding average pace at this distance in seconds per
kilometer.
"""
velocities = path["velocities"][1:]
timestamps = path["timestamps"]
delta_t = np.diff(timestamps)
max_velocity = config["max_velocity"].get(
sport, config["max_velocity"]["default"])
valid_velocities = np.where(velocities <= max_velocity)
velocities = velocities[valid_velocities]
delta_t = delta_t[valid_velocities]
dist = np.cumsum(velocities * delta_t)
if len(dist) == 0:
return []
split_distance = appropriate_partition(dist[-1])
pdt = config["pace_distance_table"]
pace_distance = pdt.get(sport, pdt["other"])
paces = []
last_t = 0
for threshold in range(split_distance, int(dist[-1]), split_distance):
t = np.argmax(dist >= threshold)
split_time = timestamps[t] - timestamps[last_t]
pace = split_time / split_distance * pace_distance
paces.append((threshold, pace))
last_t = t
return paces
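# A hedged usage sketch (the path dict is hypothetical and the result
# depends on the configured per-sport pace distance):
# >>> path = {"timestamps": np.arange(3600.0),
# ...         "velocities": np.full(3600, 3.0)}
# >>> get_paces(path, "running")
# [(1000, ...), (2000, ...), ...]  # (meters traveled, seconds per pace unit)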
def fastest_part(sport, timestamps, velocities, distance):
"""Compute fastest time for a given distance in an activity.
Parameters
----------
sport : str
Sport
timestamps : array, shape (n_steps,)
Timestamps
velocities : array, shape (n_steps,)
Velocities
distance : float
Length of the segment for which we want to compute the fastest time
in this activity.
Returns
-------
record : float
Fastest time for the requested distance in seconds
"""
queue_dist = 0.0
queue_time = 0.0
dqueue = deque()
tqueue = deque()
v = velocities[1:]
dt = np.diff(timestamps)
record = float("inf")
for t in range(len(v)):
if np.isnan(v[t]):
queue_dist = 0.0
queue_time = 0.0
dqueue.clear()
tqueue.clear()
continue
if v[t] > config["max_velocity"][sport]:
continue
dist = v[t] * dt[t]
dqueue.appendleft(dist)
tqueue.appendleft(dt[t])
queue_dist += dist
queue_time += dt[t]
while queue_dist > distance:
if queue_time < record:
record = queue_time
dist = dqueue.pop()
time = tqueue.pop()
queue_dist -= dist
queue_time -= time
return record
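# A hedged usage sketch (illustrative; config["max_velocity"] must contain an
# entry for the sport):
# >>> fastest_part("running", timestamps, velocities, 1000.0)
# fastest time over any 1 km stretch of the activity, in seconds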
def appropriate_partition(distance):
"""Find appropriate partition of a distance into parts.
Parameters
----------
distance : float
Traveled distance in meters
Returns
-------
segment_distance : float
Appropriate length of segments in which we split the total distance
"""
if distance < 5000:
return 400
elif distance < 20000:
return 1000
elif distance < 40000:
return 2000
elif distance < 100000:
return 5000
else:
return 10000
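# Worked examples (these follow directly from the thresholds above):
# >>> appropriate_partition(3000)
# 400
# >>> appropriate_partition(12000)
# 1000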
def compute_distances_for_valid_trackpoints(path):
"""Compute distances for valid trackpoints from a path.
Parameters
----------
path : dict
A path that has at least the entries 'timestamps' and 'velocities'.
Returns
-------
distances_in_m : array, shape (n_valid_trackpoints,)
Distances in meters [m] per valid trackpoint.
valid_trackpoints : array, shape (n_valid_trackpoints,)
Indices of finite velocities in path.
"""
delta_ts = np.gradient(path["timestamps"])
velocities = path["velocities"]
valid_trackpoints = np.isfinite(velocities)
delta_ts = delta_ts[valid_trackpoints]
velocities = velocities[valid_trackpoints]
distances_in_m = np.cumsum(delta_ts * velocities)
return distances_in_m, valid_trackpoints
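# Illustrative helper (a hedged sketch; the path values are made up):
def _example_compute_distances():
    """Distances for a 3-sample path whose last velocity is missing."""
    path = {"timestamps": np.array([0.0, 1.0, 2.0]),
            "velocities": np.array([2.0, 2.0, np.nan])}
    return compute_distances_for_valid_trackpoints(path)
    # -> (array([2., 4.]), array([ True,  True, False]))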
| 25.670068 | 77 | 0.627004 |
79413eaab87ef488a9b28a13e0be24c806c1b310 | 456 | py | Python | sphinx/source/docs/first_steps/examples/first_steps_4_plot_size.py | arefeena/bokeh | 4dd2b6536ad672532b47f52b04a2c3292c947c15 | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/docs/first_steps/examples/first_steps_4_plot_size.py | arefeena/bokeh | 4dd2b6536ad672532b47f52b04a2c3292c947c15 | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/docs/first_steps/examples/first_steps_4_plot_size.py | arefeena/bokeh | 4dd2b6536ad672532b47f52b04a2c3292c947c15 | [
"BSD-3-Clause"
] | null | null | null | from bokeh.plotting import figure, output_file, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [4, 5, 5, 7, 2]
# set output to static HTML file
output_file("first_steps.html")
# create a new plot with a specific size
p = figure(
title="Plot sizing example",
plot_width=350,
plot_height=250,
x_axis_label="x",
y_axis_label="y",
)
# add circle renderer
circle = p.circle(x, y, fill_color="red", size=15)
# show the results
show(p)
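# Alternatively, write the HTML file without opening a browser window
# (a hedged variant; `save` is also available from bokeh.plotting):
# from bokeh.plotting import save
# save(p)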
| 19 | 52 | 0.66886 |
79413ec68d10cb3167608052cefcc8e8182ba0bc | 5,777 | py | Python | Tax-Calculator-2.9.0/taxcalc/tests/test_consumption.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | Tax-Calculator-2.9.0/taxcalc/tests/test_consumption.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | Tax-Calculator-2.9.0/taxcalc/tests/test_consumption.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | # CODING-STYLE CHECKS:
# pycodestyle test_consumption.py
import numpy as np
import pytest
import copy
from taxcalc import Policy, Records, Calculator, Consumption
def test_year_consistency():
assert Consumption.JSON_START_YEAR == Policy.JSON_START_YEAR
assert Consumption.DEFAULT_NUM_YEARS == Policy.DEFAULT_NUM_YEARS
def test_validity_of_consumption_vars_set():
records_varinfo = Records(data=None)
assert Consumption.RESPONSE_VARS.issubset(records_varinfo.USABLE_READ_VARS)
useable_vars = set(['housing', 'snap', 'tanf', 'vet', 'wic',
'mcare', 'mcaid', 'other'])
assert Consumption.BENEFIT_VARS.issubset(useable_vars)
def test_update_consumption():
consump = Consumption()
consump.update_consumption({})
revision = {
'MPC_e20400': {2014: 0.05,
2015: 0.06},
'BEN_mcare_value': {2014: 0.75,
2015: 0.80}
}
consump.update_consumption(revision)
expected_mpc_e20400 = np.full((Consumption.DEFAULT_NUM_YEARS,), 0.06)
expected_mpc_e20400[0] = 0.0
expected_mpc_e20400[1] = 0.05
assert np.allclose(consump._MPC_e20400,
expected_mpc_e20400,
rtol=0.0)
assert np.allclose(consump._MPC_e17500,
np.zeros((Consumption.DEFAULT_NUM_YEARS,)),
rtol=0.0)
expected_ben_mcare_value = np.full((Consumption.DEFAULT_NUM_YEARS,), 0.80)
expected_ben_mcare_value[0] = 1.0
expected_ben_mcare_value[1] = 0.75
assert np.allclose(consump._BEN_mcare_value,
expected_ben_mcare_value,
rtol=0.0)
assert np.allclose(consump._BEN_snap_value,
np.ones((Consumption.DEFAULT_NUM_YEARS,)),
rtol=0.0)
consump.set_year(2015)
assert consump.current_year == 2015
assert consump.MPC_e20400 == 0.06
assert consump.MPC_e17500 == 0.0
assert consump.BEN_mcare_value == 0.80
assert consump.BEN_snap_value == 1.0
def test_incorrect_update_consumption():
with pytest.raises(ValueError):
Consumption().update_consumption([])
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_e17500': {'xyz': 0.2}})
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_e17500': {2012: 0.2}})
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_e17500': {2052: 0.2}})
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_exxxxx': {2014: 0.2}})
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_e17500': {2014: -0.1}})
with pytest.raises(ValueError):
Consumption().update_consumption({'MPC_e17500-indexed': {2014: 0.1}})
def test_future_update_consumption():
consump = Consumption()
assert consump.current_year == consump.start_year
assert consump.has_response() is False
cyr = 2020
consump.set_year(cyr)
consump.update_consumption({'MPC_e20400': {cyr: 0.01}})
assert consump.current_year == cyr
assert consump.has_response() is True
consump.set_year(cyr - 1)
assert consump.has_response() is False
# test future updates for benefits
consump_ben = Consumption()
assert consump_ben.current_year == consump_ben.start_year
assert consump_ben.has_response() is False
consump_ben.set_year(cyr)
consump_ben.update_consumption({'BEN_vet_value': {cyr: 0.95}})
assert consump_ben.current_year == cyr
assert consump_ben.has_response() is True
consump_ben.set_year(cyr - 1)
assert consump_ben.has_response() is False
def test_consumption_default_data():
consump = Consumption()
pdata = consump._vals
for pname in pdata.keys():
if pname.startswith('MPC'):
assert pdata[pname]['value'] == [0.0]
elif pname.startswith('BEN'):
assert pdata[pname]['value'] == [1.0]
def test_consumption_response(cps_subsample):
consump = Consumption()
mpc = 0.5
consumption_response = {'MPC_e20400': {2013: mpc}}
consump.update_consumption(consumption_response)
# test incorrect call to response method
with pytest.raises(ValueError):
consump.response(list(), 1)
# test correct call to response method
rec = Records.cps_constructor(data=cps_subsample)
pre = copy.deepcopy(rec.e20400)
consump.response(rec, 1.0)
post = rec.e20400
actual_diff = post - pre
expected_diff = np.ones(rec.array_length) * mpc
assert np.allclose(actual_diff, expected_diff)
# compute earnings mtr with no consumption response
rec = Records.cps_constructor(data=cps_subsample)
ided0 = copy.deepcopy(rec.e20400)
calc0 = Calculator(policy=Policy(), records=rec, consumption=None)
(mtr0_ptax, mtr0_itax, _) = calc0.mtr(variable_str='e00200p',
wrt_full_compensation=False)
assert np.allclose(calc0.array('e20400'), ided0)
# compute earnings mtr with consumption response
calc1 = Calculator(policy=Policy(), records=rec, consumption=consump)
mtr1_ptax, mtr1_itax, _ = calc1.mtr(variable_str='e00200p',
wrt_full_compensation=False)
assert np.allclose(calc1.array('e20400'), ided0)
# confirm that payroll mtr values are no different
assert np.allclose(mtr1_ptax, mtr0_ptax)
# confirm that all mtr with cons-resp are no greater than without cons-resp
assert np.all(np.less_equal(np.around(mtr1_itax, decimals=5),
np.around(mtr0_itax, decimals=5)))
# confirm that some mtr with cons-resp are less than without cons-resp
assert np.any(np.less(mtr1_itax, mtr0_itax))
| 40.118056 | 79 | 0.672494 |
79413ee9a9b448722ada0e140a2b9c72fb7efb25 | 1,019 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/SATImportOptions.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/Autodesk/Revit/DB/__init___parts/SATImportOptions.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/Autodesk/Revit/DB/__init___parts/SATImportOptions.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class SATImportOptions(BaseImportOptions,IDisposable):
"""
The import options used to import SAT format files.
SATImportOptions(option: SATImportOptions)
SATImportOptions()
"""
def Dispose(self):
""" Dispose(self: BaseImportOptions,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: BaseImportOptions,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,option=None):
"""
__new__(cls: type,option: SATImportOptions)
__new__(cls: type)
"""
pass
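# Hedged usage sketch (illustrative only; 'doc', 'sat_path' and 'view' stand
# in for Revit API objects that are not defined in this stub):
# options = SATImportOptions()
# doc.Import(sat_path, options, view)
# options.Dispose()  # or use the IDisposable context-manager protocol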
| 29.970588 | 215 | 0.698724 |
79413eff8f41961d06e6a2b629c1e7adc42944d0 | 1,925 | py | Python | lib/systems/olympicene.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/olympicene.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/olympicene.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import pulsar as psr
def load_ref_system():
""" Returns olympicene as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.00000 0.61250 2.46369
C -0.00000 -0.09959 1.24408
C 0.00000 0.59715 -0.00000
C 0.00000 2.01010 -0.00000
C -0.00000 -0.09588 3.67776
C -0.00000 -1.49274 3.69443
C -0.00000 -2.20487 2.49866
C -0.00000 -1.51375 1.28555
C 0.00000 -0.09959 -1.24408
C 0.00000 0.61250 -2.46369
C 0.00000 2.01220 -2.43482
C 0.00000 2.70097 -1.21967
H -0.00000 0.43936 4.62498
H -0.00000 -2.02335 4.64311
H -0.00000 -3.29136 2.51508
C -0.00000 -1.51375 -1.28555
H 0.00000 3.78890 -1.23577
C 0.00000 2.70097 1.21967
C -0.00000 2.01220 2.43482
H 0.00000 3.78890 1.23577
H -0.00000 2.58219 3.36128
C 0.00000 -0.09588 -3.67776
H 0.00000 2.58219 -3.36128
C 0.00000 -2.20487 -2.49866
C 0.00000 -1.49274 -3.69443
C -0.00000 -2.22743 0.00000
H 0.00000 0.43936 -4.62498
H -0.00000 -3.29136 -2.51508
H 0.00000 -2.02335 -4.64311
H 0.91665 -2.79200 0.00000
H -0.91665 -2.79200 0.00000
""")
| 49.358974 | 66 | 0.38026 |
79413f13161baf66f9d15f4194a10256e825a630 | 39,613 | py | Python | sympy/integrals/tests/test_transforms.py | gum3ng/sympy | e9414fafa976b26aa0b701a0217ab0f3b561989f | [
"BSD-3-Clause"
] | 1 | 2022-01-17T12:38:24.000Z | 2022-01-17T12:38:24.000Z | sympy/integrals/tests/test_transforms.py | gum3ng/sympy | e9414fafa976b26aa0b701a0217ab0f3b561989f | [
"BSD-3-Clause"
] | null | null | null | sympy/integrals/tests/test_transforms.py | gum3ng/sympy | e9414fafa976b26aa0b701a0217ab0f3b561989f | [
"BSD-3-Clause"
] | null | null | null | from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform,
InverseSineTransform, InverseCosineTransform, IntegralTransformError)
from sympy.core.function import (Function, expand_mul)
from sympy.core import EulerGamma
from sympy.core.numbers import (I, Rational, oo, pi)
from sympy.core.relational import Eq, Ne
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (Abs, arg, re, unpolarify)
from sympy.functions.elementary.exponential import (exp, exp_polar, log)
from sympy.functions.elementary.hyperbolic import (cosh, sinh)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (atan, atan2, cos, sin, tan)
from sympy.functions.special.bessel import (besseli, besselj, besselk, bessely)
from sympy.functions.special.delta_functions import Heaviside
from sympy.functions.special.error_functions import (erf, erfc, expint)
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.hyper import meijerg
from sympy.simplify.gammasimp import gammasimp
from sympy.simplify.hyperexpand import hyperexpand
from sympy.simplify.trigsimp import trigsimp
from sympy.testing.pytest import XFAIL, slow, skip, raises, warns_deprecated_sympy
from sympy.matrices import Matrix, eye
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy.integrals.transforms import MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
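    # A hedged sketch of what a derivative-rule test might look like once
    # implemented (the right-hand side is the textbook identity, not
    # guaranteed current library behavior):
    # assert laplace_transform(f(x).diff(x), x, s) == \
    #     s*LaplaceTransform(f(x), x, s) - f(0)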
def test_free_symbols():
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == {s}
assert mellin_transform(f(x)*a, x, s).free_symbols == {s, a}
def test_as_integral():
from sympy.integrals.integrals import Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(2*pi*I*inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(f(s)/x**s, (s, _c - oo*I, _c + oo*I))"
assert str(2*pi*I*inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
MT = mellin_transform
bpos = symbols('b', positive=True)
# bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S.Half), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S.Half)/(2*sqrt(pi)),
(-1, Rational(-1, 2)), True)
def test_mellin_transform():
from sympy.functions.elementary.miscellaneous import (Max, Min)
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(-1/(nu + s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(beta) > 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, 1 - re(beta)), re(beta) > 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
assert MT(abs(1 - x)**(-rho), x, s) == (
2*sin(pi*rho/2)*gamma(1 - rho)*
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho-s)/pi,
(0, re(rho)), re(rho) < 1)
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
assert mt[1], mt[2] == ((0, -re(beta) + 1), re(beta) > 0)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(0, -re(a)), Min(1, 1 - re(a))), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S.Half), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S.Half)/(sqrt(pi)*s), (Rational(-1, 2), 0), True)
def test_mellin_transform2():
MT = mellin_transform
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
@slow
def test_mellin_transform_bessel():
from sympy.functions.elementary.miscellaneous import Max
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, Rational(3, 4)), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(-2*s + S.Half)*gamma(a/2 + s + S.Half)/(
gamma(-a/2 - s + 1)*gamma(a - 2*s + 1)), (
-re(a)/2 - S.Half, Rational(1, 4)), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S.Half)/(
gamma(-a/2 - s + S.Half)*gamma(a - 2*s + 1)), (
-re(a)/2, Rational(1, 4)), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S.Half), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S.Half), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S.Half)
/ (sqrt(pi)*gamma(Rational(3, 2) - s)*gamma(a - s + S.Half)),
(S.Half - re(a), S.Half), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S.Half), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S.Half), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), Rational(3, 4)), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S.Half - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), Rational(1, 4)), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S.Half - 2*s)
/ (sqrt(pi)*gamma(S.Half - s - a/2)*gamma(S.Half - s + a/2)),
(Max(-re(a)/2, re(a)/2), Rational(1, 4)), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S.Half - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S.Half), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S.Half), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S.Half), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(
a, 2*sqrt(2*sqrt(x))), x, s) == (4**(-s)*gamma(2*s)*
gamma(a/2 + s)/(2*gamma(a/2 - s + 1)), (Max(0, -re(a)/2), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(gamma(s)*gamma(
a + s)*gamma(-s + S.Half)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S.Half), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(2**(2*s - 1)*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)* \
gamma(a/2 + b/2 + s)/(gamma(-a/2 + b/2 - s + 1)* \
gamma(a/2 + b/2 - s + 1)), (Max(-re(a)/2 - re(b)/2, \
re(a)/2 - re(b)/2), S.Half), True)
# TODO products of besselk are a mess
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
mt0 = gammasimp(trigsimp(gammasimp(mt[0].expand(func=True))))
assert mt0 == 2*pi**Rational(3, 2)*cos(pi*s)*gamma(-s + S.Half)/(
(cos(2*pi*a) - cos(2*pi*s))*gamma(-a - s + 1)*gamma(a - s + 1))
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
@slow
def test_expint():
from sympy.functions.elementary.miscellaneous import Max
from sympy.functions.special.error_functions import (Ci, E1, Ei, Si)
from sympy.functions.special.zeta_functions import lerchphi
from sympy.simplify.simplify import simplify
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
    # XXX IMT has hiccups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma(s/2 + S.Half)/(
2*s*gamma(-s/2 + 1)), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-2**(2*s - 1)*sqrt(pi)*gamma(s)/(s*gamma(-s + S.Half)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S.Half)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*exp_polar(I*pi), 1, a), 0, re(a) > S.Zero)
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
@slow
def test_inverse_mellin_transform():
from sympy.core.function import expand
from sympy.functions.elementary.miscellaneous import (Max, Min)
from sympy.functions.elementary.trigonometric import cot
from sympy.simplify.powsimp import powsimp
from sympy.simplify.simplify import simplify
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
nu = symbols('nu', real=True)
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (Rational(-1, 2), 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy.core.function import expand
from sympy.simplify.powsimp import powsimp
from sympy.simplify.simplify import logcombine
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S.Half)/(sqrt(pi)*s), s, x, (Rational(-1, 2), 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, Rational(3, 4)))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S.Half - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, Rational(1, 4)))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S.Half - 2*s)
/ (gamma(S.Half - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, Rational(1, 4)))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S.Half))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S.Half))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S.Half))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S.Half))) == \
besselj(a, sqrt(x))*-(besselj(-b, sqrt(x)) -
besselj(b, sqrt(x))*cos(pi*b))/sin(pi*b)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S.Half)) == sqrt(x)/(x + 1)
@slow
def test_laplace_transform():
from sympy.functions.special.delta_functions import DiracDelta
from sympy.functions.special.error_functions import (fresnelc, fresnels)
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos) == (1/(spos - 1), 0, spos > 1)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s) == (1/(s - 1), 0, abs(s) > 1)
assert LT(exp(2*t), t, s) == (1/(s - 2), 0, abs(s) > 2)
assert LT(exp(a*t), t, s) == (1/(s - a), a, Ne(s/a, 1))
assert LT(log(t/a), t, s) == (-(log(a*s) + EulerGamma)/s, 0, True)
assert LT(erf(t), t, s) == (erfc(s/2)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(exp(-a*t)*sin(b*t), t, s) == (b/(b**2 + (a + s)**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
((a + s)/(b**2 + (a + s)**2), -a, True)
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# DiracDelta function: standard cases
assert LT(DiracDelta(t), t, s) == (1, -oo, True)
assert LT(DiracDelta(a*t), t, s) == (1/a, -oo, True)
assert LT(DiracDelta(t/42), t, s) == (42, -oo, True)
assert LT(DiracDelta(t+42), t, s) == (0, -oo, True)
assert LT(DiracDelta(t)+DiracDelta(t-42), t, s) == \
(1 + exp(-42*s), -oo, True)
assert LT(DiracDelta(t)-a*exp(-a*t), t, s) == (-a/(a + s) + 1, 0, True)
assert LT(exp(-t)*(DiracDelta(t)+DiracDelta(t-42)), t, s) == \
(exp(-42*s - 42) + 1, -oo, True)
# Collection of cases that cannot be fully evaluated and/or would catch
# some common implementation errors
assert LT(DiracDelta(t**2), t, s) == LaplaceTransform(DiracDelta(t**2), t, s)
assert LT(DiracDelta(t**2 - 1), t, s) == (exp(-s)/2, -oo, True)
assert LT(DiracDelta(t*(1 - t)), t, s) == \
LaplaceTransform(DiracDelta(-t**2 + t), t, s)
assert LT((DiracDelta(t) + 1)*(DiracDelta(t - 1) + 1), t, s) == \
(LaplaceTransform(DiracDelta(t)*DiracDelta(t - 1), t, s) + \
1 + exp(-s) + 1/s, 0, True)
assert LT(DiracDelta(2*t - 2*exp(a)), t, s) == \
(exp(-s*exp(a))/2, -oo, True)
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == (
((2*sin(s**2/(2*pi))*fresnelc(s/pi) - 2*cos(s**2/(2*pi))*fresnels(s/pi)
+ sqrt(2)*cos(s**2/(2*pi) + pi/4))/(2*s), 0, True))
Mt = Matrix([[exp(t), t*exp(-t)], [t*exp(-t), exp(t)]])
Ms = Matrix([[ 1/(s - 1), (s + 1)**(-2)],
[(s + 1)**(-2), 1/(s - 1)]])
# The default behaviour for Laplace tranform of a Matrix returns a Matrix
# of Tuples and is deprecated:
with warns_deprecated_sympy():
Ms_conds = Matrix([[(1/(s - 1), 0, Abs(s) > 1), ((s + 1)**(-2),
0, True)], [((s + 1)**(-2), 0, True), (1/(s - 1), 0, Abs(s) > 1)]])
with warns_deprecated_sympy():
assert LT(Mt, t, s) == Ms_conds
# The new behavior is to return a tuple of a Matrix and the convergence
# conditions for the matrix as a whole:
assert LT(Mt, t, s, legacy_matrix=False) == (Ms, 0, Abs(s) > 1)
# With noconds=True the transformed matrix is returned without conditions
# either way:
assert LT(Mt, t, s, noconds=True) == Ms
assert LT(Mt, t, s, legacy_matrix=False, noconds=True) == Ms
@slow
def test_issue_8368t_7173():
LT = laplace_transform
# hyperbolic
assert LT(sinh(x), x, s) == (1/(s**2 - 1), 0, abs(s) > 1)
assert LT(cosh(x), x, s) == (s/(s**2 - 1), -oo, s**2 > 1)
assert LT(sinh(x + 3), x, s) == (
(-s + (s + 1)*exp(6) + 1)*exp(-3)/(s - 1)/(s + 1)/2, 0, Abs(s) > 1)
assert LT(sinh(x)*cosh(x), x, s) == (
1/(s**2 - 4), 0, Abs(s) > 2)
# trig (make sure they are not being rewritten in terms of exp)
assert LT(cos(x + 3), x, s) == ((s*cos(3) - sin(3))/(s**2 + 1), 0, True)
@slow
def test_inverse_laplace_transform():
from sympy.core.exprtools import factor_terms
from sympy.functions.special.delta_functions import DiracDelta
from sympy.simplify.simplify import simplify
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
def simp_hyp(expr):
return factor_terms(expand_mul(expr)).rewrite(sin)
assert ILT(1, s, t) == DiracDelta(t)
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(a/(a + s), s, t) == a*exp(-a*t)*Heaviside(t)
assert ILT(s/(a + s), s, t) == -a*exp(-a*t)*Heaviside(t) + DiracDelta(t)
assert ILT((a + s)**(-2), s, t) == t*exp(-a*t)*Heaviside(t)
assert ILT((a + s)**(-5), s, t) == t**4*exp(-a*t)*Heaviside(t)/24
assert ILT(a/(a**2 + s**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
assert ILT(b/(b**2 + (a + s)**2), s, t) == exp(-a*t)*sin(b*t)*Heaviside(t)
assert ILT(b*s/(b**2 + (a + s)**2), s, t) +\
(a*sin(b*t) - b*cos(b*t))*exp(-a*t)*Heaviside(t) == 0
assert ILT(exp(-a*s)/s, s, t) == Heaviside(-a + t)
assert ILT(exp(-a*s)/(b + s), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT((b + s)/(a**2 + (b + s)**2), s, t) == \
exp(-b*t)*cos(a*t)*Heaviside(t)
assert ILT(exp(-a*s)/s**b, s, t) == \
(-a + t)**(b - 1)*Heaviside(-a + t)/gamma(b)
assert ILT(exp(-a*s)/sqrt(s**2 + 1), s, t) == \
Heaviside(-a + t)*besselj(0, a - t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
assert ILT(1/(s**2*(s**2 + 1)), s, t) == (t - sin(t))*Heaviside(t)
assert ILT(s**2/(s**2 + 1), s, t) == -sin(t)*Heaviside(t) + DiracDelta(t)
assert ILT(1 - 1/(s**2 + 1), s, t) == -sin(t)*Heaviside(t) + DiracDelta(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
assert ILT(1/(s**2*(s**2 + 1)),s,t) == (t - sin(t))*Heaviside(t)
assert ILT( (s * eye(2) - Matrix([[1, 0], [0, 2]])).inv(), s, t) ==\
Matrix([[exp(t)*Heaviside(t), 0], [0, exp(2*t)*Heaviside(t)]])
def test_inverse_laplace_transform_delta():
from sympy.functions.special.delta_functions import DiracDelta
ILT = inverse_laplace_transform
t = symbols('t')
assert ILT(2, s, t) == 2*DiracDelta(t)
assert ILT(2*exp(3*s) - 5*exp(-7*s), s, t) == \
2*DiracDelta(t + 3) - 5*DiracDelta(t - 7)
a = cos(sin(7)/2)
assert ILT(a*exp(-3*s), s, t) == a*DiracDelta(t - 3)
assert ILT(exp(2*s), s, t) == DiracDelta(t + 2)
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t) == DiracDelta(t + r)
def test_inverse_laplace_transform_delta_cond():
from sympy.functions.elementary.complexes import im
from sympy.functions.special.delta_functions import DiracDelta
ILT = inverse_laplace_transform
t = symbols('t')
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t, noconds=False) == (DiracDelta(t + r), True)
z = Symbol('z')
assert ILT(exp(z*s), s, t, noconds=False) == \
(DiracDelta(t + z), Eq(im(z), 0))
# inversion does not exist: verify it doesn't evaluate to DiracDelta
for z in (Symbol('z', extended_real=False),
Symbol('z', imaginary=True, zero=False)):
f = ILT(exp(z*s), s, t, noconds=False)
f = f[0] if isinstance(f, tuple) else f
assert f.func != DiracDelta
# issue 15043
assert ILT(1/s + exp(r*s)/s, s, t, noconds=False) == (
Heaviside(t) + Heaviside(r + t), True)
def test_fourier_transform():
from sympy.core.function import (expand, expand_complex, expand_trig)
from sympy.polys.polytools import factor
from sympy.simplify.simplify import simplify
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
b/(b**2 + (a + 2*I*pi*k)**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform((1/sqrt(t))**3, t, w) == 2*sqrt(w)
assert sine_transform(t**(-a), t, w) == 2**(
-a + S.Half)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S.Half), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert sine_transform(
log(t)/t, t, w) == sqrt(2)*sqrt(pi)*-(log(w**2) + 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**Rational(3, 2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**Rational(3, 2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy.functions.special.error_functions import (Ci, Si)
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == sqrt(2)*sqrt(pi)*exp(-a*w)/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S.Half)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S.Half)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == a*exp(-a**2/(2*w))/(2*w**Rational(3, 2))
assert cosine_transform(1/(a + t), t, w) == sqrt(2)*(
(-2*Si(a*w) + pi)*sin(a*w)/2 - cos(a*w)*Ci(a*w))/sqrt(pi)
assert inverse_cosine_transform(sqrt(2)*meijerg(((S.Half, 0), ()), (
(S.Half, 0, 0), (S.Half,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S.Half,), ()), ((0, 0), (S.Half,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S.Half,), ()), ((0, 0), (S.Half,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == (
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2))
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + Rational(3, 2))/sqrt(pi)
assert inverse_hankel_transform(
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - Rational(3, 2))*gamma(
nu + Rational(3, 2))/sqrt(pi), k, r, nu) == r**nu*exp(-a*r)
def test_issue_7181():
assert mellin_transform(1/(1 - x), x, s) != None
def test_issue_8882():
# This is the original test.
# from sympy import diff, Integral, integrate
# r = Symbol('r')
# psi = 1/r*sin(r)*exp(-(a0*r))
# h = -1/2*diff(psi, r, r) - 1/r*psi
# f = 4*pi*psi*h*r**2
# assert integrate(f, (r, -oo, 3), meijerg=True).has(Integral) == True
# To save time, only the critical part is included.
F = -a**(-s + 1)*(4 + 1/a**2)**(-s/2)*sqrt(1/a**2)*exp(-s*I*pi)* \
sin(s*atan(sqrt(1/a**2)/2))*gamma(s)
raises(IntegralTransformError, lambda:
inverse_mellin_transform(F, s, x, (-1, oo),
**{'as_meijerg': True, 'needeval': True}))
def test_issue_7173():
from sympy.simplify.cse_main import cse
x0, x1, x2, x3 = symbols('x:4')
ans = laplace_transform(sinh(a*x)*cosh(a*x), x, s)
r, e = cse(ans)
assert r == [
(x0, arg(a)),
(x1, Abs(x0)),
(x2, pi/2),
(x3, Abs(x0 + pi))]
assert e == [
a/(-4*a**2 + s**2),
0,
((x1 <= x2) | (x1 < x2)) & ((x3 <= x2) | (x3 < x2))]
def test_issue_8514():
from sympy.simplify.simplify import simplify
a, b, c, = symbols('a b c', positive=True)
t = symbols('t', positive=True)
ft = simplify(inverse_laplace_transform(1/(a*s**2+b*s+c),s, t))
assert ft == (I*exp(t*cos(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/a)*sin(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(
4*a*c - b**2))/(2*a)) + exp(t*cos(atan2(0, -4*a*c + b**2)
/2)*sqrt(Abs(4*a*c - b**2))/a)*cos(t*sin(atan2(0, -4*a*c
+ b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a)) + I*sin(t*sin(
atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a))
- cos(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/(2*a)))*exp(-t*(b + cos(atan2(0, -4*a*c + b**2)/2)
*sqrt(Abs(4*a*c - b**2)))/(2*a))/sqrt(-4*a*c + b**2)
def test_issue_12591():
x, y = symbols("x y", real=True)
assert fourier_transform(exp(x), x, y) == FourierTransform(exp(x), x, y)
def test_issue_14692():
b = Symbol('b', negative=True)
assert laplace_transform(1/(I*x - b), x, s) == \
(-I*exp(I*b*s)*expint(1, b*s*exp_polar(I*pi/2)), 0, True)
| 43.435307 | 151 | 0.526595 |