max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
7
115
max_stars_count
int64
101
368k
id
stringlengths
2
8
content
stringlengths
6
1.03M
tests/multimodel/test_aln.py
Ronjaa95/neurolib
258
11080556
<reponame>Ronjaa95/neurolib """ Set of tests for adaptive exponential integrate-and-fire mean-field model. """ import unittest import numba import numpy as np import pytest import xarray as xr from jitcdde import jitcdde_input from neurolib.models.aln import ALNModel from neurolib.models.multimodel.builder.aln import ( ALN_EXC_DEFAULT_PARAMS, ALN_INH_DEFAULT_PARAMS, ALN_NODE_DEFAULT_CONNECTIVITY, ALNNetwork, ALNNode, ExcitatoryALNMass, InhibitoryALNMass, _get_interpolation_values, _table_lookup, ) from neurolib.models.multimodel.builder.base.constants import EXC from neurolib.utils.stimulus import ZeroInput # these keys do not test since they are rescaled on the go PARAMS_NOT_TEST_KEYS = ["c_gl", "taum", "input_0"] def _strip_keys(dict_test, strip_keys=PARAMS_NOT_TEST_KEYS): return {k: v for k, v in dict_test.items() if k not in strip_keys} SEED = 42 DURATION = 100.0 DT = 0.01 CORR_THRESHOLD = 0.9 NEUROLIB_VARIABLES_TO_TEST = [("r_mean_EXC", "rates_exc"), ("r_mean_INH", "rates_inh")] # dictionary as backend name: format in which the noise is passed BACKENDS_TO_TEST = { "jitcdde": lambda x, d, dt: x.as_cubic_splines(d, dt), "numba": lambda x, d, dt: x.as_array(d, dt), } class TestALNCallbacks(unittest.TestCase): SIGMA_TEST = 3.2 MU_TEST = 1.7 INTERP_EXPECTED = (37, 117, 0.8000000000000185, 0.7875000000002501) FIRING_RATE_EXPECTED = 0.09444942503533124 VOLTAGE_EXPECTED = -56.70455755705249 TAU_EXPECTED = 0.4487499999999963 @classmethod def setUpClass(cls): cls.mass = ExcitatoryALNMass() def test_get_interpolation_values(self): self.assertTrue(callable(_get_interpolation_values)) print(type(_get_interpolation_values)) self.assertTrue(isinstance(_get_interpolation_values, numba.core.registry.CPUDispatcher)) interp_result = _get_interpolation_values( self.SIGMA_TEST, self.MU_TEST, self.mass.sigma_range, self.mass.mu_range, self.mass.d_sigma, self.mass.d_mu, ) self.assertTupleEqual(interp_result, self.INTERP_EXPECTED) def test_table_lookup(self): self.assertTrue(callable(_table_lookup)) self.assertTrue(isinstance(_table_lookup, numba.core.registry.CPUDispatcher)) firing_rate = _table_lookup( self.SIGMA_TEST, self.MU_TEST, self.mass.sigma_range, self.mass.mu_range, self.mass.d_sigma, self.mass.d_mu, self.mass.firing_rate_transfer_function, ) self.assertEqual(firing_rate, self.FIRING_RATE_EXPECTED) voltage = _table_lookup( self.SIGMA_TEST, self.MU_TEST, self.mass.sigma_range, self.mass.mu_range, self.mass.d_sigma, self.mass.d_mu, self.mass.voltage_transfer_function, ) self.assertEqual(voltage, self.VOLTAGE_EXPECTED) tau = _table_lookup( self.SIGMA_TEST, self.MU_TEST, self.mass.sigma_range, self.mass.mu_range, self.mass.d_sigma, self.mass.d_mu, self.mass.tau_transfer_function, ) self.assertEqual(tau, self.TAU_EXPECTED) class ALNMassTestCase(unittest.TestCase): def _run_node(self, node, duration, dt): coupling_variables = {k: 0.0 for k in node.required_couplings} noise = ZeroInput(n=node.num_noise_variables).as_cubic_splines(duration, dt) system = jitcdde_input( node._derivatives(coupling_variables), input=noise, callback_functions=node._callbacks(), ) system.constant_past(np.array(node.initial_state)) system.adjust_diff() times = np.arange(dt, duration + dt, dt) return np.vstack([system.integrate(time) for time in times]) class TestALNMass(ALNMassTestCase): def _create_exc_mass(self): exc = ExcitatoryALNMass() exc.index = 0 exc.idx_state_var = 0 exc.init_mass() return exc def _create_inh_mass(self): inh = InhibitoryALNMass() inh.index = 0 inh.idx_state_var = 0 inh.init_mass() return inh def test_init(self): aln_exc = self._create_exc_mass() aln_inh = self._create_inh_mass() self.assertTrue(isinstance(aln_exc, ExcitatoryALNMass)) self.assertTrue(isinstance(aln_inh, InhibitoryALNMass)) self.assertDictEqual(_strip_keys(aln_exc.params), _strip_keys(ALN_EXC_DEFAULT_PARAMS)) self.assertDictEqual(_strip_keys(aln_inh.params), _strip_keys(ALN_INH_DEFAULT_PARAMS)) # test cascade np.testing.assert_equal(aln_exc.mu_range, aln_inh.mu_range) np.testing.assert_equal(aln_exc.sigma_range, aln_inh.sigma_range) np.testing.assert_equal(aln_exc.firing_rate_transfer_function, aln_inh.firing_rate_transfer_function) np.testing.assert_equal(aln_exc.voltage_transfer_function, aln_inh.voltage_transfer_function) np.testing.assert_equal(aln_exc.tau_transfer_function, aln_inh.tau_transfer_function) for aln in [aln_exc, aln_inh]: # test cascade self.assertTrue(callable(getattr(aln, "firing_rate_lookup"))) self.assertTrue(callable(getattr(aln, "voltage_lookup"))) self.assertTrue(callable(getattr(aln, "tau_lookup"))) # test callbacks self.assertEqual(len(aln._callbacks()), 3) self.assertTrue(all(len(callback) == 3 for callback in aln._callbacks())) # test numba callbacks self.assertEqual(len(aln._numba_callbacks()), 3) for numba_callbacks in aln._numba_callbacks(): self.assertEqual(len(numba_callbacks), 2) self.assertTrue(isinstance(numba_callbacks[0], str)) self.assertTrue(isinstance(numba_callbacks[1], numba.core.registry.CPUDispatcher)) # test derivatives coupling_variables = {k: 0.0 for k in aln.required_couplings} self.assertEqual( len(aln._derivatives(coupling_variables)), aln.num_state_variables, ) self.assertEqual(len(aln.initial_state), aln.num_state_variables) self.assertEqual(len(aln.noise_input_idx), aln.num_noise_variables) def test_run(self): aln_exc = self._create_exc_mass() aln_inh = self._create_inh_mass() for aln in [aln_exc, aln_inh]: result = self._run_node(aln, DURATION, DT) self.assertTrue(isinstance(result, np.ndarray)) self.assertTupleEqual(result.shape, (int(DURATION / DT), aln.num_state_variables)) class TestALNNode(unittest.TestCase): def _create_node(self): node = ALNNode(exc_seed=SEED, inh_seed=SEED) node.index = 0 node.idx_state_var = 0 node.init_node() return node def test_init(self): aln = self._create_node() self.assertTrue(isinstance(aln, ALNNode)) self.assertEqual(len(aln), 2) self.assertDictEqual(_strip_keys(aln[0].params), _strip_keys(ALN_EXC_DEFAULT_PARAMS)) self.assertDictEqual(_strip_keys(aln[1].params), _strip_keys(ALN_INH_DEFAULT_PARAMS)) self.assertTrue(hasattr(aln, "_rescale_connectivity")) self.assertEqual(len(aln._sync()), 4 * len(aln)) self.assertEqual(len(aln.default_network_coupling), 2) np.testing.assert_equal( np.array(sum([alnm.initial_state for alnm in aln], [])), aln.initial_state, ) def test_update_rescale_params(self): aln = self._create_node() # update connectivity and check rescaling old_rescaled = aln.connectivity.copy() aln.update_params({"local_connectivity": 2 * ALN_NODE_DEFAULT_CONNECTIVITY}) np.testing.assert_equal(aln.connectivity, 2 * old_rescaled) def test_run(self): aln = self._create_node() all_results = [] for backend, noise_func in BACKENDS_TO_TEST.items(): result = aln.run( DURATION, DT, noise_func(ZeroInput(aln.num_noise_variables), DURATION, DT), backend=backend ) self.assertTrue(isinstance(result, xr.Dataset)) self.assertEqual(len(result), aln.num_state_variables) self.assertTrue(all(state_var in result for state_var in aln.state_variable_names[0])) self.assertTrue( all(result[state_var].shape == (int(DURATION / DT), 1) for state_var in aln.state_variable_names[0]) ) all_results.append(result) # test results are the same from different backends for state_var in all_results[0]: corr_mat = np.corrcoef( np.vstack([result[state_var].values.flatten().astype(float) for result in all_results]) ) self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all()) def test_compare_w_neurolib_native_model(self): """ Compare with neurolib's native ALN model. """ # run this model aln_multi = self._create_node() multi_result = aln_multi.run( DURATION, DT, ZeroInput(aln_multi.num_noise_variables).as_array(DURATION, DT), backend="numba" ) # run neurolib's model aln_neurolib = ALNModel(seed=SEED) aln_neurolib.params["duration"] = DURATION aln_neurolib.params["dt"] = DT aln_neurolib.params["mue_ext_mean"] = 0.0 aln_neurolib.params["mui_ext_mean"] = 0.0 aln_neurolib.run() for (var_multi, var_neurolib) in NEUROLIB_VARIABLES_TO_TEST: corr_mat = np.corrcoef(aln_neurolib[var_neurolib], multi_result[var_multi].values.T) self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all()) class TestALNNetwork(unittest.TestCase): SC = np.random.rand(2, 2) np.fill_diagonal(SC, 0.0) DELAYS = np.array([[0.0, 7.8], [7.8, 0.0]]) def test_init(self): aln = ALNNetwork(self.SC, self.DELAYS) self.assertTrue(isinstance(aln, ALNNetwork)) self.assertEqual(len(aln), self.SC.shape[0]) self.assertEqual(aln.initial_state.shape[0], aln.num_state_variables) self.assertEqual(aln.default_output, f"r_mean_{EXC}") def test_run(self): aln = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED) all_results = [] for backend, noise_func in BACKENDS_TO_TEST.items(): result = aln.run( DURATION, DT, noise_func(ZeroInput(aln.num_noise_variables), DURATION, DT), backend=backend, ) self.assertTrue(isinstance(result, xr.Dataset)) self.assertEqual(len(result), aln.num_state_variables / aln.num_nodes) self.assertTrue(all(result[result_].shape == (int(DURATION / DT), aln.num_nodes) for result_ in result)) all_results.append(result) # test results are the same from different backends for state_var in all_results[0]: all_ts = np.vstack([result[state_var].values.flatten().astype(float) for result in all_results]) if np.isnan(all_ts).any(): continue corr_mat = np.corrcoef(all_ts) print(state_var, corr_mat, np.var(all_ts)) self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all()) @pytest.mark.xfail def test_compare_w_neurolib_native_model(self): """ Compare with neurolib's native ALN model. Marked with xfail, since sometimes fail on specific python version on Linux, no idea why, but the model works... """ aln_multi = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED) multi_result = aln_multi.run( DURATION, DT, ZeroInput(aln_multi.num_noise_variables).as_array(DURATION, DT), backend="numba" ) # run neurolib's model aln_neurolib = ALNModel(Cmat=self.SC, Dmat=self.DELAYS, seed=SEED) aln_neurolib.params["duration"] = DURATION aln_neurolib.params["dt"] = DT # there is no "global coupling" parameter in MultiModel aln_neurolib.params["K_gl"] = 1.0 # delays <-> length matrix aln_neurolib.params["signalV"] = 1.0 aln_neurolib.params["sigma_ou"] = 0.0 aln_neurolib.params["mue_ext_mean"] = 0.0 aln_neurolib.params["mui_ext_mean"] = 0.0 # match initial state at least for current - this seems to be enough aln_neurolib.params["mufe_init"] = np.array( [aln_multi[0][0].initial_state[0], aln_multi[1][0].initial_state[0]] ) aln_neurolib.params["mufi_init"] = np.array( [aln_multi[0][1].initial_state[0], aln_multi[1][1].initial_state[0]] ) aln_neurolib.run() for (var_multi, var_neurolib) in NEUROLIB_VARIABLES_TO_TEST: for node_idx in range(len(aln_multi)): corr_mat = np.corrcoef( aln_neurolib[var_neurolib][node_idx, :], multi_result[var_multi].values.T[node_idx, :] ) print(corr_mat) self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all()) if __name__ == "__main__": unittest.main()
venv/Lib/site-packages/nipype/interfaces/niftyfit/tests/test_dwi.py
richung99/digitizePlots
585
11080567
<reponame>richung99/digitizePlots # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import pytest from ....testing import example_data from ...niftyreg import get_custom_path from ..dwi import FitDwi, DwiTool from ...niftyreg.tests.test_regutils import no_nifty_tool @pytest.mark.skipif(no_nifty_tool(cmd="fit_dwi"), reason="niftyfit is not installed") def test_fit_dwi(): """Testing FitDwi interface.""" # Create a node object fit_dwi = FitDwi() # Check if the command is properly defined cmd = get_custom_path("fit_dwi", env_dir="NIFTYFITDIR") assert fit_dwi.cmd == cmd # test raising error with mandatory args absent with pytest.raises(ValueError): fit_dwi.run() # Assign some input data in_file = example_data("dwi.nii.gz") bval_file = example_data("bvals") bvec_file = example_data("bvecs") fit_dwi.inputs.source_file = in_file fit_dwi.inputs.bval_file = bval_file fit_dwi.inputs.bvec_file = bvec_file fit_dwi.inputs.dti_flag = True cmd_tmp = "{cmd} -source {in_file} -bval {bval} -bvec {bvec} -dti \ -error {error} -famap {fa} -mcmap {mc} -mcout {mcout} -mdmap {md} -nodiff \ {nodiff} -res {res} -rgbmap {rgb} -syn {syn} -tenmap2 {ten2} -v1map {v1}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, bval=bval_file, bvec=bvec_file, error="dwi_error.nii.gz", fa="dwi_famap.nii.gz", mc="dwi_mcmap.nii.gz", md="dwi_mdmap.nii.gz", nodiff="dwi_no_diff.nii.gz", res="dwi_resmap.nii.gz", rgb="dwi_rgbmap.nii.gz", syn="dwi_syn.nii.gz", ten2="dwi_tenmap2.nii.gz", v1="dwi_v1map.nii.gz", mcout="dwi_mcout.txt", ) assert fit_dwi.cmdline == expected_cmd @pytest.mark.skipif(no_nifty_tool(cmd="dwi_tool"), reason="niftyfit is not installed") def test_dwi_tool(): """Testing DwiTool interface.""" # Create a node object dwi_tool = DwiTool() # Check if the command is properly defined cmd = get_custom_path("dwi_tool", env_dir="NIFTYFITDIR") assert dwi_tool.cmd == cmd # test raising error with mandatory args absent with pytest.raises(ValueError): dwi_tool.run() # Assign some input data in_file = example_data("dwi.nii.gz") bval_file = example_data("bvals") bvec_file = example_data("bvecs") b0_file = example_data("b0.nii") mask_file = example_data("mask.nii.gz") dwi_tool.inputs.source_file = in_file dwi_tool.inputs.mask_file = mask_file dwi_tool.inputs.bval_file = bval_file dwi_tool.inputs.bvec_file = bvec_file dwi_tool.inputs.b0_file = b0_file dwi_tool.inputs.dti_flag = True cmd_tmp = "{cmd} -source {in_file} -bval {bval} -bvec {bvec} -b0 {b0} \ -mask {mask} -dti -famap {fa} -logdti2 {log} -mcmap {mc} -mdmap {md} \ -rgbmap {rgb} -syn {syn} -v1map {v1}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, bval=bval_file, bvec=bvec_file, b0=b0_file, mask=mask_file, fa="dwi_famap.nii.gz", log="dwi_logdti2.nii.gz", mc="dwi_mcmap.nii.gz", md="dwi_mdmap.nii.gz", rgb="dwi_rgbmap.nii.gz", syn="dwi_syn.nii.gz", v1="dwi_v1map.nii.gz", ) assert dwi_tool.cmdline == expected_cmd
torch/_sources.py
xiaohanhuang/pytorch
183
11080594
import ast import functools import inspect from textwrap import dedent from typing import Any, Optional, Tuple, List, NamedTuple from torch._C import ErrorReport from torch._C._jit_tree_views import SourceRangeFactory def get_source_lines_and_file( obj: Any, error_msg: Optional[str] = None, ) -> Tuple[List[str], int, Optional[str]]: """ Wrapper around inspect.getsourcelines and inspect.getsourcefile. Returns: (sourcelines, file_lino, filename) """ filename = None # in case getsourcefile throws try: filename = inspect.getsourcefile(obj) sourcelines, file_lineno = inspect.getsourcelines(obj) except OSError as e: msg = (f"Can't get source for {obj}. TorchScript requires source access in " "order to carry out compilation, make sure original .py files are " "available.") if error_msg: msg += '\n' + error_msg raise OSError(msg) from e return sourcelines, file_lineno, filename def normalize_source_lines(sourcelines: List[str]) -> List[str]: """ This helper function accepts a list of source lines. It finds the indentation level of the function definition (`def`), then it indents all lines in the function body to a point at or greater than that level. This allows for comments and continued string literals that are at a lower indentation than the rest of the code. Args: sourcelines: function source code, separated into lines by the '\n' character Returns: A list of source lines that have been correctly aligned """ def remove_prefix(text, prefix): return text[text.startswith(prefix) and len(prefix):] # Find the line and line number containing the function definition for i, l in enumerate(sourcelines): if l.lstrip().startswith("def"): idx = i break fn_def = sourcelines[idx] # Get a string representing the amount of leading whitespace whitespace = fn_def.split("def")[0] # Add this leading whitespace to all lines before and after the `def` aligned_prefix = [whitespace + remove_prefix(s, whitespace) for s in sourcelines[:idx]] aligned_suffix = [whitespace + remove_prefix(s, whitespace) for s in sourcelines[idx + 1:]] # Put it together again aligned_prefix.append(fn_def) return aligned_prefix + aligned_suffix # Thin wrapper around SourceRangeFactory to store extra metadata # about the function-to-be-compiled. class SourceContext(SourceRangeFactory): def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True, funcname=None): super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len) self.uses_true_division = uses_true_division self.filename = filename self.funcname = funcname @functools.lru_cache(maxsize=None) def make_source_context(*args): return SourceContext(*args) def fake_range(): return SourceContext('', None, 0, 0).make_raw_range(0, 1) class ParsedDef(NamedTuple): ast: ast.Module ctx: SourceContext source: str filename: Optional[str] file_lineno: int def parse_def(fn): sourcelines, file_lineno, filename = get_source_lines_and_file(fn, ErrorReport.call_stack()) sourcelines = normalize_source_lines(sourcelines) source = ''.join(sourcelines) dedent_src = dedent(source) py_ast = ast.parse(dedent_src) if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef): raise RuntimeError(f"Expected a single top-level function: {filename}:{file_lineno}") leading_whitespace_len = len(source.split('\n', 1)[0]) - len(dedent_src.split('\n', 1)[0]) ctx = make_source_context(source, filename, file_lineno, leading_whitespace_len, True, fn.__name__) return ParsedDef(py_ast, ctx, source, filename, file_lineno)
Imagr/main.py
rogerhu/imagr
355
11080609
# -*- coding: utf-8 -*- # # main.py # Imagr # # Created by <NAME> on 04/04/2015. # Copyright (c) 2015 <NAME>. All rights reserved. # # import modules required by application import objc import Foundation import AppKit from PyObjCTools import AppHelper # import modules containing classes required to start application and load MainMenu.nib import AppDelegate import MainController # pass control to AppKit AppHelper.runEventLoop()
utils/global_step_functions_test.py
slowy07/tensor2robot
456
11080637
<reponame>slowy07/tensor2robot<filename>utils/global_step_functions_test.py # coding=utf-8 # Copyright 2021 The Tensor2Robot Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensor2robot.utils.global_step_functions.""" from absl.testing import parameterized from tensor2robot.utils import global_step_functions import tensorflow.compat.v1 as tf class GlobalStepFunctionsTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters({ 'testcase_name': 'constant', 'boundaries': [1], 'values': [5.0], 'test_inputs': [0, 1, 10], 'expected_outputs': [5.0, 5.0, 5.0] }, { 'testcase_name': 'ramp_up', 'boundaries': [10, 20], 'values': [1.0, 11.0], 'test_inputs': [0, 10, 13, 15, 18, 20, 25], 'expected_outputs': [1.0, 1.0, 4.0, 6.0, 9.0, 11.0, 11.0] }) def test_piecewise_linear(self, boundaries, values, test_inputs, expected_outputs): global_step = tf.train.get_or_create_global_step() global_step_value = tf.placeholder(tf.int64, []) set_global_step = tf.assign(global_step, global_step_value) test_function = global_step_functions.piecewise_linear(boundaries, values) with tf.Session() as sess: for x, y_expected in zip(test_inputs, expected_outputs): sess.run(set_global_step, {global_step_value: x}) y = sess.run(test_function) self.assertEqual(y, y_expected) # Test the same with tensors as inputs test_function = global_step_functions.piecewise_linear( tf.convert_to_tensor(boundaries), tf.convert_to_tensor(values)) with tf.Session() as sess: for x, y_expected in zip(test_inputs, expected_outputs): sess.run(set_global_step, {global_step_value: x}) y = sess.run(test_function) self.assertEqual(y, y_expected) if __name__ == '__main__': tf.test.main()
controle_estoque/movconta.py
jucimar1/controleEstoque
134
11080645
# -*- codind: utf-8 -*- from PyQt5.QtCore import QDate, QUrl from PyQt5.QtWebEngineWidgets import QWebEngineView import re from Views.movConta import Ui_ct_movimento from Crud.CrudContaAReceber import CrudContaAReceber from Crud.CrudContaAPagar import CrudContaAPagar class MainMovimentoConta(Ui_ct_movimento): def mainmovconta(self, frame): super(MainMovimentoConta, self).setMovConta(frame) self.fr_movimento.show() # Icone dos botoes self.IconeBotaoMenu(self.bt_BuscaMovimento, self.resourcepath('Images/search.png')) self.IconeBotaoMenu(self.bt_PrintMovimento, self.resourcepath('Images/gtk-print.png')) # Setando Data Padrão self.dt_inicio.setDate(self.primeiroDiaMes()) self.dt_fim.setDate(self.ultimoDiaMes()) # Tamanho da tabela de detalhes self.tb_receita.setColumnWidth(0, 360) self.tb_receita.setColumnWidth(1, 100) self.tb_despesa.setColumnWidth(0, 360) self.tb_despesa.setColumnWidth(1, 100) # Funcao chamada botoes # Buscar self.bt_BuscaMovimento.clicked.connect(self.Entrada) self.bt_BuscaMovimento.clicked.connect(self.Despesa) # Imprimir self.bt_PrintMovimento.clicked.connect(self.imprimirMovimento) # Chamando primeira consulta self.Entrada() self.Despesa() def Entrada(self): dataInicio = QDate.toString( self.dt_inicio.date(), "yyyy-MM-dd") dataFim = QDate.toString( self.dt_fim.date(), "yyyy-MM-dd") busca = CrudContaAReceber() busca.dataRecebimento = dataInicio busca.dataFim = dataFim busca.movEntrada() # Setando a data referente a busca self.lb_inicioMovimento.setText(QDate.toString( self.dt_inicio.date(), "dd-MM-yyyy")) self.lb_fimMovimento.setText(QDate.toString( self.dt_fim.date(), "dd-MM-yyyy")) self.lb_inicioDespesa.setText(QDate.toString( self.dt_inicio.date(), "dd-MM-yyyy")) self.lb_fimDespesa.setText(QDate.toString( self.dt_fim.date(), "dd-MM-yyyy")) self.lb_entradaPendente.setText(str(busca.valorAReceber)) self.lb_entradaRecebido.setText(str(busca.valorRecebido)) if busca.valorAReceber > 0.01: # Grafico if busca.valorRecebido: valor = busca.valorRecebido / busca.valorAReceber * 100 # formato self.pr_receita.setFormat("%.02f%%" % (valor)) # Max e valor self.pr_receita.setMaximum(busca.valorAReceber) self.pr_receita.setValue(busca.valorRecebido) self.detalheEntrada() pass def detalheEntrada(self): dataInicio = QDate.toString( self.dt_inicio.date(), "yyyy-MM-dd") dataFim = QDate.toString( self.dt_fim.date(), "yyyy-MM-dd") busca = CrudContaAReceber() busca.dataRecebimento = dataInicio busca.dataFim = dataFim busca.detalheEntrada() while self.tb_receita.rowCount() > 0: self.tb_receita.removeRow(0) i = 0 while i < len(busca.categoria): self.tb_receita.insertRow(i) self.conteudoTabelaLeft( self.tb_receita, i, 0, busca.categoria[i] + " - "+busca.formaPagamento[i]) self.conteudoTabela(self.tb_receita, i, 1, "R$ " + str(busca.valorRecebido[i])) i += 1 pass def Despesa(self): dataInicio = QDate.toString( self.dt_inicio.date(), "yyyy-MM-dd") dataFim = QDate.toString( self.dt_fim.date(), "yyyy-MM-dd") busca = CrudContaAPagar() busca.dataPagamento = dataInicio busca.dataFim = dataFim busca.movDespesa() self.lb_despesaAPagar.setText(str(busca.valorAPagar)) self.lb_despesaPaga.setText(str(busca.valorPago)) if busca.valorAPagar > 0.01: # Grafico if busca.valorPago: valor = busca.valorPago / busca.valorAPagar * 100 # formato self.pr_despesa.setFormat("%.02f%%" % (valor)) # Max e valor self.pr_despesa.setMaximum(busca.valorAPagar) self.pr_despesa.setValue(busca.valorPago) self.detalheDespesa() pass def detalheDespesa(self): dataInicio = QDate.toString( self.dt_inicio.date(), "yyyy-MM-dd") dataFim = QDate.toString( self.dt_fim.date(), "yyyy-MM-dd") busca = CrudContaAPagar() busca.dataPagamento = dataInicio busca.dataFim = dataFim busca.detalheDespesa() while self.tb_despesa.rowCount() > 0: self.tb_despesa.removeRow(0) i = 0 while i < len(busca.categoria): self.tb_despesa.insertRow(i) self.conteudoTabelaLeft( self.tb_despesa, i, 0, busca.categoria[i] + " - "+busca.formaPagamento[i]) self.conteudoTabela(self.tb_despesa, i, 1, "R$ " + str(busca.valorPago[i])) i += 1 self.calculoMovimento() pass def calculoMovimento(self): if self.lb_despesaPaga.text() and self.lb_entradaRecebido.text(): despesa = float(self.lb_despesaPaga.text()) receita = float(self.lb_entradaRecebido.text()) elif self.lb_despesaPaga.text() and not self.lb_entradaRecebido.text(): despesa = float(self.lb_despesaPaga.text()) receita = 0.00 elif not self.lb_despesaPaga.text() and self.lb_entradaRecebido.text(): receita = float(self.lb_entradaRecebido.text()) despesa = 0.00 total = receita - despesa if total < 0: self.lb_totalMovimento.setStyleSheet("QLabel{\n" "font-size: 26px;\n" "font-family: \"Arial\";\n" "font-weight: bold;\n" "color: red;\n" "border: none;\n" "background: none;\n" "}") else: self.lb_totalMovimento.setStyleSheet("QLabel{\n" "font-size: 26px;\n" "font-family: \"Arial\";\n" "font-weight: bold;\n" "color: #072D06;\n" "border: none;\n" "background: none;\n" "}") self.lb_totalMovimento.setText("R$ "+format(total, ".2f")) # Imprimindo def imprimirMovimento(self): self.documento = QWebEngineView() headertable = ["Receitas", " Despesas"] data_inicio = QDate.toString(self.dt_inicio.date(), "dd-MM-yyyy") data_fim = QDate.toString(self.dt_fim.date(), "dd-MM-yyyy") desc_receita = [] total_desc = [] desc_despesa = [] total_descDespesa = [] print(self.usuario) if self.tb_receita.rowCount() >= 1: for i in range(self.tb_receita.rowCount()): desc_receita.append(self.tb_receita.item(i, 0).text()) total_desc.append(self.tb_receita.item(i, 1).text()) if self.tb_despesa.rowCount() >= 1: for i in range(self.tb_despesa.rowCount()): desc_despesa.append(self.tb_despesa.item(i, 0).text()) total_descDespesa.append(self.tb_despesa.item(i, 1).text()) if self.lb_despesaPaga.text(): totaldespesa = self.lb_despesaPaga.text() else: totaldespesa = 0.00 if self.lb_entradaRecebido.text(): totalreceita = self.lb_entradaRecebido.text() else: totalreceita = 0.00 totalFinal = self.lb_totalMovimento.text() totalFinal2 = re.sub(r'([^\d]+) ', '', totalFinal) self.renderTemplate( "movimento.html", estilo=self.resourcepath('Template/estilo.css'), titulo="Fluxo de Caixa periodo {} à {}".format( data_inicio, data_fim), headertable=headertable, desc_receita=desc_receita, total_desc=total_desc, desc_despesa=desc_despesa, total_descDespesa=total_descDespesa, totaldespesa=totaldespesa, totalreceita=totalreceita, totalFinal=totalFinal, totalFinal2=float(totalFinal2) ) self.documento.load(QUrl.fromLocalFile( self.resourcepath("report.html"))) self.documento.loadFinished['bool'].connect(self.previaImpressao)
tests/tmp.py
d6t/d6tflow
1,004
11080659
import d6tflow, luigi import pandas as pd # define 2 tasks that load raw data class Task1(d6tflow.tasks.TaskPickle): def run(self): df = pd.DataFrame({'a':range(3)}) self.save(df) # quickly save dataframe self.saveMeta({1:1}) Task1(path='data/data2').run() Task1(path='data/data2').outputLoad() Task1(path='data/data2').metaLoad()
tests/components/qnap_qsw/test_button.py
liangleslie/core
30,023
11080687
<reponame>liangleslie/core """The sensor tests for the QNAP QSW platform.""" from unittest.mock import patch from homeassistant.components.button.const import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN from homeassistant.core import HomeAssistant from .util import SYSTEM_COMMAND_MOCK, USERS_VERIFICATION_MOCK, async_init_integration async def test_qnap_buttons(hass: HomeAssistant) -> None: """Test buttons.""" await async_init_integration(hass) state = hass.states.get("button.qsw_m408_4c_reboot") assert state assert state.state == STATE_UNKNOWN with patch( "homeassistant.components.qnap_qsw.QnapQswApi.get_users_verification", return_value=USERS_VERIFICATION_MOCK, ) as mock_users_verification, patch( "homeassistant.components.qnap_qsw.QnapQswApi.post_system_command", return_value=SYSTEM_COMMAND_MOCK, ) as mock_post_system_command: await hass.services.async_call( BUTTON_DOMAIN, SERVICE_PRESS, {ATTR_ENTITY_ID: "button.qsw_m408_4c_reboot"}, blocking=True, ) await hass.async_block_till_done() mock_users_verification.assert_called_once() mock_post_system_command.assert_called_once()
modules/dbnd-airflow/src/dbnd_airflow/airflow_extensions/airflow_config.py
ipattarapong/dbnd
224
11080707
import logging import os logger = logging.getLogger(__name__) def reinit_airflow_sql_conn(): from airflow.settings import configure_vars, configure_orm from dbnd._core.configuration.dbnd_config import config as dbnd_config configure_vars() # The webservers import this file from models.py with the default settings. configure_orm() # add query handler before every execute # this will print query, code line and stack trace if dbnd_config.getboolean("log", "sqlalchemy_trace"): from sqlalchemy import event from airflow import settings as airflow_settings from dbnd_airflow.db_utils import trace_sqlalchemy_query event.listen( airflow_settings.engine, "before_cursor_execute", trace_sqlalchemy_query ) # this will print query execution time from sqlalchemy import event from airflow import settings as airflow_settings from dbnd_airflow.db_utils import ( profile_before_cursor_execute, profile_after_cursor_execute, ) event.listen( airflow_settings.engine, "before_cursor_execute", profile_before_cursor_execute ) event.listen( airflow_settings.engine, "after_cursor_execute", profile_after_cursor_execute )
Getting_Started_With_Raspberry_Pi_Pico/conditional_loop/code.py
gamblor21/Adafruit_Learning_System_Guides
665
11080726
"""Example of assigning a variable, and comparing it to a value in a loop.""" user_name = input ("What is your name? ") while user_name != "<NAME>": print("You are not Superman - try again!") user_name = input ("What is your name? ") print("You are Superman!")
greykite/tests/framework/templates/test_forecaster.py
kenzie-q/greykite
1,503
11080762
<gh_stars>1000+ import sys import warnings from enum import Enum import numpy as np import pytest from testfixtures import LogCapture from greykite.common.constants import LOGGER_NAME from greykite.common.constants import TIME_COL from greykite.common.constants import VALUE_COL from greykite.common.data_loader import DataLoader from greykite.common.evaluation import EvaluationMetricEnum from greykite.common.python_utils import assert_equal from greykite.common.testing_utils import generate_df_for_tests from greykite.common.testing_utils import generate_df_with_reg_for_tests from greykite.framework.templates.autogen.forecast_config import ComputationParam from greykite.framework.templates.autogen.forecast_config import EvaluationMetricParam from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam from greykite.framework.templates.autogen.forecast_config import ForecastConfig from greykite.framework.templates.autogen.forecast_config import MetadataParam from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam from greykite.framework.templates.forecaster import Forecaster from greykite.framework.templates.model_templates import ModelTemplate from greykite.framework.templates.model_templates import ModelTemplateEnum from greykite.framework.templates.prophet_template import ProphetTemplate from greykite.framework.templates.silverkite_template import SilverkiteTemplate from greykite.framework.templates.simple_silverkite_template import SimpleSilverkiteTemplate from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_COMPONENT_KEYWORDS from greykite.framework.templates.simple_silverkite_template_config import SimpleSilverkiteTemplateOptions from greykite.framework.utils.framework_testing_utils import assert_basic_pipeline_equal from greykite.framework.utils.framework_testing_utils import assert_forecast_pipeline_result_equal from greykite.framework.utils.framework_testing_utils import check_forecast_pipeline_result from greykite.framework.utils.result_summary import summarize_grid_search_results try: import fbprophet # noqa except ModuleNotFoundError: pass @pytest.fixture def df_config(): data = generate_df_with_reg_for_tests( freq="W-MON", periods=140, remove_extra_cols=True, mask_test_actuals=True) reg_cols = ["regressor1", "regressor2", "regressor_categ"] keep_cols = [TIME_COL, VALUE_COL] + reg_cols df = data["df"][keep_cols] model_template = "SILVERKITE" evaluation_metric = EvaluationMetricParam( cv_selection_metric=EvaluationMetricEnum.MeanAbsoluteError.name, agg_periods=7, agg_func=np.max, null_model_params={ "strategy": "quantile", "constant": None, "quantile": 0.5 } ) evaluation_period = EvaluationPeriodParam( test_horizon=10, periods_between_train_test=5, cv_horizon=4, cv_min_train_periods=80, cv_expanding_window=False, cv_periods_between_splits=20, cv_periods_between_train_test=3, cv_max_splits=3 ) model_components = ModelComponentsParam( regressors={ "regressor_cols": reg_cols }, custom={ "fit_algorithm_dict": { "fit_algorithm": "ridge", "fit_algorithm_params": {"cv": 2} } } ) computation = ComputationParam( verbose=2 ) forecast_horizon = 27 coverage = 0.90 config = ForecastConfig( model_template=model_template, computation_param=computation, coverage=coverage, evaluation_metric_param=evaluation_metric, evaluation_period_param=evaluation_period, forecast_horizon=forecast_horizon, model_components_param=model_components ) return { "df": df, "config": config, "model_template": model_template, "reg_cols": reg_cols, } class MySimpleSilverkiteTemplate(SimpleSilverkiteTemplate): """Same as `SimpleSilverkiteTemplate`, but with different default model template. """ DEFAULT_MODEL_TEMPLATE = "SILVERKITE_WEEKLY" class MyModelTemplateEnum(Enum): """Custom version of TemplateEnum for test cases""" MYSILVERKITE = ModelTemplate( template_class=MySimpleSilverkiteTemplate, description="My own version of Silverkite.") SILVERKITE = ModelTemplate( template_class=SimpleSilverkiteTemplate, description="My own version of Silverkite.") class MissingSimpleSilverkiteTemplateEnum(Enum): """Custom version of TemplateEnum for test cases. SimpleSilverkiteTemplate is not included. """ SK = ModelTemplate( template_class=SilverkiteTemplate, description="Silverkite template.") PROPHET = ModelTemplate( template_class=ProphetTemplate, description="Prophet template.") def test_init(): """Tests constructor""" forecaster = Forecaster() assert forecaster.model_template_enum == ModelTemplateEnum assert forecaster.default_model_template_name == "SILVERKITE" forecaster = Forecaster( model_template_enum=MyModelTemplateEnum, default_model_template_name="MYSILVERKITE" ) assert forecaster.model_template_enum == MyModelTemplateEnum assert forecaster.default_model_template_name == "MYSILVERKITE" def test_get_config_with_default_model_template_and_components(): """Tests `__get_config_with_default_model_template_and_components`""" forecaster = Forecaster() config = forecaster._Forecaster__get_config_with_default_model_template_and_components() assert config == ForecastConfig( model_template=ModelTemplateEnum.SILVERKITE.name, model_components_param=ModelComponentsParam() ) # Overrides `default_model_template_name`, unnests `model_components_param`. forecaster = Forecaster(default_model_template_name="SK") config = ForecastConfig( model_components_param=[ModelComponentsParam()] ) config = forecaster._Forecaster__get_config_with_default_model_template_and_components(config) assert config == ForecastConfig( model_template=ModelTemplateEnum.SK.name, model_components_param=ModelComponentsParam() ) # Overrides `model_template_enum` and `default_model_template_name` forecaster = Forecaster( model_template_enum=MyModelTemplateEnum, default_model_template_name="MYSILVERKITE" ) config = forecaster._Forecaster__get_config_with_default_model_template_and_components() assert config == ForecastConfig( model_template=MyModelTemplateEnum.MYSILVERKITE.name, model_components_param=ModelComponentsParam() ) def test_get_template_class(): """Tests `__get_template_class`""" forecaster = Forecaster() assert forecaster._Forecaster__get_template_class() == SimpleSilverkiteTemplate assert forecaster._Forecaster__get_template_class( config=ForecastConfig(model_template=ModelTemplateEnum.SILVERKITE_WEEKLY.name)) == SimpleSilverkiteTemplate if "fbprophet" in sys.modules: assert forecaster._Forecaster__get_template_class( config=ForecastConfig(model_template=ModelTemplateEnum.PROPHET.name)) == ProphetTemplate assert forecaster._Forecaster__get_template_class( config=ForecastConfig(model_template=ModelTemplateEnum.SK.name)) == SilverkiteTemplate # list `model_template` model_template = [ ModelTemplateEnum.SILVERKITE.name, ModelTemplateEnum.SILVERKITE_DAILY_90.name, SimpleSilverkiteTemplateOptions()] forecaster = Forecaster() assert forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) == SimpleSilverkiteTemplate # `model_template` name is wrong model_template = "SOME_TEMPLATE" with pytest.raises(ValueError, match=f"Model Template '{model_template}' is not recognized! " f"Must be one of: SILVERKITE, SILVERKITE_WITH_AR, " f"SILVERKITE_DAILY_1_CONFIG_1, SILVERKITE_DAILY_1_CONFIG_2, SILVERKITE_DAILY_1_CONFIG_3, " f"SILVERKITE_DAILY_1, SILVERKITE_DAILY_90, " f"SILVERKITE_WEEKLY, SILVERKITE_HOURLY_1, SILVERKITE_HOURLY_24, " f"SILVERKITE_HOURLY_168, SILVERKITE_HOURLY_336, SILVERKITE_EMPTY"): forecaster = Forecaster() forecaster._Forecaster__get_template_class( config=ForecastConfig(model_template=model_template)) # List of `model_template` that include names not compatible with `SimpleSilverkiteTemplate`. model_template = [ ModelTemplateEnum.SK.name, ModelTemplateEnum.SILVERKITE.name, ModelTemplateEnum.SILVERKITE_DAILY_90.name, SimpleSilverkiteTemplateOptions()] with pytest.raises(ValueError, match="All model templates must use the same template class"): forecaster = Forecaster() forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) # list of `model_template` not supported by template class model_template = [ModelTemplateEnum.SK.name, ModelTemplateEnum.SK.name] with pytest.raises(ValueError, match="The template class <class " "'greykite.framework.templates.silverkite_template.SilverkiteTemplate'> " "does not allow `model_template` to be a list"): forecaster = Forecaster() forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) # List of `model_components_param` not compatible with `model_template`. model_template = ModelTemplateEnum.SK.name config = ForecastConfig( model_template=model_template, model_components_param=[ModelComponentsParam(), ModelComponentsParam()] ) with pytest.raises(ValueError, match=f"Model template {model_template} does not support a list of `ModelComponentsParam`."): forecaster = Forecaster() forecaster._Forecaster__get_template_class(config=config) # List of a single `model_components_param` is acceptable for a model template # that does not accept multiple `model_components_param`. forecaster = Forecaster() config = ForecastConfig( model_template=model_template, model_components_param=[ModelComponentsParam()] ) forecaster._Forecaster__get_template_class(config=config) # List of multiple `model_components_param` is accepted by SILVERKITE config = ForecastConfig( model_template=ModelTemplateEnum.SILVERKITE.name, model_components_param=[ModelComponentsParam(), ModelComponentsParam()] ) forecaster._Forecaster__get_template_class(config=config) # Error for unrecognized model template when there is no simple silverkite template model_template = "UNKNOWN" with pytest.raises(ValueError, match=rf"Model Template '{model_template}' is not recognized! " rf"Must be one of: SK, PROPHET\."): forecaster = Forecaster( model_template_enum=MissingSimpleSilverkiteTemplateEnum, default_model_template_name="SK", ) forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) # Custom `model_template_enum` forecaster = Forecaster( model_template_enum=MyModelTemplateEnum, default_model_template_name="MYSILVERKITE", ) assert forecaster._Forecaster__get_template_class() == MySimpleSilverkiteTemplate if "fbprophet" in sys.modules: model_template = ModelTemplateEnum.PROPHET.name # `model_template` name is wrong with pytest.raises(ValueError, match=f"Model Template '{model_template}' is not recognized! " f"Must be one of: MYSILVERKITE, SILVERKITE or satisfy the `SimpleSilverkiteTemplate` rules."): forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) model_template = SimpleSilverkiteTemplateOptions() # dataclass with LogCapture(LOGGER_NAME) as log_capture: forecaster._Forecaster__get_template_class(config=ForecastConfig(model_template=model_template)) log_capture.check( (LOGGER_NAME, 'DEBUG', 'Model template SimpleSilverkiteTemplateOptions(freq=<SILVERKITE_FREQ.DAILY: ' "'DAILY'>, seas=<SILVERKITE_SEAS.LT: 'LT'>, gr=<SILVERKITE_GR.LINEAR: " "'LINEAR'>, cp=<SILVERKITE_CP.NONE: 'NONE'>, hol=<SILVERKITE_HOL.NONE: " "'NONE'>, feaset=<SILVERKITE_FEASET.OFF: 'OFF'>, " "algo=<SILVERKITE_ALGO.LINEAR: 'LINEAR'>, ar=<SILVERKITE_AR.OFF: 'OFF'>, " "dsi=<SILVERKITE_DSI.AUTO: 'AUTO'>, wsi=<SILVERKITE_WSI.AUTO: 'AUTO'>) is " 'not found in the template enum. Checking if model template is suitable for ' '`SimpleSilverkiteTemplate`.'), (LOGGER_NAME, 'DEBUG', 'Multiple template classes could be used for the model template ' "SimpleSilverkiteTemplateOptions(freq=<SILVERKITE_FREQ.DAILY: 'DAILY'>, " "seas=<SILVERKITE_SEAS.LT: 'LT'>, gr=<SILVERKITE_GR.LINEAR: 'LINEAR'>, " "cp=<SILVERKITE_CP.NONE: 'NONE'>, hol=<SILVERKITE_HOL.NONE: 'NONE'>, " "feaset=<SILVERKITE_FEASET.OFF: 'OFF'>, algo=<SILVERKITE_ALGO.LINEAR: " "'LINEAR'>, ar=<SILVERKITE_AR.OFF: 'OFF'>, dsi=<SILVERKITE_DSI.AUTO: " "'AUTO'>, wsi=<SILVERKITE_WSI.AUTO: 'AUTO'>): [<class " "'test_forecaster.MySimpleSilverkiteTemplate'>, <class " "'greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate'>]"), (LOGGER_NAME, 'DEBUG', 'Using template class <class ' "'test_forecaster.MySimpleSilverkiteTemplate'> " 'for the model template ' "SimpleSilverkiteTemplateOptions(freq=<SILVERKITE_FREQ.DAILY: 'DAILY'>, " "seas=<SILVERKITE_SEAS.LT: 'LT'>, gr=<SILVERKITE_GR.LINEAR: 'LINEAR'>, " "cp=<SILVERKITE_CP.NONE: 'NONE'>, hol=<SILVERKITE_HOL.NONE: 'NONE'>, " "feaset=<SILVERKITE_FEASET.OFF: 'OFF'>, algo=<SILVERKITE_ALGO.LINEAR: " "'LINEAR'>, ar=<SILVERKITE_AR.OFF: 'OFF'>, dsi=<SILVERKITE_DSI.AUTO: " "'AUTO'>, wsi=<SILVERKITE_WSI.AUTO: 'AUTO'>)")) def test_apply_forecast_config(df_config): """Tests `apply_forecast_config`""" df = df_config["df"] config = df_config["config"] model_template = df_config["model_template"] reg_cols = df_config["reg_cols"] # The same class can be re-used. `df` and `config` are taken from the function call # to `apply_forecast_config`. Only `model_template_enum` and # `default_model_template_name` are persistent in the state. forecaster = Forecaster() # no config with warnings.catch_warnings(): warnings.simplefilter("ignore") pipeline_params = forecaster.apply_forecast_config( df=df) template_class = SimpleSilverkiteTemplate # based on `default_model_template_name` expected_pipeline_params = template_class().apply_template_for_pipeline_params( df=df) assert_basic_pipeline_equal(pipeline_params.pop("pipeline"), expected_pipeline_params.pop("pipeline")) assert_equal(pipeline_params, expected_pipeline_params) assert forecaster.config is not None assert forecaster.template_class == template_class assert isinstance(forecaster.template, forecaster.template_class) assert forecaster.pipeline_params is not None # custom config with warnings.catch_warnings(): warnings.simplefilter("ignore") pipeline_params = forecaster.apply_forecast_config( df=df, config=config) template_class = ModelTemplateEnum[model_template].value.template_class # SimpleSilverkiteTemplate expected_pipeline_params = template_class().apply_template_for_pipeline_params( df, config) expected_pipeline = expected_pipeline_params.pop("pipeline") assert_basic_pipeline_equal(pipeline_params.pop("pipeline"), expected_pipeline) assert_equal(pipeline_params, expected_pipeline_params) # Custom `model_template_enum`. Same result, because # `MySimpleSilverkiteTemplate` has the same apply_template_for_pipeline_params # as `SimpleSilverkiteTemplate`. forecaster = Forecaster(model_template_enum=MyModelTemplateEnum) pipeline_params = forecaster.apply_forecast_config(df=df, config=config) assert_basic_pipeline_equal(pipeline_params.pop("pipeline"), expected_pipeline) assert_equal(pipeline_params, expected_pipeline_params) if "fbprophet" in sys.modules: # `model_component` of config is incompatible with model_template forecaster = Forecaster() config = ForecastConfig( model_template=ModelTemplateEnum.PROPHET.name, model_components_param=ModelComponentsParam( regressors={ "regressor_cols": reg_cols } ) ) with pytest.raises(ValueError) as record: forecaster.apply_forecast_config(df=df, config=config) assert "Unexpected key(s) found: {\'regressor_cols\'}. The valid keys are: " \ "dict_keys([\'add_regressor_dict\'])" in str(record) # metadata of config is incompatible with df df = df.rename(columns={TIME_COL: "some_time_col", VALUE_COL: "some_value_col"}) with pytest.raises(ValueError, match="ts column is not in input data"): forecaster.apply_forecast_config(df=df, config=config) def test_run_forecast_config(): """Tests `run_forecast_config`""" data = generate_df_for_tests(freq="H", periods=14*24) df = data["df"] # Checks if exception is raised with pytest.raises(ValueError, match="is not recognized"): forecaster = Forecaster() forecaster.run_forecast_config(df=df, config=ForecastConfig(model_template="unknown_template")) with pytest.raises(ValueError, match="is not recognized"): forecaster = Forecaster() forecaster.run_forecast_json(df=df, json_str="""{ "model_template": "unknown_template" }""") # All run_forecast_config* functions return the same result for the default config, # call forecast_pipeline, and return a result with the proper format. np.random.seed(123) forecaster = Forecaster() default_result = forecaster.run_forecast_config(df=df) score_func = EvaluationMetricEnum.MeanAbsolutePercentError.name check_forecast_pipeline_result( default_result, coverage=None, strategy=None, score_func=score_func, greater_is_better=False) assert_equal(forecaster.forecast_result, default_result) np.random.seed(123) forecaster = Forecaster() json_result = forecaster.run_forecast_json(df=df) check_forecast_pipeline_result( json_result, coverage=None, strategy=None, score_func=score_func, greater_is_better=False) assert_forecast_pipeline_result_equal(json_result, default_result, rel=0.02) def test_run_forecast_config_custom(): """Tests `run_forecast_config` on weekly data with custom config: - numeric and categorical regressors - coverage - null model """ data = generate_df_with_reg_for_tests( freq="W-MON", periods=140, remove_extra_cols=True, mask_test_actuals=True) reg_cols = ["regressor1", "regressor2", "regressor_categ"] keep_cols = [TIME_COL, VALUE_COL] + reg_cols df = data["df"][keep_cols] metric = EvaluationMetricEnum.MeanAbsoluteError evaluation_metric = EvaluationMetricParam( cv_selection_metric=metric.name, agg_periods=7, agg_func=np.max, null_model_params={ "strategy": "quantile", "constant": None, "quantile": 0.5 } ) evaluation_period = EvaluationPeriodParam( test_horizon=10, periods_between_train_test=5, cv_horizon=4, cv_min_train_periods=80, cv_expanding_window=False, cv_periods_between_splits=20, cv_periods_between_train_test=3, cv_max_splits=3 ) model_components = ModelComponentsParam( regressors={ "regressor_cols": reg_cols }, custom={ "fit_algorithm_dict": { "fit_algorithm": "ridge", "fit_algorithm_params": {"cv": 2} } } ) computation = ComputationParam( verbose=2 ) forecast_horizon = 27 coverage = 0.90 forecast_config = ForecastConfig( model_template=ModelTemplateEnum.SILVERKITE.name, computation_param=computation, coverage=coverage, evaluation_metric_param=evaluation_metric, evaluation_period_param=evaluation_period, forecast_horizon=forecast_horizon, model_components_param=model_components ) with warnings.catch_warnings(): warnings.simplefilter("ignore") forecaster = Forecaster() result = forecaster.run_forecast_config( df=df, config=forecast_config) mse = EvaluationMetricEnum.RootMeanSquaredError.get_metric_name() q80 = EvaluationMetricEnum.Quantile80.get_metric_name() assert result.backtest.test_evaluation[mse] == pytest.approx(2.976, rel=1e-2) assert result.backtest.test_evaluation[q80] == pytest.approx(1.360, rel=1e-2) assert result.forecast.train_evaluation[mse] == pytest.approx(2.224, rel=1e-2) assert result.forecast.train_evaluation[q80] == pytest.approx(0.941, rel=1e-2) check_forecast_pipeline_result( result, coverage=coverage, strategy=None, score_func=metric.name, greater_is_better=False) with pytest.raises(KeyError, match="missing_regressor"): model_components = ModelComponentsParam( regressors={ "regressor_cols": ["missing_regressor"] } ) forecaster = Forecaster() result = forecaster.run_forecast_config( df=df, config=ForecastConfig( model_template=ModelTemplateEnum.SILVERKITE.name, model_components_param=model_components ) ) check_forecast_pipeline_result( result, coverage=None, strategy=None, score_func=metric.get_metric_func(), greater_is_better=False) def test_run_forecast_json(): """Tests: - no coverage - hourly data (2+ years) - default `hyperparameter_grid` (all interaction terms enabled) """ # sets random state for consistent comparison data = generate_df_for_tests( freq="H", periods=700*24) df = data["train_df"] json_str = """{ "model_template": "SILVERKITE", "forecast_horizon": 3359, "model_components_param": { "custom": { "fit_algorithm_dict": { "fit_algorithm": "linear" } } } }""" with warnings.catch_warnings(): warnings.simplefilter("ignore") forecaster = Forecaster() result = forecaster.run_forecast_json( df=df, json_str=json_str) mse = EvaluationMetricEnum.RootMeanSquaredError.get_metric_name() q80 = EvaluationMetricEnum.Quantile80.get_metric_name() assert result.backtest.test_evaluation[mse] == pytest.approx(2.120, rel=0.03) assert result.backtest.test_evaluation[q80] == pytest.approx(0.863, rel=0.02) assert result.forecast.train_evaluation[mse] == pytest.approx(1.975, rel=0.02) assert result.forecast.train_evaluation[q80] == pytest.approx(0.786, rel=1e-2) check_forecast_pipeline_result( result, coverage=None, strategy=None, score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name, greater_is_better=False) def test_run_forecast_config_with_single_simple_silverkite_template(): # The generic name of single simple silverkite templates are not added to `ModelTemplateEnum`, # therefore we test if these are recognized. data = generate_df_for_tests(freq="D", periods=365) df = data["df"] metric = EvaluationMetricEnum.MeanAbsoluteError evaluation_metric = EvaluationMetricParam( cv_selection_metric=metric.name, agg_periods=7, agg_func=np.max, null_model_params={ "strategy": "quantile", "constant": None, "quantile": 0.5 } ) evaluation_period = EvaluationPeriodParam( test_horizon=10, periods_between_train_test=5, cv_horizon=4, cv_min_train_periods=80, cv_expanding_window=False, cv_periods_between_splits=20, cv_periods_between_train_test=3, cv_max_splits=2 ) model_components = ModelComponentsParam( hyperparameter_override=[ {"estimator__yearly_seasonality": 1}, {"estimator__yearly_seasonality": 2} ] ) computation = ComputationParam( verbose=2 ) forecast_horizon = 27 coverage = 0.90 single_template_class = SimpleSilverkiteTemplateOptions( freq=SILVERKITE_COMPONENT_KEYWORDS.FREQ.value.DAILY, seas=SILVERKITE_COMPONENT_KEYWORDS.SEAS.value.NONE ) forecast_config = ForecastConfig( model_template=[single_template_class, "DAILY_ALGO_SGD", "SILVERKITE_DAILY_90"], computation_param=computation, coverage=coverage, evaluation_metric_param=evaluation_metric, evaluation_period_param=evaluation_period, forecast_horizon=forecast_horizon, model_components_param=model_components ) forecaster = Forecaster() result = forecaster.run_forecast_config( df=df, config=forecast_config) summary = summarize_grid_search_results(result.grid_search) # single_template_class is 1 template, # "DAILY_ALGO_SGD" is 1 template and "SILVERKITE_DAILY_90" has 4 templates. # With 2 items in `hyperparameter_override, there should be a total of 12 cases. assert summary.shape[0] == 12 # Tests functionality for single template class only. forecast_config = ForecastConfig( model_template=single_template_class, computation_param=computation, coverage=coverage, evaluation_metric_param=evaluation_metric, evaluation_period_param=evaluation_period, forecast_horizon=forecast_horizon ) forecaster = Forecaster() pipeline_parameters = forecaster.apply_forecast_config( df=df, config=forecast_config ) assert_equal( actual=pipeline_parameters["hyperparameter_grid"], expected={ "estimator__time_properties": [None], "estimator__origin_for_time_vars": [None], "estimator__train_test_thresh": [None], "estimator__training_fraction": [None], "estimator__fit_algorithm_dict": [{"fit_algorithm": "linear", "fit_algorithm_params": None}], "estimator__holidays_to_model_separately": [[]], "estimator__holiday_lookup_countries": [[]], "estimator__holiday_pre_num_days": [0], "estimator__holiday_post_num_days": [0], "estimator__holiday_pre_post_num_dict": [None], "estimator__daily_event_df_dict": [None], "estimator__changepoints_dict": [None], "estimator__seasonality_changepoints_dict": [None], "estimator__yearly_seasonality": [0], "estimator__quarterly_seasonality": [0], "estimator__monthly_seasonality": [0], "estimator__weekly_seasonality": [0], "estimator__daily_seasonality": [0], "estimator__max_daily_seas_interaction_order": [0], "estimator__max_weekly_seas_interaction_order": [2], "estimator__autoreg_dict": [None], "estimator__simulation_num": [10], "estimator__lagged_regressor_dict": [None], "estimator__min_admissible_value": [None], "estimator__max_admissible_value": [None], "estimator__normalize_method": [None], "estimator__uncertainty_dict": [None], "estimator__growth_term": ["linear"], "estimator__regressor_cols": [[]], "estimator__feature_sets_enabled": [False], "estimator__extra_pred_cols": [[]], "estimator__drop_pred_cols": [None], "estimator__explicit_pred_cols": [None], "estimator__regression_weight_col": [None], }, ignore_keys={"estimator__time_properties": None} ) def test_estimator_plot_components_from_forecaster(): """Tests estimator's plot_components function after the Forecaster has set everything up at the top most level""" # Test with real data (Female-births) via model template dl = DataLoader() data_path = dl.get_data_home(data_sub_dir="daily") df = dl.get_df(data_path=data_path, data_name="daily_female_births") metadata = MetadataParam(time_col="Date", value_col="Births", freq="D") model_components = ModelComponentsParam( seasonality={ "yearly_seasonality": True, "quarterly_seasonality": True, "weekly_seasonality": True, "daily_seasonality": False } ) result = Forecaster().run_forecast_config( df=df, config=ForecastConfig( model_template=ModelTemplateEnum.SILVERKITE.name, forecast_horizon=30, # forecast 1 month coverage=0.95, # 95% prediction intervals metadata_param=metadata, model_components_param=model_components ) ) estimator = result.model.steps[-1][-1] assert estimator.plot_components() def test_estimator_get_coef_summary_from_forecaster(): """Tests model summary for silverkite model with missing values in value_col after everything is setup by Forecaster""" dl = DataLoader() df_pt = dl.load_peyton_manning() config = ForecastConfig().from_dict(dict( model_template=ModelTemplateEnum.SILVERKITE.name, forecast_horizon=10, metadata_param=dict( time_col="ts", value_col="y", freq="D" ), model_components_param=dict( custom={ "fit_algorithm_dict": {"fit_algorithm": "linear"} } ) )) result = Forecaster().run_forecast_config( df=df_pt[:365], # shortens df to speed up config=config ) summary = result.model[-1].summary() x = summary.get_coef_summary( is_intercept=True, return_df=True) assert x.shape[0] == 1 summary.get_coef_summary(is_time_feature=True) summary.get_coef_summary(is_event=True) summary.get_coef_summary(is_trend=True) summary.get_coef_summary(is_interaction=True) x = summary.get_coef_summary(is_lag=True) assert x is None x = summary.get_coef_summary( is_trend=True, is_seasonality=False, is_interaction=False, return_df=True) assert all([":" not in col for col in x["Pred_col"].tolist()]) assert "ct1" in x["Pred_col"].tolist() assert "sin1_ct1_yearly" not in x["Pred_col"].tolist() x = summary.get_coef_summary(return_df=True) assert x.shape[0] == summary.info_dict["coef_summary_df"].shape[0]
model/alignment.py
aflorithmic/DurIAN
158
11080821
import torch from .base import BaseModule class AlignmentModule(BaseModule): """ Special module in DurIAN, which duplicates encoder hidden states with the correspodence to the outputs of duration model. """ def __init__(self): super(AlignmentModule, self).__init__() def forward(self, encoded_inputs, alignments): durations = alignments.sum(dim=2) alignments = alignments.transpose(2, 1) outputs = alignments.bmm(encoded_inputs) B, max_target_len = encoded_inputs.shape[0], alignments.shape[1] position_encodings = torch.zeros(B, max_target_len, dtype=torch.float32).to(encoded_inputs) for obj_idx in range(B): positions = torch.cat([torch.linspace(0, 1, steps=int(dur), dtype=torch.float32) for dur in durations[obj_idx]]).to(encoded_inputs) position_encodings[obj_idx, :positions.shape[0]] = positions outputs = torch.cat([outputs, position_encodings.unsqueeze(dim=2)], dim=2) return outputs def inference(self, encoded_inputs, alignments): return self.forward(encoded_inputs=encoded_inputs, alignments=alignments)
base/site-packages/captchas/urls.py
edisonlz/fastor
285
11080822
from django.conf.urls.defaults import * from captchas.views import captcha_image urlpatterns = patterns('', url(r'^(?P<id>\w+)/$', captcha_image, name='captcha_image') )
coreference/train.py
shinoyuki222/torch-light
310
11080827
import os import json import argparse import torch from tqdm import tqdm import torch.optim as optim import numpy as np from torch.nn.functional import binary_cross_entropy import data_loader import const import model import utils parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=os.path.join(const.DATAPATH, "corpus.pt")) parser.add_argument('--model_path', type=str, default='weights') parser.add_argument('--cuda_device', type=str, default='0') parser.add_argument('--epochs', type=int, default=100) parser.add_argument('--seed', type=int, default=1111) parser.add_argument('--learning_rate', type=float, default=5e-4) parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--max_len', type=int, default=500) parser.add_argument('--span_len', type=int, default=4) parser.add_argument('--d_model', type=int, default=512) parser.add_argument('--pos_dim', type=int, default=20) parser.add_argument('--n_head', type=int, default=8) parser.add_argument('--rnn_hidden_size', type=int, default=128) parser.add_argument('--dropout', type=float, default=0.5) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_device use_cuda = torch.cuda.is_available() corpus = torch.load(os.path.join(args.data)) args.word_ebd_weight = corpus["wordW"] args.use_cuda = use_cuda torch.manual_seed(args.seed) if use_cuda: torch.cuda.manual_seed(args.seed) corpus = torch.load(args.data) args.wordW = corpus["wordW"] train_and_test_data = data_loader.DataLoader( const.DATAPATH, corpus["word2idx"], cuda=use_cuda) mention_pair_score = model.MentionPairScore(args) if use_cuda: mention_pair_score = mention_pair_score.cuda() optimizer = optim.Adam(mention_pair_score.parameters(), lr=args.learning_rate) def train(i): mention_pair_score.train() total_loss = corrects = recall = ground_truth = 0 for doc in tqdm(train_and_test_data.train_docs, mininterval=1, desc='pre-Train Processing', leave=False): optimizer.zero_grad() scores, labels = mention_pair_score(doc, corpus["word2idx"]) loss = binary_cross_entropy(scores, labels, reduction='mean') loss.backward() optimizer.step() total_loss += loss.data.item() predict = scores.gt(0.5).float() corrects += (predict*labels).sum().item() recall += predict.sum().item() ground_truth += labels.sum().item() f1 = 2*corrects/(recall+ground_truth) print(f"train epoch {i+1}/{args.epochs} loss: {total_loss/100:.4f} corrects: {corrects} recall: {recall} ground_truth: {ground_truth} f1: {f1:.4f}") def dev(i): mention_pair_score.eval() total_loss = corrects = recall = ground_truth = 0 for doc in tqdm(train_and_test_data.test_docs, mininterval=1, desc='pre-Dev Processing', leave=False): with torch.no_grad(): scores, labels = mention_pair_score(doc, corpus["word2idx"]) loss = binary_cross_entropy(scores, labels, reduction='mean') total_loss += loss.data.item() predict = scores.gt(0.5).float() corrects += (predict*labels).sum().item() recall += predict.sum().item() ground_truth += labels.sum().item() f1 = 2*corrects/(recall+ground_truth) print(f"dev epoch {i+1}/{args.epochs} loss: {total_loss/len(train_and_test_data.test_docs):.4f} corrects: {corrects} recall: {recall} ground_truth: {ground_truth} f1: {f1:.4f}") return f1 def save(): model_state_dict = mention_pair_score.state_dict() model_source = { "settings": args, "model": model_state_dict, "word2idx": corpus['word2idx'], } torch.save( model_source, f"{os.path.join(args.model_path, 'pretrain_model.pt')}") os.makedirs(args.model_path, exist_ok=True) best_f1 = 0 try: print('-' * 90) for epoch in range(args.epochs): train(epoch) print('-' * 90) f1 = dev(epoch) print('-' * 90) if f1 >= best_f1: print(f"new best f1 score {f1:.4f} and save model") best_f1 = f1 mention_pair_score.save_model( f"{os.path.join(args.model_path, 'middle_pretrain_model.pt')}") save() else: print( f"f1 score {f1:.4f} and reload best model best f1 {best_f1:.4f}") mention_pair_score.load_model( f"{os.path.join(args.model_path, 'middle_pretrain_model.pt')}", use_cuda) print('-' * 90) except KeyboardInterrupt: print("Exiting from training early")
ext/libigl/external/cgal/src/CGAL_Project/auxiliary/gdb/python/CGAL/printers.py
liminchen/OptCuts
187
11080831
# Copyright (c) 2011 GeometryFactory Sarl (France) # # This file is part of CGAL (www.cgal.org); you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the License, # or (at your option) any later version. # # Licensees holding a valid commercial license may use this file in # accordance with the commercial license agreement provided with the software. # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # # $URL$ # $Id$ # # Author(s) : <NAME> import gdb import itertools import re def lookup_function (val): "Look-up and return a pretty-printer that can print val." # Get the type. type = val.type # If it points to a reference, get the reference. if type.code == gdb.TYPE_CODE_REF: type = type.target () # Get the unqualified type, stripped of typedefs. type = type.unqualified ().strip_typedefs () # Get the type name. typename = type.tag if typename == None: return None # Iterate over local dictionary of types to determine # if a printer is registered for that type. Return an # instantiation of the printer if found. for function in CGAL_pretty_printers_dict: if function.search (typename): return CGAL_pretty_printers_dict[function] (val) # Cannot find a pretty printer. Return None. return None class CGAL_Handle_for: def __init__(self, val): self.val = val def to_string (self): node = self.val['ptr_'].dereference() return 'Handle_for(%s , refcount=%d)' % (node['t'],node['count']) class CGAL_Point_2: def __init__(self, val): self.val = val def to_string (self): node = self.val; type = self.val.type for field in type.fields(): if field.is_base_class: node = node.cast(field.type) break return 'CGAL::Point_2(%s)' % node['base']['base'] class CGAL_Tdsvb3: def __init__(self, val): self.val = val def to_string (self): node = self.val; return 'CGAL::Tvb_3(%s)' % node['_p'] class CGAL_Point_3: def __init__(self, val): self.val = val def to_string (self): node = self.val; type = self.val.type for field in type.fields(): if field.is_base_class: node = node.cast(field.type) break return 'CGAL::Point_3(%s)' % node['base']['base'] class CGAL_Vector_2: def __init__(self, val, name): self.val = val self.name = name def to_string (self): node = self.val['base'] return 'CGAL::%s(%s)' % (self.name, node) class CGAL_Array: def __init__(self, val): self.val = val def to_string (self): node = self.val['_M_instance'] return node class CGAL_Boost_tuples: def __init__(self, val): self.val = val def to_string (self): return '{%s}' % self.display_head_and_continue(self.val) def display_head_and_continue(self, val): has_tail = False; for field in val.type.fields(): if(field.name != 'head'): has_tail = 1 break if has_tail: return '%s, %s' % (val['head'], self.display_head_and_continue(val['tail'])) else: return val['head'] gdb.pretty_printers.append(lookup_function) print "Hello from CGAL_pretty_printers" CGAL_pretty_printers_dict = {} CGAL_pretty_printers_dict[re.compile('^CGAL::Handle_for<.*>$')] = lambda val: CGAL_Handle_for(val) CGAL_pretty_printers_dict[re.compile('^CGAL::Point_2<.*>$')] = lambda val: CGAL_Point_2(val) CGAL_pretty_printers_dict[re.compile('^CGAL::Point_3<.*>$')] = lambda val: CGAL_Point_3(val) CGAL_pretty_printers_dict[re.compile('^CGAL::Vector_2<.*>$')] = lambda val: CGAL_Vector_2(val, 'Vector_2') CGAL_pretty_printers_dict[re.compile('^CGAL::Circle_2<.*>$')] = lambda val: CGAL_Vector_2(val, 'Circle_2') #CGAL_pretty_printers_dict[re.compile('^CGAL::Triangulation_ds_vertex_base_3<.*>$')] = lambda val: CGAL_Tdsvb3(val) CGAL_pretty_printers_dict[re.compile('^(std|boost)(::tr1)?::array<.*>')] = lambda val: CGAL_Array(val) CGAL_pretty_printers_dict[re.compile('^(std|boost)(::tr1)?(::tuples)?::tuple<.*>')] = lambda val: CGAL_Boost_tuples(val) #p2 = gdb.selected_frame().read_var('p2')
GeneratorInterface/RivetInterface/python/particleLevel_cfi.py
Purva-Chaudhari/cmssw
852
11080840
import FWCore.ParameterSet.Config as cms particleLevel = cms.EDProducer("ParticleLevelProducer", src = cms.InputTag("genParticles2HepMC:unsmeared"), usePromptFinalStates = cms.bool(True), # for leptons, photons, neutrinos excludePromptLeptonsFromJetClustering = cms.bool(True), excludeNeutrinosFromJetClustering = cms.bool(True), doJetClustering = cms.bool(True), particleMinPt = cms.double(0.), particleMaxEta = cms.double(5.), # HF range. Maximum 6.0 on MiniAOD lepConeSize = cms.double(0.1), # for photon dressing lepMinPt = cms.double(15.), lepMaxEta = cms.double(2.5), jetConeSize = cms.double(0.4), jetMinPt = cms.double(30.), jetMaxEta = cms.double(2.4), fatJetConeSize = cms.double(0.8), fatJetMinPt = cms.double(200.), fatJetMaxEta = cms.double(2.4), phoIsoConeSize = cms.double(0.4), phoMaxRelIso = cms.double(0.5), phoMinPt = cms.double(10), phoMaxEta = cms.double(2.5), )
vnpy/api/lts/vnltstd/test/lts_data_type.py
black0144/vnpy
101
11080843
<filename>vnpy/api/lts/vnltstd/test/lts_data_type.py # encoding: UTF-8 defineDict = {} typedefDict = {} #////////////////////////////////////////////////////////////////////// #@company shanghai liber information Technology Co.,Ltd #@file SecurityFtdcUserApiDataType.h #@brief 定义业务数据类型 #////////////////////////////////////////////////////////////////////// #////////////////////////////////////////////////////////////////////// #TFtdcErrorIDType是一个错误代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcErrorIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcErrorMsgType是一个错误信息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcErrorMsgType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExchangeIDType是一个交易所代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcExchangeIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExchangeNameType是一个交易所名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcExchangeNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExchangePropertyType是一个交易所属性类型 #////////////////////////////////////////////////////////////////////// #正常 defineDict["SECURITY_FTDC_EXP_Normal"] = '0' #根据成交生成报单 defineDict["SECURITY_FTDC_EXP_GenOrderByTrade"] = '1' typedefDict["TSecurityFtdcExchangePropertyType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExchangeConnectStatusType是一个交易所连接状态类型 #////////////////////////////////////////////////////////////////////// #没有任何连接 defineDict["SECURITY_FTDC_ECS_NoConnection"] = '1' #已经发出合约查询请求 defineDict["SECURITY_FTDC_ECS_QryInstrumentSent"] = '2' #已经获取信息 defineDict["SECURITY_FTDC_ECS_GotInformation"] = '9' typedefDict["TSecurityFtdcExchangeConnectStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDateType是一个日期类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcDateType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTimeType是一个时间类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTimeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInstrumentIDType是一个合约代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInstrumentIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcProductNameType是一个产品名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcProductNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcProductClassType是一个产品类型类型 #////////////////////////////////////////////////////////////////////// #期货 defineDict["SECURITY_FTDC_PC_Futures"] = '1' #期权 defineDict["SECURITY_FTDC_PC_Options"] = '2' #组合 defineDict["SECURITY_FTDC_PC_Combination"] = '3' #即期 defineDict["SECURITY_FTDC_PC_Spot"] = '4' #期转现 defineDict["SECURITY_FTDC_PC_EFP"] = '5' #证券A股 defineDict["SECURITY_FTDC_PC_StockA"] = '6' #证券B股 defineDict["SECURITY_FTDC_PC_StockB"] = '7' #ETF defineDict["SECURITY_FTDC_PC_ETF"] = '8' #ETF申赎 defineDict["SECURITY_FTDC_PC_ETFPurRed"] = '9' typedefDict["TSecurityFtdcProductClassType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVolumeMultipleType是一个合约数量乘数类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVolumeMultipleType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcPriceType是一个价格类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcPriceType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcVolumeType是一个数量类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVolumeType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcPositionTypeType是一个持仓类型类型 #////////////////////////////////////////////////////////////////////// #净持仓 defineDict["SECURITY_FTDC_PT_Net"] = '1' #综合持仓 defineDict["SECURITY_FTDC_PT_Gross"] = '2' typedefDict["TSecurityFtdcPositionTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPositionDateTypeType是一个持仓日期类型类型 #////////////////////////////////////////////////////////////////////// #使用历史持仓 defineDict["SECURITY_FTDC_PDT_UseHistory"] = '1' #不使用历史持仓 defineDict["SECURITY_FTDC_PDT_NoUseHistory"] = '2' typedefDict["TSecurityFtdcPositionDateTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExchangeInstIDType是一个合约在交易所的代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcExchangeInstIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcYearType是一个年份类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcYearType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcMonthType是一个月份类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMonthType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcInstLifePhaseType是一个合约生命周期状态类型 #////////////////////////////////////////////////////////////////////// #未上市 defineDict["SECURITY_FTDC_IP_NotStart"] = '0' #上市 defineDict["SECURITY_FTDC_IP_Started"] = '1' #停牌 defineDict["SECURITY_FTDC_IP_Pause"] = '2' #到期 defineDict["SECURITY_FTDC_IP_Expired"] = '3' typedefDict["TSecurityFtdcInstLifePhaseType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBoolType是一个布尔型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBoolType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcRightModelIDType是一个股票权限模版代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRightModelIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcRightModelNameType是一个股票权限模版名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRightModelNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPosTradeTypeType是一个持仓交易类型类型 #////////////////////////////////////////////////////////////////////// #今日新增持仓能卖出 defineDict["SECURITY_FTDC_PTT_CanSelTodayPos"] = '1' #今日新增持仓不能卖出 defineDict["SECURITY_FTDC_PTT_CannotSellTodayPos"] = '2' typedefDict["TSecurityFtdcPosTradeTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTraderIDType是一个交易所交易员代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTraderIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcParticipantIDType是一个会员代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcParticipantIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPasswordType是一个密码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcPasswordType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerIDType是一个经纪公司代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBrokerIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderLocalIDType是一个本地报单编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOrderLocalIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerAbbrType是一个经纪公司简称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBrokerAbbrType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerNameType是一个经纪公司名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBrokerNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInvestorIDType是一个投资者代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInvestorIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPartyNameType是一个参与人名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcPartyNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIdCardTypeType是一个证件类型类型 #////////////////////////////////////////////////////////////////////// #组织机构代码 defineDict["SECURITY_FTDC_ICT_EID"] = '0' #身份证 defineDict["SECURITY_FTDC_ICT_IDCard"] = '1' #军官证 defineDict["SECURITY_FTDC_ICT_OfficerIDCard"] = '2' #警官证 defineDict["SECURITY_FTDC_ICT_PoliceIDCard"] = '3' #士兵证 defineDict["SECURITY_FTDC_ICT_SoldierIDCard"] = '4' #户口簿 defineDict["SECURITY_FTDC_ICT_HouseholdRegister"] = '5' #护照 defineDict["SECURITY_FTDC_ICT_Passport"] = '6' #台胞证 defineDict["SECURITY_FTDC_ICT_TaiwanCompatriotIDCard"] = '7' #回乡证 defineDict["SECURITY_FTDC_ICT_HomeComingCard"] = '8' #营业执照号 defineDict["SECURITY_FTDC_ICT_LicenseNo"] = '9' #税务登记号 defineDict["SECURITY_FTDC_ICT_TaxNo"] = 'A' #其他证件 defineDict["SECURITY_FTDC_ICT_OtherCard"] = 'x' typedefDict["TSecurityFtdcIdCardTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIdentifiedCardNoType是一个证件号码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcIdentifiedCardNoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcClientIDType是一个交易编码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcClientIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcAccountIDType是一个投资者帐号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcAccountIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcClientTypeType是一个交易编码类型类型 #////////////////////////////////////////////////////////////////////// #普通 defineDict["SECURITY_FTDC_CLT_Normal"] = '1' #信用交易 defineDict["SECURITY_FTDC_CLT_Credit"] = '2' #衍生品账户 defineDict["SECURITY_FTDC_CLT_Derive"] = '3' #其他类型 defineDict["SECURITY_FTDC_CLT_Other"] = '4' typedefDict["TSecurityFtdcClientTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInvestorGroupNameType是一个投资者分组名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInvestorGroupNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserIDType是一个用户代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcUserIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserNameType是一个用户名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcUserNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFunctionCodeType是一个功能代码类型 #////////////////////////////////////////////////////////////////////// #强制用户登出 defineDict["SECURITY_FTDC_FC_ForceUserLogout"] = '2' #变更管理用户口令 defineDict["SECURITY_FTDC_FC_UserPasswordUpdate"] = '3' #变更经纪公司口令 defineDict["SECURITY_FTDC_FC_BrokerPasswordUpdate"] = '4' #变更投资者口令 defineDict["SECURITY_FTDC_FC_InvestorPasswordUpdate"] = '5' #报单插入 defineDict["SECURITY_FTDC_FC_OrderInsert"] = '6' #报单操作 defineDict["SECURITY_FTDC_FC_OrderAction"] = '7' #同步系统数据 defineDict["SECURITY_FTDC_FC_SyncSystemData"] = '8' #同步经纪公司数据 defineDict["SECURITY_FTDC_FC_SyncBrokerData"] = '9' #超级查询 defineDict["SECURITY_FTDC_FC_SuperQuery"] = 'B' #报单插入 defineDict["SECURITY_FTDC_FC_ParkedOrderInsert"] = 'C' #报单操作 defineDict["SECURITY_FTDC_FC_ParkedOrderAction"] = 'D' #同步动态令牌 defineDict["SECURITY_FTDC_FC_SyncOTP"] = 'E' #未知单操作 defineDict["SECURITY_FTDC_FC_UnkownOrderAction"] = 'F' #转托管 defineDict["SECURITY_FTDC_FC_DepositoryTransfer"] = 'G' #余券划转 defineDict["SECURITY_FTDC_FC_ExcessStockTransfer"] = 'H' typedefDict["TSecurityFtdcFunctionCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserTypeType是一个用户类型类型 #////////////////////////////////////////////////////////////////////// #投资者 defineDict["SECURITY_FTDC_UT_Investor"] = '0' #操作员 defineDict["SECURITY_FTDC_UT_Operator"] = '1' #管理员 defineDict["SECURITY_FTDC_UT_SuperUser"] = '2' typedefDict["TSecurityFtdcUserTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerFunctionCodeType是一个经纪公司功能代码类型 #////////////////////////////////////////////////////////////////////// #强制用户登出 defineDict["SECURITY_FTDC_BFC_ForceUserLogout"] = '1' #变更用户口令 defineDict["SECURITY_FTDC_BFC_UserPasswordUpdate"] = '2' #同步经纪公司数据 defineDict["SECURITY_FTDC_BFC_SyncBrokerData"] = '3' #报单插入 defineDict["SECURITY_FTDC_BFC_OrderInsert"] = '5' #报单操作 defineDict["SECURITY_FTDC_BFC_OrderAction"] = '6' #全部查询 defineDict["SECURITY_FTDC_BFC_AllQuery"] = '7' #未知单操作 defineDict["SECURITY_FTDC_BFC_UnkownOrderAction"] = '8' #转托管 defineDict["SECURITY_FTDC_BFC_DepositoryTransfer"] = '9' #余券划转 defineDict["SECURITY_FTDC_BFC_ExcessStockTransfer"] = 'A' #资金内转 defineDict["SECURITY_FTDC_BFC_FundInterTransfer"] = 'B' #系统功能:登入/登出/修改密码等 defineDict["SECURITY_FTDC_BFC_log"] = 'a' #基本查询:查询基础数据,如合约,交易所等常量 defineDict["SECURITY_FTDC_BFC_BaseQry"] = 'b' #交易查询:如查成交,委托 defineDict["SECURITY_FTDC_BFC_TradeQry"] = 'c' #交易功能:报单,撤单 defineDict["SECURITY_FTDC_BFC_Trade"] = 'd' #转账 defineDict["SECURITY_FTDC_BFC_Virement"] = 'e' #查询/管理:查询会话,踢人等 defineDict["SECURITY_FTDC_BFC_Session"] = 'g' #同步动态令牌 defineDict["SECURITY_FTDC_BFC_SyncOTP"] = 'E' typedefDict["TSecurityFtdcBrokerFunctionCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCurrencyCodeType是一个币种类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCurrencyCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcMoneyType是一个资金类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMoneyType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcRatioType是一个比率类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRatioType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcAccountTypeType是一个账户类型类型 #////////////////////////////////////////////////////////////////////// #普通账户 defineDict["SECURITY_FTDC_AcT_Normal"] = '1' #信用账户 defineDict["SECURITY_FTDC_AcT_Credit"] = '2' #衍生品账户 defineDict["SECURITY_FTDC_AcT_Derive"] = '3' #其他类型 defineDict["SECURITY_FTDC_AcT_Other"] = '4' typedefDict["TSecurityFtdcAccountTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDepartmentRangeType是一个投资者范围类型 #////////////////////////////////////////////////////////////////////// #所有 defineDict["SECURITY_FTDC_DR_All"] = '1' #组织架构 defineDict["SECURITY_FTDC_DR_Group"] = '2' #单一投资者 defineDict["SECURITY_FTDC_DR_Single"] = '3' typedefDict["TSecurityFtdcDepartmentRangeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserRightTypeType是一个客户权限类型类型 #////////////////////////////////////////////////////////////////////// #登录 defineDict["SECURITY_FTDC_URT_Logon"] = '1' #银期转帐 defineDict["SECURITY_FTDC_URT_Transfer"] = '2' #邮寄结算单 defineDict["SECURITY_FTDC_URT_EMail"] = '3' #传真结算单 defineDict["SECURITY_FTDC_URT_Fax"] = '4' #条件单 defineDict["SECURITY_FTDC_URT_ConditionOrder"] = '5' typedefDict["TSecurityFtdcUserRightTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcProductInfoType是一个产品信息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcProductInfoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcAuthCodeType是一个客户端认证码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcAuthCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcLargeVolumeType是一个大额数量类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcLargeVolumeType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcMillisecType是一个时间(毫秒)类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMillisecType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcHedgeFlagType是一个投机套保标志类型 #////////////////////////////////////////////////////////////////////// #投机 defineDict["SECURITY_FTDC_HF_Speculation"] = '1' #套保 defineDict["SECURITY_FTDC_HF_Hedge"] = '3' typedefDict["TSecurityFtdcHedgeFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDirectionType是一个买卖方向类型 #////////////////////////////////////////////////////////////////////// #买 defineDict["SECURITY_FTDC_D_Buy"] = '0' #卖 defineDict["SECURITY_FTDC_D_Sell"] = '1' #ETF申购 defineDict["SECURITY_FTDC_D_ETFPur"] = '2' #ETF赎回 defineDict["SECURITY_FTDC_D_ETFRed"] = '3' #现金替代,只用作回报 defineDict["SECURITY_FTDC_D_CashIn"] = '4' #债券入库 defineDict["SECURITY_FTDC_D_PledgeBondIn"] = '5' #债券出库 defineDict["SECURITY_FTDC_D_PledgeBondOut"] = '6' #配股 defineDict["SECURITY_FTDC_D_Rationed"] = '7' #转托管 defineDict["SECURITY_FTDC_D_DepositoryTransfer"] = '8' #信用账户配股 defineDict["SECURITY_FTDC_D_CreditRationed"] = '9' #担保品买入 defineDict["SECURITY_FTDC_D_BuyCollateral"] = 'A' #担保品卖出 defineDict["SECURITY_FTDC_D_SellCollateral"] = 'B' #担保品转入 defineDict["SECURITY_FTDC_D_CollateralTransferIn"] = 'C' #担保品转出 defineDict["SECURITY_FTDC_D_CollateralTransferOut"] = 'D' #融资买入 defineDict["SECURITY_FTDC_D_MarginTrade"] = 'E' #融券卖出 defineDict["SECURITY_FTDC_D_ShortSell"] = 'F' #卖券还款 defineDict["SECURITY_FTDC_D_RepayMargin"] = 'G' #买券还券 defineDict["SECURITY_FTDC_D_RepayStock"] = 'H' #直接还款 defineDict["SECURITY_FTDC_D_DirectRepayMargin"] = 'I' #直接还券 defineDict["SECURITY_FTDC_D_DirectRepayStock"] = 'J' #余券划转 defineDict["SECURITY_FTDC_D_ExcessStockTransfer"] = 'K' #OF申购 defineDict["SECURITY_FTDC_D_OFPur"] = 'L' #OF赎回 defineDict["SECURITY_FTDC_D_OFRed"] = 'M' #SF拆分 defineDict["SECURITY_FTDC_D_SFSplit"] = 'N' #SF合并 defineDict["SECURITY_FTDC_D_SFMerge"] = 'O' #备兑 defineDict["SECURITY_FTDC_D_Covered"] = 'P' #证券冻结(开)/解冻(平) defineDict["SECURITY_FTDC_D_Freeze"] = 'Q' #行权 defineDict["SECURITY_FTDC_D_Execute"] = 'R' #CB回售 defineDict["SECURITY_FTDC_D_CBRed"] = 'S' #CB转股 defineDict["SECURITY_FTDC_D_CBConv"] = 'T' #OF认购 defineDict["SECURITY_FTDC_D_OFSub"] = 'U' typedefDict["TSecurityFtdcDirectionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeIDType是一个成交编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeTypeType是一个成交类型类型 #////////////////////////////////////////////////////////////////////// #普通成交 defineDict["SECURITY_FTDC_TRDT_Common"] = '0' #期权执行 defineDict["SECURITY_FTDC_TRDT_OptionsExecution"] = '1' #OTC成交 defineDict["SECURITY_FTDC_TRDT_OTC"] = '2' #期转现衍生成交 defineDict["SECURITY_FTDC_TRDT_EFPDerived"] = '3' #组合衍生成交 defineDict["SECURITY_FTDC_TRDT_CombinationDerived"] = '4' #ETF申购 defineDict["SECURITY_FTDC_TRDT_EFTPurchase"] = '5' #ETF赎回 defineDict["SECURITY_FTDC_TRDT_EFTRedem"] = '6' typedefDict["TSecurityFtdcTradeTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCreationredemptionStatusType是一个基金当天申购赎回状态类型 #////////////////////////////////////////////////////////////////////// #不允许申购赎回 defineDict["SECURITY_FTDC_CDS_Forbidden"] = '0' #表示允许申购和赎回 defineDict["SECURITY_FTDC_CDS_Allow"] = '1' #允许申购、不允许赎回 defineDict["SECURITY_FTDC_CDS_OnlyPurchase"] = '2' #不允许申购、允许赎回 defineDict["SECURITY_FTDC_CDS_OnlyRedeem"] = '3' typedefDict["TSecurityFtdcCreationredemptionStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcETFCurrenceReplaceStatusType是一个ETF现金替代标志类型 #////////////////////////////////////////////////////////////////////// #禁止现金替代 defineDict["SECURITY_FTDC_ETFCRS_Forbidden"] = '0' #可以现金替代 defineDict["SECURITY_FTDC_ETFCRS_Allow"] = '1' #必须现金替代 defineDict["SECURITY_FTDC_ETFCRS_Force"] = '2' #跨市场股票退补现金替代 defineDict["SECURITY_FTDC_ETFCRS_CrossMarketComp"] = '3' #跨市场必须现金替代 defineDict["SECURITY_FTDC_ETFCRS_CrossMarketFroce"] = '4' typedefDict["TSecurityFtdcETFCurrenceReplaceStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInterestType是一个利息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInterestType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcRepurchaseMaxTimesType是一个正回购放大倍数类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRepurchaseMaxTimesType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcCapitalStockTypeType是一个股本类型类型 #////////////////////////////////////////////////////////////////////// #总通股本 defineDict["SECURITY_FTDC_CPTSTOCK_TOTALSTOCK"] = '1' #流通股本 defineDict["SECURITY_FTDC_CPTSTOCK_CIRCULATION"] = '2' typedefDict["TSecurityFtdcCapitalStockTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcMarginPriceTypeType是一个保证金价格类型类型 #////////////////////////////////////////////////////////////////////// #昨结算价 defineDict["SECURITY_FTDC_MPT_PreSettlementPrice"] = '1' #最新价 defineDict["SECURITY_FTDC_MPT_SettlementPrice"] = '2' #成交均价 defineDict["SECURITY_FTDC_MPT_AveragePrice"] = '3' #开仓价 defineDict["SECURITY_FTDC_MPT_OpenPrice"] = '4' typedefDict["TSecurityFtdcMarginPriceTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcAlgorithmType是一个盈亏算法类型 #////////////////////////////////////////////////////////////////////// #浮盈浮亏都计算 defineDict["SECURITY_FTDC_AG_All"] = '1' #浮盈不计,浮亏计 defineDict["SECURITY_FTDC_AG_OnlyLost"] = '2' #浮盈计,浮亏不计 defineDict["SECURITY_FTDC_AG_OnlyGain"] = '3' #浮盈浮亏都不计算 defineDict["SECURITY_FTDC_AG_None"] = '4' typedefDict["TSecurityFtdcAlgorithmType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIncludeCloseProfitType是一个是否包含平仓盈利类型 #////////////////////////////////////////////////////////////////////// #包含平仓盈利 defineDict["SECURITY_FTDC_ICP_Include"] = '0' #不包含平仓盈利 defineDict["SECURITY_FTDC_ICP_NotInclude"] = '2' typedefDict["TSecurityFtdcIncludeCloseProfitType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcAllWithoutTradeType是一个是否受可提比例限制类型 #////////////////////////////////////////////////////////////////////// #不受可提比例限制 defineDict["SECURITY_FTDC_AWT_Enable"] = '0' #受可提比例限制 defineDict["SECURITY_FTDC_AWT_Disable"] = '2' #无仓不受可提比例限制 defineDict["SECURITY_FTDC_AWT_NoHoldEnable"] = '3' typedefDict["TSecurityFtdcAllWithoutTradeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcHandlePositionAlgoIDType是一个持仓处理算法编号类型 #////////////////////////////////////////////////////////////////////// #基本 defineDict["SECURITY_FTDC_HPA_Base"] = '1' #非交易 defineDict["SECURITY_FTDC_HPA_NoneTrade"] = '4' #证券 defineDict["SECURITY_FTDC_HPA_Stock"] = '5' typedefDict["TSecurityFtdcHandlePositionAlgoIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeParamIDType是一个交易系统参数代码类型 #////////////////////////////////////////////////////////////////////// #系统加密算法 defineDict["SECURITY_FTDC_TPID_EncryptionStandard"] = 'E' #用户最大会话数 defineDict["SECURITY_FTDC_TPID_SingleUserSessionMaxNum"] = 'S' #最大连续登录失败数 defineDict["SECURITY_FTDC_TPID_LoginFailMaxNum"] = 'L' #是否强制认证 defineDict["SECURITY_FTDC_TPID_IsAuthForce"] = 'A' #是否生成用户事件 defineDict["SECURITY_FTDC_TPID_GenUserEvent"] = 'G' #起始报单本地编号 defineDict["SECURITY_FTDC_TPID_StartOrderLocalID"] = 'O' #融资融券买券还券算法 defineDict["SECURITY_FTDC_TPID_RepayStockAlgo"] = 'R' #衍生品账户资金提取线 defineDict["SECURITY_FTDC_TPID_DeriveWithdrawRatio"] = 'D' #期权行权冻结可用起始时间 defineDict["SECURITY_FTDC_TPID_ExecuteStartTime"] = 'T' typedefDict["TSecurityFtdcTradeParamIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSettlementParamValueType是一个参数代码值类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSettlementParamValueType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcMemoType是一个备注类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMemoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPriorityType是一个优先级类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcPriorityType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcOrderRefType是一个报单引用类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOrderRefType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcMarketIDType是一个市场代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMarketIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcMacAddressType是一个Mac地址类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcMacAddressType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcHDSerialNumberType是一个硬盘序列号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcHDSerialNumberType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInstrumentNameType是一个合约名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInstrumentNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderSysIDType是一个报单编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOrderSysIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIPAddressType是一个IP地址类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcIPAddressType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIPPortType是一个IP端口类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcIPPortType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcProtocolInfoType是一个协议信息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcProtocolInfoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDepositSeqNoType是一个出入金流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcDepositSeqNoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSystemNameType是一个系统名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSystemNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInvestorRangeType是一个投资者范围类型 #////////////////////////////////////////////////////////////////////// #所有 defineDict["SECURITY_FTDC_IR_All"] = '1' #投资者组 defineDict["SECURITY_FTDC_IR_Group"] = '2' #单一投资者 defineDict["SECURITY_FTDC_IR_Single"] = '3' typedefDict["TSecurityFtdcInvestorRangeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDataSyncStatusType是一个数据同步状态类型 #////////////////////////////////////////////////////////////////////// #未同步 defineDict["SECURITY_FTDC_DS_Asynchronous"] = '1' #同步中 defineDict["SECURITY_FTDC_DS_Synchronizing"] = '2' #已同步 defineDict["SECURITY_FTDC_DS_Synchronized"] = '3' typedefDict["TSecurityFtdcDataSyncStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTraderConnectStatusType是一个交易所交易员连接状态类型 #////////////////////////////////////////////////////////////////////// #没有任何连接 defineDict["SECURITY_FTDC_TCS_NotConnected"] = '1' #已经连接 defineDict["SECURITY_FTDC_TCS_Connected"] = '2' #已经发出合约查询请求 defineDict["SECURITY_FTDC_TCS_QryInstrumentSent"] = '3' #订阅私有流 defineDict["SECURITY_FTDC_TCS_SubPrivateFlow"] = '4' typedefDict["TSecurityFtdcTraderConnectStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderActionStatusType是一个报单操作状态类型 #////////////////////////////////////////////////////////////////////// #已经提交 defineDict["SECURITY_FTDC_OAS_Submitted"] = 'a' #已经接受 defineDict["SECURITY_FTDC_OAS_Accepted"] = 'b' #已经被拒绝 defineDict["SECURITY_FTDC_OAS_Rejected"] = 'c' typedefDict["TSecurityFtdcOrderActionStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderStatusType是一个报单状态类型 #////////////////////////////////////////////////////////////////////// #全部成交 defineDict["SECURITY_FTDC_OST_AllTraded"] = '0' #部分成交还在队列中 defineDict["SECURITY_FTDC_OST_PartTradedQueueing"] = '1' #部分成交不在队列中 defineDict["SECURITY_FTDC_OST_PartTradedNotQueueing"] = '2' #未成交还在队列中 defineDict["SECURITY_FTDC_OST_NoTradeQueueing"] = '3' #未成交不在队列中 defineDict["SECURITY_FTDC_OST_NoTradeNotQueueing"] = '4' #撤单 defineDict["SECURITY_FTDC_OST_Canceled"] = '5' #未知 defineDict["SECURITY_FTDC_OST_Unknown"] = 'a' #尚未触发 defineDict["SECURITY_FTDC_OST_NotTouched"] = 'b' #已触发 defineDict["SECURITY_FTDC_OST_Touched"] = 'c' typedefDict["TSecurityFtdcOrderStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderSubmitStatusType是一个报单提交状态类型 #////////////////////////////////////////////////////////////////////// #已经提交 defineDict["SECURITY_FTDC_OSS_InsertSubmitted"] = '0' #撤单已经提交 defineDict["SECURITY_FTDC_OSS_CancelSubmitted"] = '1' #修改已经提交 defineDict["SECURITY_FTDC_OSS_ModifySubmitted"] = '2' #已经接受 defineDict["SECURITY_FTDC_OSS_Accepted"] = '3' #报单已经被拒绝 defineDict["SECURITY_FTDC_OSS_InsertRejected"] = '4' #撤单已经被拒绝 defineDict["SECURITY_FTDC_OSS_CancelRejected"] = '5' #改单已经被拒绝 defineDict["SECURITY_FTDC_OSS_ModifyRejected"] = '6' typedefDict["TSecurityFtdcOrderSubmitStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPositionDateType是一个持仓日期类型 #////////////////////////////////////////////////////////////////////// #今日持仓 defineDict["SECURITY_FTDC_PSD_Today"] = '1' #历史持仓 defineDict["SECURITY_FTDC_PSD_History"] = '2' typedefDict["TSecurityFtdcPositionDateType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradingRoleType是一个交易角色类型 #////////////////////////////////////////////////////////////////////// #代理 defineDict["SECURITY_FTDC_ER_Broker"] = '1' #自营 defineDict["SECURITY_FTDC_ER_Host"] = '2' #做市商 defineDict["SECURITY_FTDC_ER_Maker"] = '3' typedefDict["TSecurityFtdcTradingRoleType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPosiDirectionType是一个持仓多空方向类型 #////////////////////////////////////////////////////////////////////// #净 defineDict["SECURITY_FTDC_PD_Net"] = '1' #多头 defineDict["SECURITY_FTDC_PD_Long"] = '2' #空头 defineDict["SECURITY_FTDC_PD_Short"] = '3' #备兑 defineDict["SECURITY_FTDC_PD_Covered"] = '4' typedefDict["TSecurityFtdcPosiDirectionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderPriceTypeType是一个报单价格条件类型 #////////////////////////////////////////////////////////////////////// #即时成交剩余撤销市价单 defineDict["SECURITY_FTDC_OPT_AnyPrice"] = '1' #限价 defineDict["SECURITY_FTDC_OPT_LimitPrice"] = '2' #最优五档即时成交剩余撤销市价单 defineDict["SECURITY_FTDC_OPT_BestPrice"] = '3' #最优五档即时成交剩余转限价市价单 defineDict["SECURITY_FTDC_OPT_BestLimitPrice"] = '4' #全部成交或撤销市价单 defineDict["SECURITY_FTDC_OPT_AllPrice"] = '5' #本方最优价格市价单 defineDict["SECURITY_FTDC_OPT_ForwardBestPrice"] = '6' #对方最优价格市价单 defineDict["SECURITY_FTDC_OPT_ReverseBestPrice"] = '7' #即时成交剩余转限价市价单 defineDict["SECURITY_FTDC_OPT_Any2LimitPrice"] = '8' #全部成交或撤销限价单 defineDict["SECURITY_FTDC_OPT_AllLimitPrice"] = '9' #激活A股网络密码服务代码 defineDict["SECURITY_FTDC_OPT_ActiveANetPassSvrCode"] = 'G' #注销A股网络密码服务代码 defineDict["SECURITY_FTDC_OPT_InactiveANetPassSvrCode"] = 'H' #激活B股网络密码服务代码 defineDict["SECURITY_FTDC_OPT_ActiveBNetPassSvrCode"] = 'I' #注销B股网络密码服务代码 defineDict["SECURITY_FTDC_OPT_InactiveBNetPassSvrCode"] = 'J' #回购注销 defineDict["SECURITY_FTDC_OPT_Repurchase"] = 'K' #指定撤销 defineDict["SECURITY_FTDC_OPT_DesignatedCancel"] = 'L' #指定登记 defineDict["SECURITY_FTDC_OPT_Designated"] = 'M' #证券参与申购 defineDict["SECURITY_FTDC_OPT_SubscribingShares"] = 'N' #证券参与配股 defineDict["SECURITY_FTDC_OPT_Split"] = 'O' #要约收购登记 defineDict["SECURITY_FTDC_OPT_TenderOffer"] = 'P' #要约收购撤销 defineDict["SECURITY_FTDC_OPT_TenderOfferCancel"] = 'Q' #证券投票 defineDict["SECURITY_FTDC_OPT_Ballot"] = 'R' #可转债转换登记 defineDict["SECURITY_FTDC_OPT_ConvertibleBondsConvet"] = 'S' #可转债回售登记 defineDict["SECURITY_FTDC_OPT_ConvertibleBondsRepurchase"] = 'T' #权证行权 defineDict["SECURITY_FTDC_OPT_Exercise"] = 'U' #开放式基金申购 defineDict["SECURITY_FTDC_OPT_PurchasingFunds"] = 'V' #开放式基金赎回 defineDict["SECURITY_FTDC_OPT_RedemingFunds"] = 'W' #开放式基金认购 defineDict["SECURITY_FTDC_OPT_SubscribingFunds"] = 'X' #开放式基金转托管转出 defineDict["SECURITY_FTDC_OPT_LOFIssue"] = 'Y' #开放式基金设置分红方式 defineDict["SECURITY_FTDC_OPT_LOFSetBonusType"] = 'Z' #开放式基金转换为其他基金 defineDict["SECURITY_FTDC_OPT_LOFConvert"] = 'a' #债券入库 defineDict["SECURITY_FTDC_OPT_DebentureStockIn"] = 'b' #债券出库 defineDict["SECURITY_FTDC_OPT_DebentureStockOut"] = 'c' #ETF申购 defineDict["SECURITY_FTDC_OPT_PurchasesETF"] = 'd' #ETF赎回 defineDict["SECURITY_FTDC_OPT_RedeemETF"] = 'e' typedefDict["TSecurityFtdcOrderPriceTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOffsetFlagType是一个开平标志类型 #////////////////////////////////////////////////////////////////////// #开仓 defineDict["SECURITY_FTDC_OF_Open"] = '0' #平仓 defineDict["SECURITY_FTDC_OF_Close"] = '1' #强平 defineDict["SECURITY_FTDC_OF_ForceClose"] = '2' #平今 defineDict["SECURITY_FTDC_OF_CloseToday"] = '3' #平昨 defineDict["SECURITY_FTDC_OF_CloseYesterday"] = '4' #强减 defineDict["SECURITY_FTDC_OF_ForceOff"] = '5' #本地强平 defineDict["SECURITY_FTDC_OF_LocalForceClose"] = '6' typedefDict["TSecurityFtdcOffsetFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcForceCloseReasonType是一个强平原因类型 #////////////////////////////////////////////////////////////////////// #非强平 defineDict["SECURITY_FTDC_FCC_NotForceClose"] = '0' #资金不足 defineDict["SECURITY_FTDC_FCC_LackDeposit"] = '1' #客户超仓 defineDict["SECURITY_FTDC_FCC_ClientOverPositionLimit"] = '2' #会员超仓 defineDict["SECURITY_FTDC_FCC_MemberOverPositionLimit"] = '3' #持仓非整数倍 defineDict["SECURITY_FTDC_FCC_NotMultiple"] = '4' #违规 defineDict["SECURITY_FTDC_FCC_Violation"] = '5' #其它 defineDict["SECURITY_FTDC_FCC_Other"] = '6' #自然人临近交割 defineDict["SECURITY_FTDC_FCC_PersonDeliv"] = '7' typedefDict["TSecurityFtdcForceCloseReasonType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderTypeType是一个报单类型类型 #////////////////////////////////////////////////////////////////////// #正常 defineDict["SECURITY_FTDC_ORDT_Normal"] = '0' #报价衍生 defineDict["SECURITY_FTDC_ORDT_DeriveFromQuote"] = '1' #组合衍生 defineDict["SECURITY_FTDC_ORDT_DeriveFromCombination"] = '2' #组合报单 defineDict["SECURITY_FTDC_ORDT_Combination"] = '3' #条件单 defineDict["SECURITY_FTDC_ORDT_ConditionalOrder"] = '4' #互换单 defineDict["SECURITY_FTDC_ORDT_Swap"] = '5' typedefDict["TSecurityFtdcOrderTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTimeConditionType是一个有效期类型类型 #////////////////////////////////////////////////////////////////////// #立即完成,否则撤销 defineDict["SECURITY_FTDC_TC_IOC"] = '1' #本节有效 defineDict["SECURITY_FTDC_TC_GFS"] = '2' #当日有效 defineDict["SECURITY_FTDC_TC_GFD"] = '3' #指定日期前有效 defineDict["SECURITY_FTDC_TC_GTD"] = '4' #撤销前有效 defineDict["SECURITY_FTDC_TC_GTC"] = '5' #集合竞价有效 defineDict["SECURITY_FTDC_TC_GFA"] = '6' typedefDict["TSecurityFtdcTimeConditionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVolumeConditionType是一个成交量类型类型 #////////////////////////////////////////////////////////////////////// #任何数量 defineDict["SECURITY_FTDC_VC_AV"] = '1' #最小数量 defineDict["SECURITY_FTDC_VC_MV"] = '2' #全部数量 defineDict["SECURITY_FTDC_VC_CV"] = '3' typedefDict["TSecurityFtdcVolumeConditionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcContingentConditionType是一个触发条件类型 #////////////////////////////////////////////////////////////////////// #立即 defineDict["SECURITY_FTDC_CC_Immediately"] = '1' #止损 defineDict["SECURITY_FTDC_CC_Touch"] = '2' #止赢 defineDict["SECURITY_FTDC_CC_TouchProfit"] = '3' #预埋单 defineDict["SECURITY_FTDC_CC_ParkedOrder"] = '4' #最新价大于条件价 defineDict["SECURITY_FTDC_CC_LastPriceGreaterThanStopPrice"] = '5' #最新价大于等于条件价 defineDict["SECURITY_FTDC_CC_LastPriceGreaterEqualStopPrice"] = '6' #最新价小于条件价 defineDict["SECURITY_FTDC_CC_LastPriceLesserThanStopPrice"] = '7' #最新价小于等于条件价 defineDict["SECURITY_FTDC_CC_LastPriceLesserEqualStopPrice"] = '8' #卖一价大于条件价 defineDict["SECURITY_FTDC_CC_AskPriceGreaterThanStopPrice"] = '9' #卖一价大于等于条件价 defineDict["SECURITY_FTDC_CC_AskPriceGreaterEqualStopPrice"] = 'A' #卖一价小于条件价 defineDict["SECURITY_FTDC_CC_AskPriceLesserThanStopPrice"] = 'B' #卖一价小于等于条件价 defineDict["SECURITY_FTDC_CC_AskPriceLesserEqualStopPrice"] = 'C' #买一价大于条件价 defineDict["SECURITY_FTDC_CC_BidPriceGreaterThanStopPrice"] = 'D' #买一价大于等于条件价 defineDict["SECURITY_FTDC_CC_BidPriceGreaterEqualStopPrice"] = 'E' #买一价小于条件价 defineDict["SECURITY_FTDC_CC_BidPriceLesserThanStopPrice"] = 'F' #买一价小于等于条件价 defineDict["SECURITY_FTDC_CC_BidPriceLesserEqualStopPrice"] = 'H' typedefDict["TSecurityFtdcContingentConditionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcActionFlagType是一个操作标志类型 #////////////////////////////////////////////////////////////////////// #删除 defineDict["SECURITY_FTDC_AF_Delete"] = '0' #修改 defineDict["SECURITY_FTDC_AF_Modify"] = '3' typedefDict["TSecurityFtdcActionFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradingRightType是一个交易权限类型 #////////////////////////////////////////////////////////////////////// #可以交易 defineDict["SECURITY_FTDC_TR_Allow"] = '0' #不能交易 defineDict["SECURITY_FTDC_TR_Forbidden"] = '2' typedefDict["TSecurityFtdcTradingRightType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderSourceType是一个报单来源类型 #////////////////////////////////////////////////////////////////////// #来自参与者 defineDict["SECURITY_FTDC_OSRC_Participant"] = '0' #来自管理员 defineDict["SECURITY_FTDC_OSRC_Administrator"] = '1' typedefDict["TSecurityFtdcOrderSourceType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPriceSourceType是一个成交价来源类型 #////////////////////////////////////////////////////////////////////// #前成交价 defineDict["SECURITY_FTDC_PSRC_LastPrice"] = '0' #买委托价 defineDict["SECURITY_FTDC_PSRC_Buy"] = '1' #卖委托价 defineDict["SECURITY_FTDC_PSRC_Sell"] = '2' typedefDict["TSecurityFtdcPriceSourceType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOrderActionRefType是一个报单操作引用类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOrderActionRefType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcFrontIDType是一个前置编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFrontIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcSessionIDType是一个会话编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSessionIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcInstallIDType是一个安装编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcInstallIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcSequenceNoType是一个序号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSequenceNoType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcRequestIDType是一个请求编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRequestIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcCombOffsetFlagType是一个组合开平标志类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCombOffsetFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCombHedgeFlagType是一个组合投机套保标志类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCombHedgeFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSequenceSeriesType是一个序列系列号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSequenceSeriesType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcCommPhaseNoType是一个通讯时段编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCommPhaseNoType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcUserEventTypeType是一个用户事件类型类型 #////////////////////////////////////////////////////////////////////// #登录 defineDict["SECURITY_FTDC_UET_Login"] = '1' #登出 defineDict["SECURITY_FTDC_UET_Logout"] = '2' #交易成功 defineDict["SECURITY_FTDC_UET_Trading"] = '3' #交易失败 defineDict["SECURITY_FTDC_UET_TradingError"] = '4' #修改密码 defineDict["SECURITY_FTDC_UET_UpdatePassword"] = '5' #客户端认证 defineDict["SECURITY_FTDC_UET_Authenticate"] = '6' #其他 defineDict["SECURITY_FTDC_UET_Other"] = '9' typedefDict["TSecurityFtdcUserEventTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserEventInfoType是一个用户事件信息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcUserEventInfoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOTPTypeType是一个动态令牌类型类型 #////////////////////////////////////////////////////////////////////// #无动态令牌 defineDict["SECURITY_FTDC_OTP_NONE"] = '0' #时间令牌 defineDict["SECURITY_FTDC_OTP_TOTP"] = '1' typedefDict["TSecurityFtdcOTPTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeSourceType是一个成交来源类型 #////////////////////////////////////////////////////////////////////// #来自交易所普通回报 defineDict["SECURITY_FTDC_TSRC_NORMAL"] = '0' #来自查询 defineDict["SECURITY_FTDC_TSRC_QUERY"] = '1' typedefDict["TSecurityFtdcTradeSourceType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBranchIDType是一个营业部编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBranchIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcStockPriceType是一个证券交易价格类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcStockPriceType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcRightModelIDType是一个股票权限模版代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRightModelIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSerialNumberType是一个序列号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSerialNumberType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInstrumentRangeType是一个股票权限分类类型 #////////////////////////////////////////////////////////////////////// #所有 defineDict["SECURITY_FTDC_INR_All"] = '1' #产品 defineDict["SECURITY_FTDC_INR_Product"] = '2' #股票权限模版 defineDict["SECURITY_FTDC_INR_Model"] = '3' #股票 defineDict["SECURITY_FTDC_INR_Stock"] = '4' #市场 defineDict["SECURITY_FTDC_INR_Market"] = '5' typedefDict["TSecurityFtdcInstrumentRangeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBusinessUnitType是一个业务单元类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBusinessUnitType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOTPVendorsIDType是一个动态令牌提供商类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOTPVendorsIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcLastDriftType是一个上次OTP漂移值类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcLastDriftType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcLastSuccessType是一个上次OTP成功值类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcLastSuccessType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcAuthKeyType是一个令牌密钥类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcAuthKeyType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcUserSessionHashType是一个用户会话Hash值类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcUserSessionHashType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcStockTradeTypeType是一个证券交易类型类型 #////////////////////////////////////////////////////////////////////// #可交易证券 defineDict["SECURITY_FTDC_STT_Stock"] = '0' #买入网络密码服务 defineDict["SECURITY_FTDC_STT_BuyNetService"] = '1' #回购注销 defineDict["SECURITY_FTDC_STT_CancelRepurchase"] = '2' #指定撤销 defineDict["SECURITY_FTDC_STT_CancelRegister"] = '3' #指定登记 defineDict["SECURITY_FTDC_STT_Register"] = '4' #买入发行申购 defineDict["SECURITY_FTDC_STT_PurchaseIssue"] = '5' #卖出配股 defineDict["SECURITY_FTDC_STT_Allotment"] = '6' #卖出要约收购 defineDict["SECURITY_FTDC_STT_SellTender"] = '7' #买入要约收购 defineDict["SECURITY_FTDC_STT_BuyTender"] = '8' #网上投票 defineDict["SECURITY_FTDC_STT_NetVote"] = '9' #卖出可转债回售 defineDict["SECURITY_FTDC_STT_SellConvertibleBonds"] = 'a' #权证行权代码 defineDict["SECURITY_FTDC_STT_OptionExecute"] = 'b' #开放式基金申购 defineDict["SECURITY_FTDC_STT_PurchaseOF"] = 'c' #开放式基金赎回 defineDict["SECURITY_FTDC_STT_RedeemOF"] = 'd' #开放式基金认购 defineDict["SECURITY_FTDC_STT_SubscribeOF"] = 'e' #开放式基金转托管转出 defineDict["SECURITY_FTDC_STT_OFCustodianTranfer"] = 'f' #开放式基金分红设置 defineDict["SECURITY_FTDC_STT_OFDividendConfig"] = 'g' #开放式基金转成其他基金 defineDict["SECURITY_FTDC_STT_OFTransfer"] = 'h' #债券入库 defineDict["SECURITY_FTDC_STT_BondsIn"] = 'i' #债券出库 defineDict["SECURITY_FTDC_STT_BondsOut"] = 'j' #EFT申购 defineDict["SECURITY_FTDC_STT_PurchaseETF"] = 'k' #EFT赎回 defineDict["SECURITY_FTDC_STT_RedeemETF"] = 'l' #可转债回售登记 defineDict["SECURITY_FTDC_STT_ConvertibleRegister"] = 'm' typedefDict["TSecurityFtdcStockTradeTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcHandleTradingAccountAlgoIDType是一个资金处理算法编号类型 #////////////////////////////////////////////////////////////////////// #基本 defineDict["SECURITY_FTDC_HTAA_Base"] = '1' typedefDict["TSecurityFtdcHandleTradingAccountAlgoIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcStockWthType是一个股票使用流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcStockWthType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcStockSeqType是一个股票使用流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcStockSeqType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcWTFSType是一个委托方式类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcWTFSType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcWTLBType是一个委托类别类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcWTLBType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcWTRQType是一个委托日期类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcWTRQType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcINTEGERType是一个一般整型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcINTEGERType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcINT3Type是一个三位数整型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcINT3Type"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcINT6Type是一个六位数整型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcINT6Type"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcINT12Type是一个十二位数整型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcINT12Type"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR1Type是一个一字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR1Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR2Type是一个二字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR2Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR3Type是一个三字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR3Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR4Type是一个四字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR4Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR5Type是一个五字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR5Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR6Type是一个六字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR6Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR8Type是一个八字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR8Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR10Type是一个十字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR10Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR11Type是一个十一字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR11Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR12Type是一个十二字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR12Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR13Type是一个十三字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR13Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR14Type是一个十四字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR14Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR16Type是一个十六字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR16Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR19Type是一个十九字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR19Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR20Type是一个二十字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR20Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR21Type是一个二十一字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR21Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR23Type是一个二十三字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR23Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR30Type是一个三十字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR30Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR32Type是一个三十二字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR32Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR50Type是一个五十字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR50Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR64Type是一个六十四字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR64Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCHAR65Type是一个六十五字节CHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCHAR65Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR4Type是一个四字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR4Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR6Type是一个六字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR6Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR8Type是一个八字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR8Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR10Type是一个十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR10Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR12Type是一个十二字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR12Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR16Type是一个十六字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR16Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR20Type是一个二十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR20Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR30Type是一个三十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR30Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR50Type是一个五十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR50Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR60Type是一个六十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR60Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR65Type是一个六十五字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR65Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR80Type是一个八十字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR80Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR84Type是一个八十四字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR84Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR255Type是一个二五五字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR255Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcVCHAR1024Type是一个一零二四字节VCHAR类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcVCHAR1024Type"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcREAL8P3Type是一个八点三实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL8P3Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL9P3Type是一个九点三实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL9P3Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL9P6Type是一个九点六实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL9P6Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL10P4Type是一个十点四实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL10P4Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL16P2Type是一个十六点二实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL16P2Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL16P8Type是一个十六点八实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL16P8Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcREAL22P2Type是一个二十二点二实型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcREAL22P2Type"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcCommandNoType是一个DB命令序号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCommandNoType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcCommandTypeType是一个DB命令类型类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCommandTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSettlementGroupIDType是一个结算组代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSettlementGroupIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFieldNameType是一个字段名类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFieldNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFieldContentType是一个字段内容类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFieldContentType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankIDType是一个银行代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankNameType是一个银行名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankBrchIDType是一个银行分中心代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankBrchIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcLiberSerialType是一个Liber系统流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcLiberSerialType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcRoleIDType是一个角色编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRoleIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcRoleNameType是一个角色名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcRoleNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDescriptionType是一个描述类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcDescriptionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFunctionIDType是一个功能代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFunctionIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBillNoType是一个票据号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBillNoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundIOTypeType是一个出入金类型类型 #////////////////////////////////////////////////////////////////////// #出入金 defineDict["SECURITY_FTDC_FIOT_FundIO"] = '1' #银期转帐 defineDict["SECURITY_FTDC_FIOT_Transfer"] = '2' typedefDict["TSecurityFtdcFundIOTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundTypeType是一个资金类型类型 #////////////////////////////////////////////////////////////////////// #银行存款 defineDict["SECURITY_FTDC_FT_Deposite"] = '1' #分项资金 defineDict["SECURITY_FTDC_FT_ItemFund"] = '2' #公司调整 defineDict["SECURITY_FTDC_FT_Company"] = '3' typedefDict["TSecurityFtdcFundTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundDirectionType是一个出入金方向类型 #////////////////////////////////////////////////////////////////////// #入金 defineDict["SECURITY_FTDC_FD_In"] = '1' #出金 defineDict["SECURITY_FTDC_FD_Out"] = '2' typedefDict["TSecurityFtdcFundDirectionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankFlagType是一个银行统一标识类型类型 #////////////////////////////////////////////////////////////////////// #工商银行 defineDict["SECURITY_FTDC_BF_ICBC"] = '1' #农业银行 defineDict["SECURITY_FTDC_BF_ABC"] = '2' #中国银行 defineDict["SECURITY_FTDC_BF_BC"] = '3' #建设银行 defineDict["SECURITY_FTDC_BF_CBC"] = '4' #交通银行 defineDict["SECURITY_FTDC_BF_BOC"] = '5' #其他银行 defineDict["SECURITY_FTDC_BF_Other"] = 'Z' typedefDict["TSecurityFtdcBankFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOperationMemoType是一个操作摘要类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOperationMemoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundStatusType是一个资金状态类型 #////////////////////////////////////////////////////////////////////// #已录入 defineDict["SECURITY_FTDC_FS_Record"] = '1' #已复核 defineDict["SECURITY_FTDC_FS_Check"] = '2' #已冲销 defineDict["SECURITY_FTDC_FS_Charge"] = '3' typedefDict["TSecurityFtdcFundStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundProjectIDType是一个资金项目编号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFundProjectIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOperatorIDType是一个操作员代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOperatorIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCounterIDType是一个计数器代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCounterIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFunctionNameType是一个功能名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcFunctionNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeCodeType是一个交易代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerBranchIDType是一个经纪公司分支机构代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBrokerBranchIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeDateType是一个交易日期类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeDateType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeTimeType是一个交易时间类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeTimeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankSerialType是一个银行流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankSerialType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSerialType是一个流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcSerialType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcLastFragmentType是一个最后分片标志类型 #////////////////////////////////////////////////////////////////////// #是最后分片 defineDict["SECURITY_FTDC_LF_Yes"] = '0' #不是最后分片 defineDict["SECURITY_FTDC_LF_No"] = '1' typedefDict["TSecurityFtdcLastFragmentType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcIndividualNameType是一个个人姓名类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcIndividualNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCustTypeType是一个客户类型类型 #////////////////////////////////////////////////////////////////////// #自然人 defineDict["SECURITY_FTDC_CUSTT_Person"] = '0' #机构户 defineDict["SECURITY_FTDC_CUSTT_Institution"] = '1' typedefDict["TSecurityFtdcCustTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankAccountType是一个银行账户类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankAccountType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcYesNoIndicatorType是一个是或否标识类型 #////////////////////////////////////////////////////////////////////// #是 defineDict["SECURITY_FTDC_YNI_Yes"] = '0' #否 defineDict["SECURITY_FTDC_YNI_No"] = '1' typedefDict["TSecurityFtdcYesNoIndicatorType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeAmountType是一个交易金额(元)类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeAmountType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcCustFeeType是一个应收客户费用(元)类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcCustFeeType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcBrokerFeeType是一个应收经纪公司费用(元)类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBrokerFeeType"] = "float" #////////////////////////////////////////////////////////////////////// #TFtdcFeePayFlagType是一个费用支付标志类型 #////////////////////////////////////////////////////////////////////// #由受益方支付费用 defineDict["SECURITY_FTDC_FPF_BEN"] = '0' #由发送方支付费用 defineDict["SECURITY_FTDC_FPF_OUR"] = '1' #由发送方支付发起的费用,受益方支付接受的费用 defineDict["SECURITY_FTDC_FPF_SHA"] = '2' typedefDict["TSecurityFtdcFeePayFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcAddInfoType是一个附加信息类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcAddInfoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDigestType是一个摘要类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcDigestType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankAccTypeType是一个银行帐号类型类型 #////////////////////////////////////////////////////////////////////// #银行存折 defineDict["SECURITY_FTDC_BAT_BankBook"] = '1' #储蓄卡 defineDict["SECURITY_FTDC_BAT_SavingCard"] = '2' #信用卡 defineDict["SECURITY_FTDC_BAT_CreditCard"] = '3' typedefDict["TSecurityFtdcBankAccTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDeviceIDType是一个渠道标志类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcDeviceIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPwdFlagType是一个密码核对标志类型 #////////////////////////////////////////////////////////////////////// #不核对 defineDict["SECURITY_FTDC_BPWDF_NoCheck"] = '0' #明文核对 defineDict["SECURITY_FTDC_BPWDF_BlankCheck"] = '1' #密文核对 defineDict["SECURITY_FTDC_BPWDF_EncryptCheck"] = '2' typedefDict["TSecurityFtdcPwdFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcBankCodingForBrokerType是一个银行对经纪公司的编码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcBankCodingForBrokerType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOperNoType是一个交易柜员类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOperNoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTIDType是一个交易ID类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTIDType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcTransferStatusType是一个转账交易状态类型 #////////////////////////////////////////////////////////////////////// #正常 defineDict["SECURITY_FTDC_TRFS_Normal"] = '0' #被冲正 defineDict["SECURITY_FTDC_TRFS_Repealed"] = '1' typedefDict["TSecurityFtdcTransferStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcPlateSerialType是一个平台流水号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcPlateSerialType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcAvailabilityFlagType是一个有效标志类型 #////////////////////////////////////////////////////////////////////// #未确认 defineDict["SECURITY_FTDC_AVAF_Invalid"] = '0' #有效 defineDict["SECURITY_FTDC_AVAF_Valid"] = '1' #冲正 defineDict["SECURITY_FTDC_AVAF_Repeal"] = '2' typedefDict["TSecurityFtdcAvailabilityFlagType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOperatorCodeType是一个操作员类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcOperatorCodeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcRepayStockAlgoType是一个买券还券算法类型 #////////////////////////////////////////////////////////////////////// #默认算法 defineDict["SECURITY_FTDC_RSA_Original"] = '0' #按还券比例计算 defineDict["SECURITY_FTDC_RSA_Ratio"] = '1' #Min[1,2] defineDict["SECURITY_FTDC_RSA_Min"] = '2' typedefDict["TSecurityFtdcRepayStockAlgoType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeSpanType是一个交易时间段类型类型 #////////////////////////////////////////////////////////////////////// #普通业务 defineDict["SECURITY_FTDC_TS_Common"] = '1' #转账 defineDict["SECURITY_FTDC_TS_Transfer"] = '2' typedefDict["TSecurityFtdcTradeSpanType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcSettleSystemTypeType是一个所属结算系统类型类型 #////////////////////////////////////////////////////////////////////// #顶点系统 defineDict["SECURITY_FTDC_SST_Aboss"] = '1' #恒生系统 defineDict["SECURITY_FTDC_SST_HS"] = '2' typedefDict["TSecurityFtdcSettleSystemTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcLogLevelType是一个日志级别类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcLogLevelType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcProcessNameType是一个存储过程名称类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcProcessNameType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTemplateIDType是一个模板代码类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTemplateIDType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradeIndexType是一个成交序号类型 #////////////////////////////////////////////////////////////////////// typedefDict["TSecurityFtdcTradeIndexType"] = "int" #////////////////////////////////////////////////////////////////////// #TFtdcSplitMergeStatusType是一个基金当天拆分合并状态类型 #////////////////////////////////////////////////////////////////////// #表示允许拆分和合并 defineDict["SECURITY_FTDC_SMS_Allow"] = '0' #允许拆分、不允许合并 defineDict["SECURITY_FTDC_SMS_OnlySplit"] = '1' #不允许拆分、允许合并 defineDict["SECURITY_FTDC_SMS_OnlyMerge"] = '2' #不允许拆分和合并 defineDict["SECURITY_FTDC_SMS_Forbidden"] = '3' typedefDict["TSecurityFtdcSplitMergeStatusType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundInterTransferTypeType是一个资金内转类型类型 #////////////////////////////////////////////////////////////////////// #转入 defineDict["SECURITY_FTDC_FITT_TransferIn"] = '0' #转出 defineDict["SECURITY_FTDC_FITT_TransferOut"] = '1' typedefDict["TSecurityFtdcFundInterTransferTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInstrumentTypeType是一个合约类型类型 #////////////////////////////////////////////////////////////////////// #普通 defineDict["SECURITY_FTDC_IT_Normal"] = '0' #看涨期权 defineDict["SECURITY_FTDC_IT_CallOptions"] = '1' #看跌期权 defineDict["SECURITY_FTDC_IT_PutOptions"] = '2' #普通(STEP) defineDict["SECURITY_FTDC_IT_Normal_STEP"] = '3' typedefDict["TSecurityFtdcInstrumentTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcInvestorLevelType是一个投资者期权交易等级类型 #////////////////////////////////////////////////////////////////////// #一级 defineDict["SECURITY_FTDC_IL_Level_1"] = '0' #二级 defineDict["SECURITY_FTDC_IL_Level_2"] = '1' #三级 defineDict["SECURITY_FTDC_IL_Level_3"] = '2' typedefDict["TSecurityFtdcInvestorLevelType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcCloseDirectionType是一个平仓方向类型 #////////////////////////////////////////////////////////////////////// #买平仓 defineDict["SECURITY_FTDC_CD_CloseBuy"] = '!' #卖平仓 defineDict["SECURITY_FTDC_CD_CloseSell"] = '@' #备兑平仓 defineDict["SECURITY_FTDC_CD_CloseCover"] = '#' typedefDict["TSecurityFtdcCloseDirectionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcDelivTypeType是一个交割类型类型 #////////////////////////////////////////////////////////////////////// #看涨期权执行 defineDict["SECURITY_FTDC_DT_ExecCallOptions"] = '0' #看跌期权执行 defineDict["SECURITY_FTDC_DT_ExecPutOptions"] = '1' #在途证券 defineDict["SECURITY_FTDC_DT_UnavailStock"] = '2' #赎回在途资金 defineDict["SECURITY_FTDC_DT_UnavailRedMoney"] = '2' typedefDict["TSecurityFtdcDelivTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcExpireTypeType是一个到期类型类型 #////////////////////////////////////////////////////////////////////// #正回购到期 defineDict["SECURITY_FTDC_ET_Repurchase"] = '0' #逆回购到期 defineDict["SECURITY_FTDC_ET_ReverseRepurch"] = '1' #债券到期 defineDict["SECURITY_FTDC_ET_Bond"] = '2' typedefDict["TSecurityFtdcExpireTypeType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcFundClassType是一个基金类别类型 #////////////////////////////////////////////////////////////////////// #发行期 defineDict["SECURITY_FTDC_FC_Subscription"] = '0' #普通型 defineDict["SECURITY_FTDC_FC_Normal"] = '1' #货币型 defineDict["SECURITY_FTDC_FC_Monetary"] = '2' typedefDict["TSecurityFtdcFundClassType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcTradingPhaseType是一个交易阶段类型 #////////////////////////////////////////////////////////////////////// #非交易时段 defineDict["SECURITY_FTDC_TP_NonTrade"] = '0' #集合竞价时段 defineDict["SECURITY_FTDC_TP_Bidding"] = '1' #连续交易时段 defineDict["SECURITY_FTDC_TP_Continuous"] = '2' #停牌时段 defineDict["SECURITY_FTDC_TP_Suspension"] = '3' #熔断时段 defineDict["SECURITY_FTDC_TP_Fuse"] = '4' typedefDict["TSecurityFtdcTradingPhaseType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOpenRestrictionType是一个开仓限制类型 #////////////////////////////////////////////////////////////////////// #无开仓限制 defineDict["SECURITY_FTDC_OR_None"] = '0' #限制备兑开仓 defineDict["SECURITY_FTDC_OR_NoCoverOpen"] = '1' #限制卖出开仓 defineDict["SECURITY_FTDC_OR_NoSellOpen"] = '2' #限制卖出开仓、备兑开仓 defineDict["SECURITY_FTDC_OR_NoSellAndCoverOpen"] = '3' #限制买入开仓 defineDict["SECURITY_FTDC_OR_NoBuyOpen"] = '4' #限制买入开仓、备兑开仓 defineDict["SECURITY_FTDC_OR_NoBuyAndCoverOpen"] = '5' #限制买入开仓、卖出开仓 defineDict["SECURITY_FTDC_OR_NoBuyAndSellOpen"] = '6' #限制买入开仓、卖出开仓、备兑开仓 defineDict["SECURITY_FTDC_OR_NoBuySellAndCoverOpen"] = '7' typedefDict["TSecurityFtdcOpenRestrictionType"] = "string" #////////////////////////////////////////////////////////////////////// #TFtdcOfferTypeType是一个报盘类型类型 #////////////////////////////////////////////////////////////////////// #普通报盘 defineDict["SECURITY_FTDC_OT_Normal"] = '0' #期权报盘 defineDict["SECURITY_FTDC_OT_Options"] = '1' typedefDict["TSecurityFtdcOfferTypeType"] = "string"
salt/ext/vsan/vsanmgmtObjects.py
tomdoherty/salt
9,425
11080846
# pylint: skip-file from pyVmomi.VmomiSupport import ( CreateDataType, CreateManagedType, CreateEnumType, AddVersion, AddVersionParent, F_LINK, F_LINKABLE, F_OPTIONAL, ) CreateManagedType( "vim.cluster.VsanPerformanceManager", "VsanPerformanceManager", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "setStatsObjectPolicy", "VsanPerfSetStatsObjectPolicy", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "profile", "vim.vm.ProfileSpec", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "deleteStatsObject", "VsanPerfDeleteStatsObject", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "createStatsObjectTask", "VsanPerfCreateStatsObjectTask", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "profile", "vim.vm.ProfileSpec", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "deleteStatsObjectTask", "VsanPerfDeleteStatsObjectTask", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "queryClusterHealth", "VsanPerfQueryClusterHealth", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ), (0, "vmodl.DynamicData[]", "vmodl.DynamicData[]"), "System.Read", None, ), ( "queryStatsObjectInformation", "VsanPerfQueryStatsObjectInformation", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), ( 0, "vim.cluster.VsanObjectInformation", "vim.cluster.VsanObjectInformation", ), "System.Read", None, ), ( "queryNodeInformation", "VsanPerfQueryNodeInformation", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanPerfNodeInformation[]", "vim.cluster.VsanPerfNodeInformation[]", ), "System.Read", None, ), ( "queryVsanPerf", "VsanPerfQueryPerf", "vim.version.version9", ( ( "querySpecs", "vim.cluster.VsanPerfQuerySpec[]", "vim.version.version9", 0, None, ), ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), ( 0, "vim.cluster.VsanPerfEntityMetricCSV[]", "vim.cluster.VsanPerfEntityMetricCSV[]", ), "System.Read", None, ), ( "getSupportedEntityTypes", "VsanPerfGetSupportedEntityTypes", "vim.version.version9", tuple(), ( 0 | F_OPTIONAL, "vim.cluster.VsanPerfEntityType[]", "vim.cluster.VsanPerfEntityType[]", ), "System.Read", None, ), ( "createStatsObject", "VsanPerfCreateStatsObject", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "profile", "vim.vm.ProfileSpec", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "string", "string"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanVcDiskManagementSystem", "VimClusterVsanVcDiskManagementSystem", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "initializeDiskMappings", "InitializeDiskMappings", "vim.version.version10", ( ( "spec", "vim.vsan.host.DiskMappingCreationSpec", "vim.version.version10", 0, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "retrieveAllFlashCapabilities", "RetrieveAllFlashCapabilities", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ), ( 0 | F_OPTIONAL, "vim.vsan.host.VsanHostCapability[]", "vim.vsan.host.VsanHostCapability[]", ), "System.Read", None, ), ( "queryDiskMappings", "QueryDiskMappings", "vim.version.version10", (("host", "vim.HostSystem", "vim.version.version10", 0, None),), ( 0 | F_OPTIONAL, "vim.vsan.host.DiskMapInfoEx[]", "vim.vsan.host.DiskMapInfoEx[]", ), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanObjectSystem", "VsanObjectSystem", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "setVsanObjectPolicy", "VosSetVsanObjectPolicy", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("vsanObjectUuid", "string", "vim.version.version9", 0, None), ( "profile", "vim.vm.ProfileSpec", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "queryObjectIdentities", "VsanQueryObjectIdentities", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ( "includeHealth", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "includeObjIdentity", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "includeSpaceSummary", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanObjectIdentityAndHealth", "vim.cluster.VsanObjectIdentityAndHealth", ), "System.Read", None, ), ( "queryVsanObjectInformation", "VosQueryVsanObjectInformation", "vim.version.version9", ( ( "cluster", "vim.ComputeResource", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "vsanObjectQuerySpecs", "vim.cluster.VsanObjectQuerySpec[]", "vim.version.version9", 0, None, ), ), ( 0, "vim.cluster.VsanObjectInformation[]", "vim.cluster.VsanObjectInformation[]", ), "System.Read", None, ), ], ) CreateManagedType( "vim.host.VsanStretchedClusterSystem", "VimHostVsanStretchedClusterSystem", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "getStretchedClusterInfoFromCmmds", "VSANHostGetStretchedClusterInfoFromCmmds", "vim.version.version10", tuple(), ( 0 | F_OPTIONAL, "vim.host.VSANStretchedClusterHostInfo[]", "vim.host.VSANStretchedClusterHostInfo[]", ), "System.Read", None, ), ( "witnessJoinVsanCluster", "VSANWitnessJoinVsanCluster", "vim.version.version10", ( ("clusterUuid", "string", "vim.version.version10", 0, None), ("preferredFd", "string", "vim.version.version10", 0, None), ( "disableVsanAllowed", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "void", "void"), "System.Read", None, ), ( "witnessSetPreferredFaultDomain", "VSANWitnessSetPreferredFaultDomain", "vim.version.version10", (("preferredFd", "string", "vim.version.version10", 0, None),), (0, "void", "void"), "System.Read", None, ), ( "addUnicastAgent", "VSANHostAddUnicastAgent", "vim.version.version10", ( ("witnessAddress", "string", "vim.version.version10", 0, None), ("witnessPort", "int", "vim.version.version10", 0 | F_OPTIONAL, None), ("overwrite", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None), ), (0, "void", "void"), "System.Read", None, ), ( "clusterGetPreferredFaultDomain", "VSANClusterGetPreferredFaultDomain", "vim.version.version10", tuple(), ( 0 | F_OPTIONAL, "vim.host.VSANCmmdsPreferredFaultDomainInfo", "vim.host.VSANCmmdsPreferredFaultDomainInfo", ), "System.Read", None, ), ( "witnessLeaveVsanCluster", "VSANWitnessLeaveVsanCluster", "vim.version.version10", tuple(), (0, "void", "void"), "System.Read", None, ), ( "getStretchedClusterCapability", "VSANHostGetStretchedClusterCapability", "vim.version.version10", tuple(), ( 0, "vim.host.VSANStretchedClusterHostCapability", "vim.host.VSANStretchedClusterHostCapability", ), "System.Read", None, ), ( "removeUnicastAgent", "VSANHostRemoveUnicastAgent", "vim.version.version10", ( ("witnessAddress", "string", "vim.version.version10", 0, None), ( "ignoreExistence", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "void", "void"), "System.Read", None, ), ( "listUnicastAgent", "VSANHostListUnicastAgent", "vim.version.version10", tuple(), (0, "string", "string"), "System.Read", None, ), ], ) CreateManagedType( "vim.VsanUpgradeSystemEx", "VsanUpgradeSystemEx", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "performUpgrade", "PerformVsanUpgradeEx", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "performObjectUpgrade", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "downgradeFormat", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "allowReducedRedundancy", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "excludeHosts", "vim.HostSystem[]", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "spec", "vim.cluster.VsanDiskFormatConversionSpec", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "performUpgradePreflightCheck", "PerformVsanUpgradePreflightCheckEx", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "downgradeFormat", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "spec", "vim.cluster.VsanDiskFormatConversionSpec", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), ( 0, "vim.cluster.VsanDiskFormatConversionCheckResult", "vim.cluster.VsanDiskFormatConversionCheckResult", ), "System.Read", None, ), ( "retrieveSupportedFormatVersion", "RetrieveSupportedVsanFormatVersion", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ), (0, "int", "int"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanCapabilitySystem", "VsanCapabilitySystem", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "getCapabilities", "VsanGetCapabilities", "vim.version.version10", ( ( "targets", "vmodl.ManagedObject[]", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "vim.cluster.VsanCapability[]", "vim.cluster.VsanCapability[]"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanSpaceReportSystem", "VsanSpaceReportSystem", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "querySpaceUsage", "VsanQuerySpaceUsage", "vim.version.version9", (("cluster", "vim.ComputeResource", "vim.version.version9", 0, None),), (0, "vim.cluster.VsanSpaceUsage", "vim.cluster.VsanSpaceUsage"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanVcClusterConfigSystem", "VsanVcClusterConfigSystem", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "getConfigInfoEx", "VsanClusterGetConfig", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ), (0, "vim.vsan.ConfigInfoEx", "vim.vsan.ConfigInfoEx"), "System.Read", None, ), ( "reconfigureEx", "VsanClusterReconfig", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "vsanReconfigSpec", "vim.vsan.ReconfigSpec", "vim.version.version10", 0, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ], ) CreateManagedType( "vim.host.VsanHealthSystem", "HostVsanHealthSystem", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "queryAdvCfg", "VsanHostQueryAdvCfg", "vim.version.version9", (("options", "string[]", "vim.version.version9", 0, None),), (0, "vim.option.OptionValue[]", "vim.option.OptionValue[]"), "System.Read", None, ), ( "queryPhysicalDiskHealthSummary", "VsanHostQueryPhysicalDiskHealthSummary", "vim.version.version9", tuple(), ( 0, "vim.host.VsanPhysicalDiskHealthSummary", "vim.host.VsanPhysicalDiskHealthSummary", ), "System.Read", None, ), ( "startProactiveRebalance", "VsanStartProactiveRebalance", "vim.version.version9", ( ("timeSpan", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ( "varianceThreshold", "float", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("timeThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ("rateThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "queryHostInfoByUuids", "VsanHostQueryHostInfoByUuids", "vim.version.version9", (("uuids", "string[]", "vim.version.version9", 0, None),), ( 0, "vim.host.VsanQueryResultHostInfo[]", "vim.host.VsanQueryResultHostInfo[]", ), "System.Read", None, ), ( "queryVersion", "VsanHostQueryHealthSystemVersion", "vim.version.version9", tuple(), (0, "string", "string"), "System.Read", None, ), ( "queryVerifyNetworkSettings", "VsanHostQueryVerifyNetworkSettings", "vim.version.version9", (("peers", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),), (0, "vim.host.VsanNetworkHealthResult", "vim.host.VsanNetworkHealthResult"), "System.Read", None, ), ( "queryRunIperfClient", "VsanHostQueryRunIperfClient", "vim.version.version9", ( ("multicast", "boolean", "vim.version.version9", 0, None), ("serverIp", "string", "vim.version.version9", 0, None), ), ( 0, "vim.host.VsanNetworkLoadTestResult", "vim.host.VsanNetworkLoadTestResult", ), "System.Read", None, ), ( "runVmdkLoadTest", "VsanHostRunVmdkLoadTest", "vim.version.version9", ( ("runname", "string", "vim.version.version9", 0, None), ("durationSec", "int", "vim.version.version9", 0, None), ( "specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0, None, ), ), ( 0, "vim.host.VsanVmdkLoadTestResult[]", "vim.host.VsanVmdkLoadTestResult[]", ), "System.Read", None, ), ( "queryObjectHealthSummary", "VsanHostQueryObjectHealthSummary", "vim.version.version9", ( ("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ( "includeObjUuids", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "localHostOnly", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.host.VsanObjectOverallHealth", "vim.host.VsanObjectOverallHealth"), "System.Read", None, ), ( "getHclInfo", "VsanGetHclInfo", "vim.version.version9", tuple(), (0, "vim.host.VsanHostHclInfo", "vim.host.VsanHostHclInfo"), "System.Read", None, ), ( "cleanupVmdkLoadTest", "VsanHostCleanupVmdkLoadTest", "vim.version.version9", ( ("runname", "string", "vim.version.version9", 0, None), ( "specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "string", "string"), "System.Read", None, ), ( "waitForVsanHealthGenerationIdChange", "VsanWaitForVsanHealthGenerationIdChange", "vim.version.version9", (("timeout", "int", "vim.version.version9", 0, None),), (0, "boolean", "boolean"), "System.Read", None, ), ( "stopProactiveRebalance", "VsanStopProactiveRebalance", "vim.version.version9", tuple(), (0, "boolean", "boolean"), "System.Read", None, ), ( "repairImmediateObjects", "VsanHostRepairImmediateObjects", "vim.version.version9", ( ("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ("repairType", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), (0, "vim.host.VsanRepairObjectsResult", "vim.host.VsanRepairObjectsResult"), "System.Read", None, ), ( "prepareVmdkLoadTest", "VsanHostPrepareVmdkLoadTest", "vim.version.version9", ( ("runname", "string", "vim.version.version9", 0, None), ( "specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0, None, ), ), (0, "string", "string"), "System.Read", None, ), ( "queryRunIperfServer", "VsanHostQueryRunIperfServer", "vim.version.version9", ( ("multicast", "boolean", "vim.version.version9", 0, None), ("serverIp", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0, "vim.host.VsanNetworkLoadTestResult", "vim.host.VsanNetworkLoadTestResult", ), "System.Read", None, ), ( "queryCheckLimits", "VsanHostQueryCheckLimits", "vim.version.version9", tuple(), (0, "vim.host.VsanLimitHealthResult", "vim.host.VsanLimitHealthResult"), "System.Read", None, ), ( "getProactiveRebalanceInfo", "VsanGetProactiveRebalanceInfo", "vim.version.version9", tuple(), ( 0, "vim.host.VsanProactiveRebalanceInfoEx", "vim.host.VsanProactiveRebalanceInfoEx", ), "System.Read", None, ), ( "checkClomdLiveness", "VsanHostClomdLiveness", "vim.version.version9", tuple(), (0, "boolean", "boolean"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanVcClusterHealthSystem", "VsanVcClusterHealthSystem", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "queryClusterCreateVmHealthHistoryTest", "VsanQueryVcClusterCreateVmHealthHistoryTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanClusterCreateVmHealthTestResult[]", "vim.cluster.VsanClusterCreateVmHealthTestResult[]", ), "System.Read", None, ), ( "setLogLevel", "VsanHealthSetLogLevel", "vim.version.version9", ( ( "level", "vim.cluster.VsanHealthLogLevelEnum", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "void", "void"), "System.Read", None, ), ( "testVsanClusterTelemetryProxy", "VsanHealthTestVsanClusterTelemetryProxy", "vim.version.version9", ( ( "proxyConfig", "vim.cluster.VsanClusterTelemetryProxyConfig", "vim.version.version9", 0, None, ), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "uploadHclDb", "VsanVcUploadHclDb", "vim.version.version9", (("db", "string", "vim.version.version9", 0, None),), (0, "boolean", "boolean"), "System.Read", None, ), ( "updateHclDbFromWeb", "VsanVcUpdateHclDbFromWeb", "vim.version.version9", (("url", "string", "vim.version.version9", 0 | F_OPTIONAL, None),), (0, "boolean", "boolean"), "System.Read", None, ), ( "repairClusterObjectsImmediate", "VsanHealthRepairClusterObjectsImmediate", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "queryClusterNetworkPerfTest", "VsanQueryVcClusterNetworkPerfTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("multicast", "boolean", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterNetworkLoadTestResult", "vim.cluster.VsanClusterNetworkLoadTestResult", ), "System.Read", None, ), ( "queryClusterVmdkLoadHistoryTest", "VsanQueryVcClusterVmdkLoadHistoryTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ("taskId", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanClusterVmdkLoadTestResult[]", "vim.cluster.VsanClusterVmdkLoadTestResult[]", ), "System.Read", None, ), ( "queryVsanClusterHealthCheckInterval", "VsanHealthQueryVsanClusterHealthCheckInterval", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ), (0, "int", "int"), "System.Read", None, ), ( "queryClusterCreateVmHealthTest", "VsanQueryVcClusterCreateVmHealthTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("timeout", "int", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterCreateVmHealthTestResult", "vim.cluster.VsanClusterCreateVmHealthTestResult", ), "System.Read", None, ), ( "getClusterHclInfo", "VsanVcClusterGetHclInfo", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "includeHostsResult", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.cluster.VsanClusterHclInfo", "vim.cluster.VsanClusterHclInfo"), "System.Read", None, ), ( "queryAttachToSrHistory", "VsanQueryAttachToSrHistory", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ("taskId", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanAttachToSrOperation[]", "vim.cluster.VsanAttachToSrOperation[]", ), "System.Read", None, ), ( "rebalanceCluster", "VsanRebalanceCluster", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "targetHosts", "vim.HostSystem[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "runVmdkLoadTest", "VsanVcClusterRunVmdkLoadTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("runname", "string", "vim.version.version9", 0, None), ("durationSec", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ( "specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("action", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "sendVsanTelemetry", "VsanHealthSendVsanTelemetry", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ), (0, "void", "void"), "System.Read", None, ), ( "queryClusterNetworkPerfHistoryTest", "VsanQueryVcClusterNetworkPerfHistoryTest", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0 | F_OPTIONAL, "vim.cluster.VsanClusterNetworkLoadTestResult[]", "vim.cluster.VsanClusterNetworkLoadTestResult[]", ), "System.Read", None, ), ( "queryClusterHealthSummary", "VsanQueryVcClusterHealthSummary", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "vmCreateTimeout", "int", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ( "includeObjUuids", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("fields", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ( "fetchFromCache", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), ( 0, "vim.cluster.VsanClusterHealthSummary", "vim.cluster.VsanClusterHealthSummary", ), "System.Read", None, ), ( "stopRebalanceCluster", "VsanStopRebalanceCluster", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "targetHosts", "vim.HostSystem[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "queryVsanClusterHealthConfig", "VsanHealthQueryVsanClusterHealthConfig", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ), ( 0, "vim.cluster.VsanClusterHealthConfigs", "vim.cluster.VsanClusterHealthConfigs", ), "System.Read", None, ), ( "attachVsanSupportBundleToSr", "VsanAttachVsanSupportBundleToSr", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ("srNumber", "string", "vim.version.version9", 0, None), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "queryClusterVmdkWorkloadTypes", "VsanQueryVcClusterVmdkWorkloadTypes", "vim.version.version9", tuple(), ( 0, "vim.cluster.VsanStorageWorkloadType[]", "vim.cluster.VsanStorageWorkloadType[]", ), "System.Read", None, ), ( "queryVerifyClusterHealthSystemVersions", "VsanVcClusterQueryVerifyHealthSystemVersions", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ), ( 0, "vim.cluster.VsanClusterHealthSystemVersionResult", "vim.cluster.VsanClusterHealthSystemVersionResult", ), "System.Read", None, ), ( "isRebalanceRunning", "VsanHealthIsRebalanceRunning", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "targetHosts", "vim.HostSystem[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ), (0, "boolean", "boolean"), "System.Read", None, ), ( "setVsanClusterHealthCheckInterval", "VsanHealthSetVsanClusterHealthCheckInterval", "vim.version.version9", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version9", 0, None, ), ( "vsanClusterHealthCheckInterval", "int", "vim.version.version9", 0, None, ), ), (0, "void", "void"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanVcStretchedClusterSystem", "VimClusterVsanVcStretchedClusterSystem", "vmodl.ManagedObject", "vim.version.version10", [], [ ( "isWitnessHost", "VSANVcIsWitnessHost", "vim.version.version10", (("host", "vim.HostSystem", "vim.version.version10", 0, None),), (0, "boolean", "boolean"), "System.Read", None, ), ( "setPreferredFaultDomain", "VSANVcSetPreferredFaultDomain", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ("preferredFd", "string", "vim.version.version10", 0, None), ( "witnessHost", "vim.HostSystem", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "getPreferredFaultDomain", "VSANVcGetPreferredFaultDomain", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ), ( 0 | F_OPTIONAL, "vim.cluster.VSANPreferredFaultDomainInfo", "vim.cluster.VSANPreferredFaultDomainInfo", ), "System.Read", None, ), ( "getWitnessHosts", "VSANVcGetWitnessHosts", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ), ( 0 | F_OPTIONAL, "vim.cluster.VSANWitnessHostInfo[]", "vim.cluster.VSANWitnessHostInfo[]", ), "System.Read", None, ), ( "retrieveStretchedClusterVcCapability", "VSANVcRetrieveStretchedClusterVcCapability", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "verifyAllConnected", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), ( 0 | F_OPTIONAL, "vim.cluster.VSANStretchedClusterCapability[]", "vim.cluster.VSANStretchedClusterCapability[]", ), "System.Read", None, ), ( "convertToStretchedCluster", "VSANVcConvertToStretchedCluster", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "faultDomainConfig", "vim.cluster.VSANStretchedClusterFaultDomainConfig", "vim.version.version10", 0, None, ), ("witnessHost", "vim.HostSystem", "vim.version.version10", 0, None), ("preferredFd", "string", "vim.version.version10", 0, None), ( "diskMapping", "vim.vsan.host.DiskMapping", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ( "removeWitnessHost", "VSANVcRemoveWitnessHost", "vim.version.version10", ( ( "cluster", "vim.ClusterComputeResource", "vim.version.version10", 0, None, ), ( "witnessHost", "vim.HostSystem", "vim.version.version10", 0 | F_OPTIONAL, None, ), ( "witnessAddress", "string", "vim.version.version10", 0 | F_OPTIONAL, None, ), ), (0, "vim.Task", "vim.Task"), "System.Read", None, ), ], ) CreateManagedType( "vim.cluster.VsanClusterHealthSystem", "VsanClusterHealthSystem", "vmodl.ManagedObject", "vim.version.version9", [], [ ( "queryPhysicalDiskHealthSummary", "VsanQueryClusterPhysicalDiskHealthSummary", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.host.VsanPhysicalDiskHealthSummary[]", "vim.host.VsanPhysicalDiskHealthSummary[]", ), "System.Read", None, ), ( "queryClusterNetworkPerfTest", "VsanQueryClusterNetworkPerfTest", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ("multicast", "boolean", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterNetworkLoadTestResult", "vim.cluster.VsanClusterNetworkLoadTestResult", ), "System.Read", None, ), ( "queryAdvCfgSync", "VsanQueryClusterAdvCfgSync", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterAdvCfgSyncResult[]", "vim.cluster.VsanClusterAdvCfgSyncResult[]", ), "System.Read", None, ), ( "repairClusterImmediateObjects", "VsanRepairClusterImmediateObjects", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0, "vim.cluster.VsanClusterHealthSystemObjectsRepairResult", "vim.cluster.VsanClusterHealthSystemObjectsRepairResult", ), "System.Read", None, ), ( "queryVerifyClusterNetworkSettings", "VsanQueryVerifyClusterNetworkSettings", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterNetworkHealthResult", "vim.cluster.VsanClusterNetworkHealthResult", ), "System.Read", None, ), ( "queryClusterCreateVmHealthTest", "VsanQueryClusterCreateVmHealthTest", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ("timeout", "int", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterCreateVmHealthTestResult", "vim.cluster.VsanClusterCreateVmHealthTestResult", ), "System.Read", None, ), ( "queryClusterHealthSystemVersions", "VsanQueryClusterHealthSystemVersions", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterHealthSystemVersionResult", "vim.cluster.VsanClusterHealthSystemVersionResult", ), "System.Read", None, ), ( "getClusterHclInfo", "VsanClusterGetHclInfo", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), (0, "vim.cluster.VsanClusterHclInfo", "vim.cluster.VsanClusterHclInfo"), "System.Read", None, ), ( "queryCheckLimits", "VsanQueryClusterCheckLimits", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterLimitHealthResult", "vim.cluster.VsanClusterLimitHealthResult", ), "System.Read", None, ), ( "queryCaptureVsanPcap", "VsanQueryClusterCaptureVsanPcap", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ("duration", "int", "vim.version.version9", 0, None), ( "vmknic", "vim.cluster.VsanClusterHostVmknicMapping[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "includeRawPcap", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "includeIgmp", "boolean", "vim.version.version9", 0 | F_OPTIONAL, None, ), ( "cmmdsMsgTypeFilter", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None, ), ("cmmdsPorts", "int[]", "vim.version.version9", 0 | F_OPTIONAL, None), ("clusterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL, None), ), ( 0, "vim.cluster.VsanVsanClusterPcapResult", "vim.cluster.VsanVsanClusterPcapResult", ), "System.Read", None, ), ( "checkClusterClomdLiveness", "VsanCheckClusterClomdLiveness", "vim.version.version9", ( ("hosts", "string[]", "vim.version.version9", 0, None), ("esxRootPassword", "string", "vim.version.version9", 0, None), ), ( 0, "vim.cluster.VsanClusterClomdLivenessResult", "vim.cluster.VsanClusterClomdLivenessResult", ), "System.Read", None, ), ], ) CreateDataType( "vim.host.VSANCmmdsNodeInfo", "VimHostVSANCmmdsNodeInfo", "vmodl.DynamicData", "vim.version.version10", [ ("nodeUuid", "string", "vim.version.version10", 0), ("isWitness", "boolean", "vim.version.version10", 0), ], ) CreateDataType( "vim.host.VsanPhysicalDiskHealth", "VsanPhysicalDiskHealth", "vmodl.DynamicData", "vim.version.version9", [ ("name", "string", "vim.version.version9", 0), ("uuid", "string", "vim.version.version9", 0), ("inCmmds", "boolean", "vim.version.version9", 0), ("inVsi", "boolean", "vim.version.version9", 0), ("dedupScope", "long", "vim.version.version9", 0 | F_OPTIONAL), ("formatVersion", "int", "vim.version.version9", 0 | F_OPTIONAL), ("isAllFlash", "int", "vim.version.version9", 0 | F_OPTIONAL), ("congestionValue", "int", "vim.version.version9", 0 | F_OPTIONAL), ("congestionArea", "string", "vim.version.version9", 0 | F_OPTIONAL), ("congestionHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("metadataHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "operationalHealthDescription", "string", "vim.version.version9", 0 | F_OPTIONAL, ), ("operationalHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("dedupUsageHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("capacityHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("summaryHealth", "string", "vim.version.version9", 0), ("capacity", "long", "vim.version.version9", 0 | F_OPTIONAL), ("usedCapacity", "long", "vim.version.version9", 0 | F_OPTIONAL), ("reservedCapacity", "long", "vim.version.version9", 0 | F_OPTIONAL), ("totalBytes", "long", "vim.version.version9", 0 | F_OPTIONAL), ("freeBytes", "long", "vim.version.version9", 0 | F_OPTIONAL), ("hashedBytes", "long", "vim.version.version9", 0 | F_OPTIONAL), ("dedupedBytes", "long", "vim.version.version9", 0 | F_OPTIONAL), ("scsiDisk", "vim.host.ScsiDisk", "vim.version.version9", 0 | F_OPTIONAL), ("usedComponents", "long", "vim.version.version9", 0 | F_OPTIONAL), ("maxComponents", "long", "vim.version.version9", 0 | F_OPTIONAL), ("compLimitHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.DataEfficiencyConfig", "VsanDataEfficiencyConfig", "vmodl.DynamicData", "vim.version.version10", [ ("dedupEnabled", "boolean", "vim.version.version10", 0), ("compressionEnabled", "boolean", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.StorageComplianceResult", "VsanStorageComplianceResult", "vmodl.DynamicData", "vim.version.version9", [ ("checkTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("profile", "string", "vim.version.version9", 0 | F_OPTIONAL), ("objectUUID", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "complianceStatus", "vim.cluster.StorageComplianceStatus", "vim.version.version9", 0, ), ("mismatch", "boolean", "vim.version.version9", 0), ( "violatedPolicies", "vim.cluster.StoragePolicyStatus[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "operationalStatus", "vim.cluster.StorageOperationalStatus", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterHealthGroup", "VsanClusterHealthGroup", "vmodl.DynamicData", "vim.version.version9", [ ("groupId", "string", "vim.version.version9", 0), ("groupName", "string", "vim.version.version9", 0), ("groupHealth", "string", "vim.version.version9", 0), ( "groupTests", "vim.cluster.VsanClusterHealthTest[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "groupDetails", "vim.cluster.VsanClusterHealthResultBase[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanSpaceUsageDetailResult", "VsanSpaceUsageDetailResult", "vmodl.DynamicData", "vim.version.version9", [ ( "spaceUsageByObjectType", "vim.cluster.VsanObjectSpaceSummary[]", "vim.version.version9", 0 | F_OPTIONAL, ) ], ) CreateDataType( "vim.cluster.VsanAttachToSrOperation", "VsanAttachToSrOperation", "vmodl.DynamicData", "vim.version.version9", [ ("task", "vim.Task", "vim.version.version9", 0 | F_OPTIONAL), ("success", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("timestamp", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("srNumber", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanObjectSpaceSummary", "VsanObjectSpaceSummary", "vmodl.DynamicData", "vim.version.version9", [ ( "objType", "vim.cluster.VsanObjectTypeEnum", "vim.version.version9", 0 | F_OPTIONAL, ), ("overheadB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("temporaryOverheadB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("primaryCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("provisionCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("reservedCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("overReservedB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("physicalUsedB", "long", "vim.version.version9", 0 | F_OPTIONAL), ("usedB", "long", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHclInfo", "VsanClusterHclInfo", "vmodl.DynamicData", "vim.version.version9", [ ("hclDbLastUpdate", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("hclDbAgeHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "hostResults", "vim.host.VsanHostHclInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanPerfGraph", "VsanPerfGraph", "vmodl.DynamicData", "vim.version.version9", [ ("id", "string", "vim.version.version9", 0), ("metrics", "vim.cluster.VsanPerfMetricId[]", "vim.version.version9", 0), ("unit", "vim.cluster.VsanPerfStatsUnitType", "vim.version.version9", 0), ( "threshold", "vim.cluster.VsanPerfThreshold", "vim.version.version9", 0 | F_OPTIONAL, ), ("name", "string", "vim.version.version9", 0 | F_OPTIONAL), ("description", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHealthResultBase", "VsanClusterHealthResultBase", "vmodl.DynamicData", "vim.version.version9", [("label", "string", "vim.version.version9", 0 | F_OPTIONAL)], ) CreateDataType( "vim.cluster.VsanPerfTopEntity", "VsanPerfTopEntity", "vmodl.DynamicData", "vim.version.version9", [ ("entityRefId", "string", "vim.version.version9", 0), ("value", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterBalancePerDiskInfo", "VsanClusterBalancePerDiskInfo", "vmodl.DynamicData", "vim.version.version9", [ ("uuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("fullness", "long", "vim.version.version9", 0), ("variance", "long", "vim.version.version9", 0), ("fullnessAboveThreshold", "long", "vim.version.version9", 0), ("dataToMoveB", "long", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterHealthTest", "VsanClusterHealthTest", "vmodl.DynamicData", "vim.version.version9", [ ("testId", "string", "vim.version.version9", 0 | F_OPTIONAL), ("testName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("testDescription", "string", "vim.version.version9", 0 | F_OPTIONAL), ("testShortDescription", "string", "vim.version.version9", 0 | F_OPTIONAL), ("testHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "testDetails", "vim.cluster.VsanClusterHealthResultBase[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "testActions", "vim.cluster.VsanClusterHealthAction[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.StoragePolicyStatus", "VsanStoragePolicyStatus", "vmodl.DynamicData", "vim.version.version9", [ ("id", "string", "vim.version.version9", 0 | F_OPTIONAL), ("expectedValue", "string", "vim.version.version9", 0 | F_OPTIONAL), ("currentValue", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanPerfMemberInfo", "VsanPerfMemberInfo", "vmodl.DynamicData", "vim.version.version9", [("thumbprint", "string", "vim.version.version9", 0)], ) CreateDataType( "vim.cluster.VsanPerfMetricId", "VsanPerfMetricId", "vmodl.DynamicData", "vim.version.version9", [ ("label", "string", "vim.version.version9", 0), ("group", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "rollupType", "vim.cluster.VsanPerfSummaryType", "vim.version.version9", 0 | F_OPTIONAL, ), ( "statsType", "vim.cluster.VsanPerfStatsType", "vim.version.version9", 0 | F_OPTIONAL, ), ("name", "string", "vim.version.version9", 0 | F_OPTIONAL), ("description", "string", "vim.version.version9", 0 | F_OPTIONAL), ("metricsCollectInterval", "int", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VSANWitnessHostInfo", "VimClusterVSANWitnessHostInfo", "vmodl.DynamicData", "vim.version.version10", [ ("nodeUuid", "string", "vim.version.version10", 0), ("faultDomainName", "string", "vim.version.version10", 0 | F_OPTIONAL), ("preferredFdName", "string", "vim.version.version10", 0 | F_OPTIONAL), ("preferredFdUuid", "string", "vim.version.version10", 0 | F_OPTIONAL), ("unicastAgentAddr", "string", "vim.version.version10", 0 | F_OPTIONAL), ("host", "vim.HostSystem", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanHealthExtMgmtPreCheckResult", "VsanHealthExtMgmtPreCheckResult", "vmodl.DynamicData", "vim.version.version9", [ ("overallResult", "boolean", "vim.version.version9", 0), ("esxVersionCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("drsCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("eamConnectionCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("installStateCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("results", "vim.cluster.VsanClusterHealthTest[]", "vim.version.version9", 0), ("vumRegistered", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.upgradesystem.HostWithHybridDiskgroupIssue", "VsanHostWithHybridDiskgroupIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("hosts", "vim.HostSystem[]", "vim.version.version10", 0)], ) CreateDataType( "vim.cluster.VsanPerfMetricSeriesCSV", "VsanPerfMetricSeriesCSV", "vmodl.DynamicData", "vim.version.version9", [ ("metricId", "vim.cluster.VsanPerfMetricId", "vim.version.version9", 0), ("values", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanPerfQuerySpec", "VsanPerfQuerySpec", "vmodl.DynamicData", "vim.version.version9", [ ("entityRefId", "string", "vim.version.version9", 0), ("startTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("endTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("group", "string", "vim.version.version9", 0 | F_OPTIONAL), ("labels", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("interval", "int", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanRepairObjectsResult", "VsanRepairObjectsResult", "vmodl.DynamicData", "vim.version.version9", [ ("inQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "failedRepairObjects", "vim.host.VsanFailedRepairObjectResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("notInQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterNetworkPartitionInfo", "VsanClusterNetworkPartitionInfo", "vmodl.DynamicData", "vim.version.version9", [("hosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL)], ) CreateDataType( "vim.vsan.upgradesystem.MixedEsxVersionIssue", "VsanMixedEsxVersionIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [], ) CreateDataType( "vim.cluster.VsanClusterClomdLivenessResult", "VsanClusterClomdLivenessResult", "vmodl.DynamicData", "vim.version.version9", [ ( "clomdLivenessResult", "vim.cluster.VsanHostClomdLivenessResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("issueFound", "boolean", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanVsanClusterPcapResult", "VsanVsanClusterPcapResult", "vmodl.DynamicData", "vim.version.version9", [ ("pkts", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "groups", "vim.cluster.VsanVsanClusterPcapGroup[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("issues", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "hostResults", "vim.host.VsanVsanPcapResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanPerfMasterInformation", "VsanPerfMasterInformation", "vmodl.DynamicData", "vim.version.version9", [ ("secSinceLastStatsWrite", "long", "vim.version.version9", 0 | F_OPTIONAL), ("secSinceLastStatsCollect", "long", "vim.version.version9", 0 | F_OPTIONAL), ("statsIntervalSec", "long", "vim.version.version9", 0), ( "collectionFailureHostUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("renamedStatsDirectories", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("statsDirectoryPercentFree", "long", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanHostCreateVmHealthTestResult", "VsanHostCreateVmHealthTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("state", "string", "vim.version.version9", 0), ("fault", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanDiskFormatConversionCheckResult", "VsanDiskFormatConversionCheckResult", "vim.VsanUpgradeSystem.PreflightCheckResult", "vim.version.version10", [ ("isSupported", "boolean", "vim.version.version10", 0), ("targetVersion", "int", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHealthSystemObjectsRepairResult", "VsanClusterHealthSystemObjectsRepairResult", "vmodl.DynamicData", "vim.version.version9", [ ("inRepairingQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "failedRepairObjects", "vim.host.VsanFailedRepairObjectResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("issueFound", "boolean", "vim.version.version9", 0), ], ) CreateDataType( "vim.host.VsanHostHclInfo", "VsanHostHclInfo", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("hclChecked", "boolean", "vim.version.version9", 0), ("releaseName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ( "controllers", "vim.host.VsanHclControllerInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VSANStretchedClusterCapability", "VimClusterVSANStretchedClusterCapability", "vmodl.DynamicData", "vim.version.version10", [ ("hostMoId", "string", "vim.version.version10", 0), ("connStatus", "string", "vim.version.version10", 0 | F_OPTIONAL), ("isSupported", "boolean", "vim.version.version10", 0 | F_OPTIONAL), ( "hostCapability", "vim.host.VSANStretchedClusterHostCapability", "vim.version.version10", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanDiskMappingsConfigSpec", "VimClusterVsanDiskMappingsConfigSpec", "vmodl.DynamicData", "vim.version.version10", [ ( "hostDiskMappings", "vim.cluster.VsanHostDiskMapping[]", "vim.version.version10", 0, ) ], ) CreateDataType( "vim.host.VsanHostVmdkLoadTestResult", "VsanHostVmdkLoadTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("issueFound", "boolean", "vim.version.version9", 0), ("faultMessage", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "vmdkResults", "vim.host.VsanVmdkLoadTestResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.vsan.ReconfigSpec", "VimVsanReconfigSpec", "vmodl.DynamicData", "vim.version.version10", [ ( "vsanClusterConfig", "vim.vsan.cluster.ConfigInfo", "vim.version.version10", 0 | F_OPTIONAL, ), ( "dataEfficiencyConfig", "vim.vsan.DataEfficiencyConfig", "vim.version.version10", 0 | F_OPTIONAL, ), ( "diskMappingSpec", "vim.cluster.VsanDiskMappingsConfigSpec", "vim.version.version10", 0 | F_OPTIONAL, ), ( "faultDomainsSpec", "vim.cluster.VsanFaultDomainsConfigSpec", "vim.version.version10", 0 | F_OPTIONAL, ), ("modify", "boolean", "vim.version.version10", 0), ("allowReducedRedundancy", "boolean", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanNetworkPeerHealthResult", "VsanNetworkPeerHealthResult", "vmodl.DynamicData", "vim.version.version9", [ ("peer", "string", "vim.version.version9", 0 | F_OPTIONAL), ("peerHostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("peerVmknicName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("smallPingTestSuccessPct", "int", "vim.version.version9", 0 | F_OPTIONAL), ("largePingTestSuccessPct", "int", "vim.version.version9", 0 | F_OPTIONAL), ("maxLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL), ("onSameIpSubnet", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("sourceVmknicName", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanWitnessSpec", "VimClusterVsanWitnessSpec", "vmodl.DynamicData", "vim.version.version10", [ ("host", "vim.HostSystem", "vim.version.version10", 0), ("preferredFaultDomainName", "string", "vim.version.version10", 0), ( "diskMapping", "vim.vsan.host.DiskMapping", "vim.version.version10", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.vsan.host.DiskMappingCreationSpec", "VimVsanHostDiskMappingCreationSpec", "vmodl.DynamicData", "vim.version.version10", [ ("host", "vim.HostSystem", "vim.version.version10", 0), ("cacheDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0 | F_OPTIONAL), ("capacityDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0), ( "creationType", "vim.vsan.host.DiskMappingCreationType", "vim.version.version10", 0, ), ], ) CreateDataType( "vim.host.VsanLimitHealthResult", "VsanLimitHealthResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("issueFound", "boolean", "vim.version.version9", 0), ("maxComponents", "int", "vim.version.version9", 0), ("freeComponents", "int", "vim.version.version9", 0), ("componentLimitHealth", "string", "vim.version.version9", 0), ("lowestFreeDiskSpacePct", "int", "vim.version.version9", 0), ("usedDiskSpaceB", "long", "vim.version.version9", 0), ("totalDiskSpaceB", "long", "vim.version.version9", 0), ("diskFreeSpaceHealth", "string", "vim.version.version9", 0), ("reservedRcSizeB", "long", "vim.version.version9", 0), ("totalRcSizeB", "long", "vim.version.version9", 0), ("rcFreeReservationHealth", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VSANPreferredFaultDomainInfo", "VimClusterVSANPreferredFaultDomainInfo", "vmodl.DynamicData", "vim.version.version10", [ ("preferredFaultDomainName", "string", "vim.version.version10", 0), ("preferredFaultDomainId", "string", "vim.version.version10", 0), ], ) CreateDataType( "vim.host.VsanObjectOverallHealth", "VsanObjectOverallHealth", "vmodl.DynamicData", "vim.version.version9", [ ( "objectHealthDetail", "vim.host.VsanObjectHealth[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("objectVersionCompliance", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanVsanClusterPcapGroup", "VsanVsanClusterPcapGroup", "vmodl.DynamicData", "vim.version.version9", [ ("master", "string", "vim.version.version9", 0), ("members", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHealthResultColumnInfo", "VsanClusterHealthResultColumnInfo", "vmodl.DynamicData", "vim.version.version9", [ ("label", "string", "vim.version.version9", 0), ("type", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterNetworkHealthResult", "VsanClusterNetworkHealthResult", "vmodl.DynamicData", "vim.version.version9", [ ( "hostResults", "vim.host.VsanNetworkHealthResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("issueFound", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("vsanVmknicPresent", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("matchingMulticastConfig", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("matchingIpSubnets", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("pingTestSuccess", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("largePingTestSuccess", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("potentialMulticastIssue", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("otherHostsInVsanCluster", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "partitions", "vim.cluster.VsanClusterNetworkPartitionInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("hostsWithVsanDisabled", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("hostsDisconnected", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("hostsCommFailure", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "hostsInEsxMaintenanceMode", "string[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "hostsInVsanMaintenanceMode", "string[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "infoAboutUnexpectedHosts", "vim.host.VsanQueryResultHostInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanPerfNodeInformation", "VsanPerfNodeInformation", "vmodl.DynamicData", "vim.version.version9", [ ("version", "string", "vim.version.version9", 0), ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ("isCmmdsMaster", "boolean", "vim.version.version9", 0), ("isStatsMaster", "boolean", "vim.version.version9", 0), ("vsanMasterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vsanNodeUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "masterInfo", "vim.cluster.VsanPerfMasterInformation", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanPerfEntityMetricCSV", "VsanPerfEntityMetricCSV", "vmodl.DynamicData", "vim.version.version9", [ ("entityRefId", "string", "vim.version.version9", 0), ("sampleInfo", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "value", "vim.cluster.VsanPerfMetricSeriesCSV[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.vsan.upgradesystem.DiskUnhealthIssue", "VsanDiskUnhealthIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("uuids", "string[]", "vim.version.version10", 0)], ) CreateDataType( "vim.cluster.VsanFaultDomainSpec", "VimClusterVsanFaultDomainSpec", "vmodl.DynamicData", "vim.version.version10", [ ("hosts", "vim.HostSystem[]", "vim.version.version10", 0), ("name", "string", "vim.version.version10", 0), ], ) CreateDataType( "vim.vsan.upgradesystem.ObjectInaccessibleIssue", "VsanObjectInaccessibleIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("uuids", "string[]", "vim.version.version10", 0)], ) CreateDataType( "vim.cluster.VsanDiskFormatConversionSpec", "VsanDiskFormatConversionSpec", "vmodl.DynamicData", "vim.version.version10", [ ( "dataEfficiencyConfig", "vim.vsan.DataEfficiencyConfig", "vim.version.version10", 0 | F_OPTIONAL, ) ], ) CreateDataType( "vim.cluster.VsanClusterHealthAction", "VsanClusterHealthAction", "vmodl.DynamicData", "vim.version.version9", [ ( "actionId", "vim.cluster.VsanClusterHealthActionIdEnum", "vim.version.version9", 0, ), ("actionLabel", "vmodl.LocalizableMessage", "vim.version.version9", 0), ("actionDescription", "vmodl.LocalizableMessage", "vim.version.version9", 0), ("enabled", "boolean", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterHealthSystemVersionResult", "VsanClusterHealthSystemVersionResult", "vmodl.DynamicData", "vim.version.version9", [ ( "hostResults", "vim.cluster.VsanHostHealthSystemVersionResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("vcVersion", "string", "vim.version.version9", 0 | F_OPTIONAL), ("issueFound", "boolean", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterHealthResultRow", "VsanClusterHealthResultRow", "vmodl.DynamicData", "vim.version.version9", [ ("values", "string[]", "vim.version.version9", 0), ( "nestedRows", "vim.cluster.VsanClusterHealthResultRow[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterHealthSystemStatusResult", "VsanClusterHealthSystemStatusResult", "vmodl.DynamicData", "vim.version.version9", [ ("status", "string", "vim.version.version9", 0), ("goalState", "string", "vim.version.version9", 0), ("untrackedHosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ( "trackedHostsStatus", "vim.host.VsanHostHealthSystemStatusResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanHostDiskMapping", "VimClusterVsanHostDiskMapping", "vmodl.DynamicData", "vim.version.version10", [ ("host", "vim.HostSystem", "vim.version.version10", 0), ("cacheDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0 | F_OPTIONAL), ("capacityDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0), ("type", "vim.cluster.VsanDiskGroupCreationType", "vim.version.version10", 0), ], ) CreateDataType( "vim.cluster.VSANStretchedClusterFaultDomainConfig", "VimClusterVSANStretchedClusterFaultDomainConfig", "vmodl.DynamicData", "vim.version.version10", [ ("firstFdName", "string", "vim.version.version10", 0), ("firstFdHosts", "vim.HostSystem[]", "vim.version.version10", 0), ("secondFdName", "string", "vim.version.version10", 0), ("secondFdHosts", "vim.HostSystem[]", "vim.version.version10", 0), ], ) CreateDataType( "vim.host.VSANStretchedClusterHostInfo", "VimHostVSANStretchedClusterHostInfo", "vmodl.DynamicData", "vim.version.version10", [ ("nodeInfo", "vim.host.VSANCmmdsNodeInfo", "vim.version.version10", 0), ( "faultDomainInfo", "vim.host.VSANCmmdsFaultDomainInfo", "vim.version.version10", 0 | F_OPTIONAL, ), ( "preferredFaultDomainInfo", "vim.host.VSANCmmdsPreferredFaultDomainInfo", "vim.version.version10", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.vsan.upgradesystem.HigherObjectsPresentDuringDowngradeIssue", "VsanHigherObjectsPresentDuringDowngradeIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("uuids", "string[]", "vim.version.version10", 0)], ) CreateDataType( "vim.host.VSANCmmdsFaultDomainInfo", "VimHostVSANCmmdsFaultDomainInfo", "vmodl.DynamicData", "vim.version.version10", [ ("faultDomainId", "string", "vim.version.version10", 0), ("faultDomainName", "string", "vim.version.version10", 0), ], ) CreateDataType( "vim.fault.VsanNodeNotMaster", "VsanNodeNotMaster", "vim.fault.VimFault", "vim.version.version9", [ ("vsanMasterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "cmmdsMasterButNotStatsMaster", "boolean", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanHostHealthSystemVersionResult", "VsanHostHealthSystemVersionResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("version", "string", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHealthConfigs", "VsanClusterHealthConfigs", "vmodl.DynamicData", "vim.version.version9", [ ("enableVsanTelemetry", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("vsanTelemetryInterval", "int", "vim.version.version9", 0 | F_OPTIONAL), ( "vsanTelemetryProxy", "vim.cluster.VsanClusterTelemetryProxyConfig", "vim.version.version9", 0 | F_OPTIONAL, ), ( "configs", "vim.cluster.VsanClusterHealthResultKeyValuePair[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterWhatifHostFailuresResult", "VsanClusterWhatifHostFailuresResult", "vmodl.DynamicData", "vim.version.version9", [ ("numFailures", "long", "vim.version.version9", 0), ("totalUsedCapacityB", "long", "vim.version.version9", 0), ("totalCapacityB", "long", "vim.version.version9", 0), ("totalRcReservationB", "long", "vim.version.version9", 0), ("totalRcSizeB", "long", "vim.version.version9", 0), ("usedComponents", "long", "vim.version.version9", 0), ("totalComponents", "long", "vim.version.version9", 0), ("componentLimitHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("diskFreeSpaceHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("rcFreeReservationHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanObjectIdentityAndHealth", "VsanObjectIdentityAndHealth", "vmodl.DynamicData", "vim.version.version9", [ ( "identities", "vim.cluster.VsanObjectIdentity[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "health", "vim.host.VsanObjectOverallHealth", "vim.version.version9", 0 | F_OPTIONAL, ), ( "spaceSummary", "vim.cluster.VsanObjectSpaceSummary[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("rawData", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanHclControllerInfo", "VsanHclControllerInfo", "vmodl.DynamicData", "vim.version.version9", [ ("deviceName", "string", "vim.version.version9", 0), ("deviceDisplayName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("driverName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("driverVersion", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vendorId", "long", "vim.version.version9", 0 | F_OPTIONAL), ("deviceId", "long", "vim.version.version9", 0 | F_OPTIONAL), ("subVendorId", "long", "vim.version.version9", 0 | F_OPTIONAL), ("subDeviceId", "long", "vim.version.version9", 0 | F_OPTIONAL), ("extraInfo", "vim.KeyValue[]", "vim.version.version9", 0 | F_OPTIONAL), ("deviceOnHcl", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("releaseSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("releasesOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("driverVersionsOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("driverVersionSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("fwVersionSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("fwVersionOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("cacheConfigSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("cacheConfigOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("raidConfigSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("raidConfigOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("fwVersion", "string", "vim.version.version9", 0 | F_OPTIONAL), ("raidConfig", "string", "vim.version.version9", 0 | F_OPTIONAL), ("cacheConfig", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "cimProviderInfo", "vim.host.VsanHostCimProviderInfo", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterHealthResultKeyValuePair", "VsanClusterHealthResultKeyValuePair", "vmodl.DynamicData", "vim.version.version9", [ ("key", "string", "vim.version.version9", 0 | F_OPTIONAL), ("value", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.StorageOperationalStatus", "VsanStorageOperationalStatus", "vmodl.DynamicData", "vim.version.version9", [ ("healthy", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("operationETA", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("operationProgress", "long", "vim.version.version9", 0 | F_OPTIONAL), ("transitional", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanSpaceUsage", "VsanSpaceUsage", "vmodl.DynamicData", "vim.version.version9", [ ("totalCapacityB", "long", "vim.version.version9", 0), ("freeCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL), ( "spaceOverview", "vim.cluster.VsanObjectSpaceSummary", "vim.version.version9", 0 | F_OPTIONAL, ), ( "spaceDetail", "vim.cluster.VsanSpaceUsageDetailResult", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterHealthResultTable", "VsanClusterHealthResultTable", "vim.cluster.VsanClusterHealthResultBase", "vim.version.version9", [ ( "columns", "vim.cluster.VsanClusterHealthResultColumnInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "rows", "vim.cluster.VsanClusterHealthResultRow[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterConfig", "VsanClusterConfig", "vmodl.DynamicData", "vim.version.version9", [ ("config", "vim.vsan.cluster.ConfigInfo", "vim.version.version9", 0), ("name", "string", "vim.version.version9", 0), ("hosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.host.VsanHostCapability", "VimVsanHostVsanHostCapability", "vmodl.DynamicData", "vim.version.version10", [ ("host", "vim.HostSystem", "vim.version.version10", 0), ("isSupported", "boolean", "vim.version.version10", 0), ("isLicensed", "boolean", "vim.version.version10", 0), ], ) CreateDataType( "vim.cluster.VsanPerfThreshold", "VsanPerfThreshold", "vmodl.DynamicData", "vim.version.version9", [ ( "direction", "vim.cluster.VsanPerfThresholdDirectionType", "vim.version.version9", 0, ), ("yellow", "string", "vim.version.version9", 0 | F_OPTIONAL), ("red", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanNetworkHealthResult", "VsanNetworkHealthResult", "vmodl.DynamicData", "vim.version.version9", [ ("host", "vim.HostSystem", "vim.version.version9", 0 | F_OPTIONAL), ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vsanVmknicPresent", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("ipSubnets", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("issueFound", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ( "peerHealth", "vim.host.VsanNetworkPeerHealthResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("multicastConfig", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.ConfigInfoEx", "VsanConfigInfoEx", "vim.vsan.cluster.ConfigInfo", "vim.version.version10", [ ( "dataEfficiencyConfig", "vim.vsan.DataEfficiencyConfig", "vim.version.version10", 0 | F_OPTIONAL, ) ], ) CreateDataType( "vim.host.VsanVmdkLoadTestResult", "VsanVmdkLoadTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("success", "boolean", "vim.version.version9", 0), ("faultMessage", "string", "vim.version.version9", 0 | F_OPTIONAL), ("spec", "vim.host.VsanVmdkLoadTestSpec", "vim.version.version9", 0), ("actualDurationSec", "int", "vim.version.version9", 0 | F_OPTIONAL), ("totalBytes", "long", "vim.version.version9", 0 | F_OPTIONAL), ("iops", "long", "vim.version.version9", 0 | F_OPTIONAL), ("tputBps", "long", "vim.version.version9", 0 | F_OPTIONAL), ("avgLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL), ("maxLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL), ("numIoAboveLatencyThreshold", "long", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterVMsHealthOverallResult", "VsanClusterVMsHealthOverAllResult", "vmodl.DynamicData", "vim.version.version9", [ ( "healthStateList", "vim.cluster.VsanClusterVMsHealthSummaryResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("overallHealthState", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanHostHealthSystemStatusResult", "VsanHostHealthSystemStatusResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("status", "string", "vim.version.version9", 0), ("issues", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterAdvCfgSyncResult", "VsanClusterAdvCfgSyncResult", "vmodl.DynamicData", "vim.version.version9", [ ("inSync", "boolean", "vim.version.version9", 0), ("name", "string", "vim.version.version9", 0), ( "hostValues", "vim.cluster.VsanClusterAdvCfgSyncHostResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.host.VsanQueryResultHostInfo", "VsanQueryResultHostInfo", "vmodl.DynamicData", "vim.version.version9", [ ("uuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("hostnameInCmmds", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vsanIpv4Addresses", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.host.DiskMapInfoEx", "VimVsanHostDiskMapInfoEx", "vmodl.DynamicData", "vim.version.version10", [ ("mapping", "vim.vsan.host.DiskMapping", "vim.version.version10", 0), ("isMounted", "boolean", "vim.version.version10", 0), ("isAllFlash", "boolean", "vim.version.version10", 0), ("isDataEfficiency", "boolean", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanVmdkLoadTestSpec", "VsanVmdkLoadTestSpec", "vmodl.DynamicData", "vim.version.version9", [ ( "vmdkCreateSpec", "vim.VirtualDiskManager.FileBackedVirtualDiskSpec", "vim.version.version9", 0 | F_OPTIONAL, ), ( "vmdkIOSpec", "vim.host.VsanVmdkIOLoadSpec", "vim.version.version9", 0 | F_OPTIONAL, ), ( "vmdkIOSpecSequence", "vim.host.VsanVmdkIOLoadSpec[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("stepDurationSec", "long", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterHealthSummary", "VsanClusterHealthSummary", "vmodl.DynamicData", "vim.version.version9", [ ( "clusterStatus", "vim.cluster.VsanClusterHealthSystemStatusResult", "vim.version.version9", 0 | F_OPTIONAL, ), ("timestamp", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ( "clusterVersions", "vim.cluster.VsanClusterHealthSystemVersionResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "objectHealth", "vim.host.VsanObjectOverallHealth", "vim.version.version9", 0 | F_OPTIONAL, ), ( "vmHealth", "vim.cluster.VsanClusterVMsHealthOverallResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "networkHealth", "vim.cluster.VsanClusterNetworkHealthResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "limitHealth", "vim.cluster.VsanClusterLimitHealthResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "advCfgSync", "vim.cluster.VsanClusterAdvCfgSyncResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "createVmHealth", "vim.cluster.VsanHostCreateVmHealthTestResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "physicalDisksHealth", "vim.host.VsanPhysicalDiskHealthSummary[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "hclInfo", "vim.cluster.VsanClusterHclInfo", "vim.version.version9", 0 | F_OPTIONAL, ), ( "groups", "vim.cluster.VsanClusterHealthGroup[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("overallHealth", "string", "vim.version.version9", 0), ("overallHealthDescription", "string", "vim.version.version9", 0), ( "clomdLiveness", "vim.cluster.VsanClusterClomdLivenessResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "diskBalance", "vim.cluster.VsanClusterBalanceSummary", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanPerfEntityType", "VsanPerfEntityType", "vmodl.DynamicData", "vim.version.version9", [ ("name", "string", "vim.version.version9", 0), ("id", "string", "vim.version.version9", 0), ("graphs", "vim.cluster.VsanPerfGraph[]", "vim.version.version9", 0), ("description", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanNetworkLoadTestResult", "VsanNetworkLoadTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("status", "string", "vim.version.version9", 0 | F_OPTIONAL), ("client", "boolean", "vim.version.version9", 0), ("bandwidthBps", "long", "vim.version.version9", 0), ("totalBytes", "long", "vim.version.version9", 0), ("lostDatagrams", "long", "vim.version.version9", 0 | F_OPTIONAL), ("lossPct", "long", "vim.version.version9", 0 | F_OPTIONAL), ("sentDatagrams", "long", "vim.version.version9", 0 | F_OPTIONAL), ("jitterMs", "float", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanPhysicalDiskHealthSummary", "VsanPhysicalDiskHealthSummary", "vmodl.DynamicData", "vim.version.version9", [ ("overallHealth", "string", "vim.version.version9", 0), ( "heapsWithIssues", "vim.host.VsanResourceHealth[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "slabsWithIssues", "vim.host.VsanResourceHealth[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "disks", "vim.host.VsanPhysicalDiskHealth[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "componentsWithIssues", "vim.host.VsanResourceHealth[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("hostDedupScope", "int", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.vsan.host.VsanDiskManagementSystemCapability", "VimVsanHostVsanDiskManagementSystemCapability", "vmodl.DynamicData", "vim.version.version10", [("version", "string", "vim.version.version10", 0)], ) CreateDataType( "vim.host.VsanHostCimProviderInfo", "VsanHostCimProviderInfo", "vmodl.DynamicData", "vim.version.version9", [ ("cimProviderSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("installedCIMProvider", "string", "vim.version.version9", 0 | F_OPTIONAL), ("cimProviderOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanObjectInformation", "VsanObjectInformation", "vmodl.DynamicData", "vim.version.version9", [ ("directoryName", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vsanObjectUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vsanHealth", "string", "vim.version.version9", 0 | F_OPTIONAL), ("policyAttributes", "vim.KeyValue[]", "vim.version.version9", 0 | F_OPTIONAL), ("spbmProfileUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("spbmProfileGenerationId", "string", "vim.version.version9", 0 | F_OPTIONAL), ( "spbmComplianceResult", "vim.cluster.StorageComplianceResult", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanObjectIdentity", "VsanObjectIdentity", "vmodl.DynamicData", "vim.version.version9", [ ("uuid", "string", "vim.version.version9", 0), ("type", "string", "vim.version.version9", 0), ("vmInstanceUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vmNsObjectUuid", "string", "vim.version.version9", 0 | F_OPTIONAL), ("vm", "vim.VirtualMachine", "vim.version.version9", 0 | F_OPTIONAL), ("description", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanResourceHealth", "VsanResourceHealth", "vmodl.DynamicData", "vim.version.version9", [ ("resource", "string", "vim.version.version9", 0), ("health", "string", "vim.version.version9", 0), ("description", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanCapability", "VsanCapability", "vmodl.DynamicData", "vim.version.version10", [ ("target", "vmodl.ManagedObject", "vim.version.version10", 0 | F_OPTIONAL), ("capabilities", "string[]", "vim.version.version10", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanHostClomdLivenessResult", "VsanHostClomdLivenessResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("clomdStat", "string", "vim.version.version9", 0), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanObjectQuerySpec", "VsanObjectQuerySpec", "vmodl.DynamicData", "vim.version.version9", [ ("uuid", "string", "vim.version.version9", 0), ("spbmProfileGenerationId", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterLimitHealthResult", "VsanClusterLimitHealthResult", "vmodl.DynamicData", "vim.version.version9", [ ("issueFound", "boolean", "vim.version.version9", 0), ("componentLimitHealth", "string", "vim.version.version9", 0), ("diskFreeSpaceHealth", "string", "vim.version.version9", 0), ("rcFreeReservationHealth", "string", "vim.version.version9", 0), ( "hostResults", "vim.host.VsanLimitHealthResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ( "whatifHostFailures", "vim.cluster.VsanClusterWhatifHostFailuresResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ("hostsCommFailure", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanStorageWorkloadType", "VsanStorageWorkloadType", "vmodl.DynamicData", "vim.version.version9", [ ("specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0), ("typeId", "string", "vim.version.version9", 0), ("name", "string", "vim.version.version9", 0), ("description", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterAdvCfgSyncHostResult", "VsanClusterAdvCfgSyncHostResult", "vmodl.DynamicData", "vim.version.version9", [ ("hostname", "string", "vim.version.version9", 0), ("value", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.vsan.upgradesystem.ObjectPolicyIssue", "VsanObjectPolicyIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("uuids", "string[]", "vim.version.version10", 0)], ) CreateDataType( "vim.cluster.VsanPerfTopEntities", "VsanPerfTopEntities", "vmodl.DynamicData", "vim.version.version9", [ ("metricId", "vim.cluster.VsanPerfMetricId", "vim.version.version9", 0), ("entities", "vim.cluster.VsanPerfTopEntity[]", "vim.version.version9", 0), ], ) CreateDataType( "vim.host.VsanProactiveRebalanceInfoEx", "VsanProactiveRebalanceInfoEx", "vmodl.DynamicData", "vim.version.version9", [ ("running", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ("startTs", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("stopTs", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL), ("varianceThreshold", "float", "vim.version.version9", 0 | F_OPTIONAL), ("timeThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL), ("rateThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL), ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterProactiveTestResult", "VsanClusterProactiveTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("overallStatus", "string", "vim.version.version9", 0), ("overallStatusDescription", "string", "vim.version.version9", 0), ("timestamp", "vmodl.DateTime", "vim.version.version9", 0), ( "healthTest", "vim.cluster.VsanClusterHealthTest", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.host.VSANCmmdsPreferredFaultDomainInfo", "VimHostVSANCmmdsPreferredFaultDomainInfo", "vmodl.DynamicData", "vim.version.version10", [ ("preferredFaultDomainId", "string", "vim.version.version10", 0), ("preferredFaultDomainName", "string", "vim.version.version10", 0), ], ) CreateDataType( "vim.cluster.VsanFaultDomainsConfigSpec", "VimClusterVsanFaultDomainsConfigSpec", "vmodl.DynamicData", "vim.version.version10", [ ( "faultDomains", "vim.cluster.VsanFaultDomainSpec[]", "vim.version.version10", 0, ), ( "witness", "vim.cluster.VsanWitnessSpec", "vim.version.version10", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterHostVmknicMapping", "VsanClusterHostVmknicMapping", "vmodl.DynamicData", "vim.version.version9", [ ("host", "string", "vim.version.version9", 0), ("vmknic", "string", "vim.version.version9", 0), ], ) CreateDataType( "vim.cluster.VsanClusterVmdkLoadTestResult", "VsanClusterVmdkLoadTestResult", "vmodl.DynamicData", "vim.version.version9", [ ("task", "vim.Task", "vim.version.version9", 0 | F_OPTIONAL), ( "clusterResult", "vim.cluster.VsanClusterProactiveTestResult", "vim.version.version9", 0 | F_OPTIONAL, ), ( "hostResults", "vim.host.VsanHostVmdkLoadTestResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterVMsHealthSummaryResult", "VsanClusterVMsHealthSummaryResult", "vmodl.DynamicData", "vim.version.version9", [ ("numVMs", "int", "vim.version.version9", 0), ("state", "string", "vim.version.version9", 0 | F_OPTIONAL), ("health", "string", "vim.version.version9", 0), ("vmInstanceUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VSANStretchedClusterHostCapability", "VimHostVSANStretchedClusterHostCapability", "vmodl.DynamicData", "vim.version.version10", [("featureVersion", "string", "vim.version.version10", 0)], ) CreateDataType( "vim.host.VsanFailedRepairObjectResult", "VsanFailedRepairObjectResult", "vmodl.DynamicData", "vim.version.version9", [ ("uuid", "string", "vim.version.version9", 0), ("errMessage", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterCreateVmHealthTestResult", "VsanClusterCreateVmHealthTestResult", "vmodl.DynamicData", "vim.version.version9", [ ( "clusterResult", "vim.cluster.VsanClusterProactiveTestResult", "vim.version.version9", 0, ), ( "hostResults", "vim.cluster.VsanHostCreateVmHealthTestResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.host.VsanObjectHealth", "VsanObjectHealth", "vmodl.DynamicData", "vim.version.version9", [ ("numObjects", "int", "vim.version.version9", 0), ("health", "vim.host.VsanObjectHealthState", "vim.version.version9", 0), ("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterBalanceSummary", "VsanClusterBalanceSummary", "vmodl.DynamicData", "vim.version.version9", [ ("varianceThreshold", "long", "vim.version.version9", 0), ( "disks", "vim.cluster.VsanClusterBalancePerDiskInfo[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.cluster.VsanClusterTelemetryProxyConfig", "VsanClusterTelemetryProxyConfig", "vmodl.DynamicData", "vim.version.version9", [ ("host", "string", "vim.version.version9", 0 | F_OPTIONAL), ("port", "int", "vim.version.version9", 0 | F_OPTIONAL), ("user", "string", "vim.version.version9", 0 | F_OPTIONAL), ("password", "string", "<PASSWORD>", 0 | F_OPTIONAL), ("autoDiscovered", "boolean", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanVmdkIOLoadSpec", "VsanVmdkIOLoadSpec", "vmodl.DynamicData", "vim.version.version9", [ ("readPct", "int", "vim.version.version9", 0), ("oio", "int", "vim.version.version9", 0), ("iosizeB", "int", "vim.version.version9", 0), ("dataSizeMb", "long", "vim.version.version9", 0), ("random", "boolean", "vim.version.version9", 0), ("startOffsetB", "long", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.host.VsanVsanPcapResult", "VsanVsanPcapResult", "vmodl.DynamicData", "vim.version.version9", [ ("calltime", "float", "vim.version.version9", 0), ("vmknic", "string", "vim.version.version9", 0), ("tcpdumpFilter", "string", "vim.version.version9", 0), ("snaplen", "int", "vim.version.version9", 0), ("pkts", "string[]", "vim.version.version9", 0 | F_OPTIONAL), ("pcap", "string", "vim.version.version9", 0 | F_OPTIONAL), ("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL), ("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL), ], ) CreateDataType( "vim.cluster.VsanClusterNetworkLoadTestResult", "VsanClusterNetworkLoadTestResult", "vmodl.DynamicData", "vim.version.version9", [ ( "clusterResult", "vim.cluster.VsanClusterProactiveTestResult", "vim.version.version9", 0, ), ( "hostResults", "vim.host.VsanNetworkLoadTestResult[]", "vim.version.version9", 0 | F_OPTIONAL, ), ], ) CreateDataType( "vim.vsan.upgradesystem.HostPropertyRetrieveIssue", "VsanHostPropertyRetrieveIssue", "vim.VsanUpgradeSystem.PreflightCheckIssue", "vim.version.version10", [("hosts", "vim.HostSystem[]", "vim.version.version10", 0)], ) CreateEnumType( "vim.host.VsanObjectHealthState", "VsanObjectHealthState", "vim.version.version9", [ "inaccessible", "reducedavailabilitywithnorebuild", "reducedavailabilitywithnorebuilddelaytimer", "reducedavailabilitywithactiverebuild", "datamove", "nonavailabilityrelatedreconfig", "nonavailabilityrelatedincompliance", "healthy", ], ) CreateEnumType( "vim.cluster.VsanObjectTypeEnum", "VsanObjectTypeEnum", "vim.version.version9", [ "vmswap", "vdisk", "namespace", "vmem", "statsdb", "iscsi", "other", "fileSystemOverhead", "dedupOverhead", "checksumOverhead", ], ) CreateEnumType( "vim.cluster.VsanCapabilityType", "VsanCapabilityType", "vim.version.version10", [ "capability", "allflash", "stretchedcluster", "dataefficiency", "clusterconfig", "upgrade", "objectidentities", ], ) CreateEnumType( "vim.cluster.VsanHealthLogLevelEnum", "VsanHealthLogLevelEnum", "vim.version.version9", [ "INFO", "WARNING", "ERROR", "DEBUG", "CRITICAL", ], ) CreateEnumType( "vim.cluster.VsanPerfSummaryType", "VsanPerfSummaryType", "vim.version.version9", [ "average", "maximum", "minimum", "latest", "summation", "none", ], ) CreateEnumType( "vim.cluster.StorageComplianceStatus", "VsanStorageComplianceStatus", "vim.version.version9", [ "compliant", "nonCompliant", "unknown", "notApplicable", ], ) CreateEnumType( "vim.cluster.VsanPerfStatsUnitType", "VsanPerfStatsUnitType", "vim.version.version9", [ "number", "time_ms", "percentage", "size_bytes", "rate_bytes", ], ) CreateEnumType( "vim.cluster.VsanPerfThresholdDirectionType", "VsanPerfThresholdDirectionType", "vim.version.version9", [ "upper", "lower", ], ) CreateEnumType( "vim.cluster.VsanPerfStatsType", "VsanPerfStatsType", "vim.version.version9", [ "absolute", "delta", "rate", ], ) CreateEnumType( "vim.vsan.host.DiskMappingCreationType", "VimVsanHostDiskMappingCreationType", "vim.version.version10", [ "hybrid", "allFlash", ], ) CreateEnumType( "vim.cluster.VsanClusterHealthActionIdEnum", "VsanClusterHealthActionIdEnum", "vim.version.version9", [ "RepairClusterObjectsAction", "UploadHclDb", "UpdateHclDbFromInternet", "EnableHealthService", "DiskBalance", "StopDiskBalance", "RemediateDedup", "UpgradeVsanDiskFormat", ], ) CreateEnumType( "vim.cluster.VsanDiskGroupCreationType", "VimClusterVsanDiskGroupCreationType", "vim.version.version10", [ "allflash", "hybrid", ], )
python/10-tables.py
EzzEddin/amazon-textract-code-samples
291
11080868
<gh_stars>100-1000 import boto3 from trp import Document # Document documentName = "employmentapp.png" # Amazon Textract client textract = boto3.client('textract') # Call Amazon Textract with open(documentName, "rb") as document: response = textract.analyze_document( Document={ 'Bytes': document.read(), }, FeatureTypes=["TABLES"]) #print(response) doc = Document(response) for page in doc.pages: # Print tables for table in page.tables: for r, row in enumerate(table.rows): for c, cell in enumerate(row.cells): print("Table[{}][{}] = {}".format(r, c, cell.text))
setup.py
al1p-R/pandas-log
186
11080883
<reponame>al1p-R/pandas-log #!/usr/bin/env python # -*- coding: utf-8 -*- """The setup script.""" from setuptools import setup, find_packages with open('README.rst') as readme_file: readme = readme_file.read() requirements = ["humanize>=0.5.0", "pandas>=0.25.1", "pandas_flavor>=0.1.2"] setup_requirements = ['pytest-runner', ] test_requirements = ['pytest>=3', ] setup( name='pandas-log', version='0.1.7', description="pandas-log provides feedback about basic pandas operations. It provides simple wrapper functions for " "the most common functions, such as apply, map, query and more.", author="<NAME>", author_email='<EMAIL>', url='https://github.com/eyaltrabelsi/pandas-log', packages=find_packages(include=['pandas_log', 'pandas_log.*']), install_requires=requirements, python_requires=">=3.4", license="MIT license", long_description="pandas-log provides feedback about basic pandas operations. It provides simple wrapper functions " "for the most common functions, such as apply, map, query and more.", long_description_content_type="text/x-rst", setup_requires=setup_requirements, test_suite='tests', tests_require=test_requirements )
iSH/tools/fakefsify.py
bzxy/cydia
678
11080887
#!/usr/bin/env python3 import os import sys from pathlib import Path import struct import urllib.request import tarfile import sqlite3 SCHEMA = """ create table meta (id integer unique default 0, db_inode integer); insert into meta (db_inode) values (0); create table stats (inode integer primary key, stat blob); create table paths (path blob primary key, inode integer references stats(inode)); create index inode_to_path on paths (inode, path); pragma user_version=3; """ # no index is needed on stats, because the rows are ordered by the primary key def extract_member(archive, db, member): path = data/(member.name) major = member.devmajor minor = member.devminor rdev = ((minor & 0xfff00) << 12) | (major << 8) | (minor & 0xff) mode = member.mode if member.isfile(): mode |= 0o100000 elif member.isdir(): mode |= 0o040000 elif member.issym(): mode |= 0o120000 elif member.ischr(): mode |= 0o020000 elif member.isblk(): mode |= 0o060000 elif member.isfifo(): mode |= 0o010000 elif member.islnk(): pass else: raise ValueError('unrecognized tar entry type') if path != data and not path.parent.exists(): parent_member = tarfile.TarInfo(os.path.dirname(member.name)) parent_member.type = tarfile.DIRTYPE parent_member.mode = 0o755 extract_member(archive, db, parent_member) if member.isdir(): path.mkdir() elif member.issym(): path.write_text(member.linkname) elif member.isfile(): archive.extract(member, data) else: path.touch() def meta_path(path): path = path.relative_to(data) return b'/' + bytes(path) if path.parts else b'' cursor = db.cursor() if member.islnk(): # a hard link shares its target's inode target_path = data/(member.linkname) cursor.execute('select inode from paths where path = ?', (meta_path(target_path),)) inode, = cursor.fetchone() else: statblob = memoryview(struct.pack( '=iiii', mode, member.uid, member.gid, rdev, )) cursor.execute('insert into stats (stat) values (?)', (statblob,)) inode = cursor.lastrowid cursor.execute('insert into paths values (?, ?)', (meta_path(path), inode)) def extract_archive(archive, db): for member in archive.getmembers(): extract_member(archive, db, member) try: _, archive_path, fs = sys.argv except ValueError: print('wrong number of arguments') print("Usage: fakefsify.py <rootfs archive> <destination dir>") sys.exit(1) fs = Path(fs) fs.mkdir(parents=True, exist_ok=True) data = fs/'data' db_path = fs/'meta.db' with open(archive_path, 'rb') as archive: with tarfile.open(fileobj=archive) as archive: db = sqlite3.connect(str(db_path)) db.executescript(SCHEMA) extract_archive(archive, db) db.execute('update meta set db_inode = ?', (db_path.stat().st_ino,)) db.commit()
release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewBand.py
htlcnn/ironpython-stubs
182
11080892
<reponame>htlcnn/ironpython-stubs class DataGridViewBand(DataGridViewElement,ICloneable,IDisposable): """ Represents a linear collection of elements in a System.Windows.Forms.DataGridView control. """ def Clone(self): """ Clone(self: DataGridViewBand) -> object Creates an exact copy of this band. Returns: An System.Object that represents the cloned System.Windows.Forms.DataGridViewBand. """ pass def Dispose(self): """ Dispose(self: DataGridViewBand) Releases all resources used by the System.Windows.Forms.DataGridViewBand. """ pass def OnDataGridViewChanged(self,*args): """ OnDataGridViewChanged(self: DataGridViewBand) Called when the band is associated with a different System.Windows.Forms.DataGridView. """ pass def RaiseCellClick(self,*args): """ RaiseCellClick(self: DataGridViewElement,e: DataGridViewCellEventArgs) Raises the System.Windows.Forms.DataGridView.CellClick event. e: A System.Windows.Forms.DataGridViewCellEventArgs that contains the event data. """ pass def RaiseCellContentClick(self,*args): """ RaiseCellContentClick(self: DataGridViewElement,e: DataGridViewCellEventArgs) Raises the System.Windows.Forms.DataGridView.CellContentClick event. e: A System.Windows.Forms.DataGridViewCellEventArgs that contains the event data. """ pass def RaiseCellContentDoubleClick(self,*args): """ RaiseCellContentDoubleClick(self: DataGridViewElement,e: DataGridViewCellEventArgs) Raises the System.Windows.Forms.DataGridView.CellContentDoubleClick event. e: A System.Windows.Forms.DataGridViewCellEventArgs that contains the event data. """ pass def RaiseCellValueChanged(self,*args): """ RaiseCellValueChanged(self: DataGridViewElement,e: DataGridViewCellEventArgs) Raises the System.Windows.Forms.DataGridView.CellValueChanged event. e: A System.Windows.Forms.DataGridViewCellEventArgs that contains the event data. """ pass def RaiseDataError(self,*args): """ RaiseDataError(self: DataGridViewElement,e: DataGridViewDataErrorEventArgs) Raises the System.Windows.Forms.DataGridView.DataError event. e: A System.Windows.Forms.DataGridViewDataErrorEventArgs that contains the event data. """ pass def RaiseMouseWheel(self,*args): """ RaiseMouseWheel(self: DataGridViewElement,e: MouseEventArgs) Raises the System.Windows.Forms.Control.MouseWheel event. e: A System.Windows.Forms.MouseEventArgs that contains the event data. """ pass def ToString(self): """ ToString(self: DataGridViewBand) -> str Returns a string that represents the current band. Returns: A System.String that represents the current System.Windows.Forms.DataGridViewBand. """ pass def __enter__(self,*args): """ __enter__(self: IDisposable) -> object Provides the implementation of __enter__ for objects which implement IDisposable. """ pass def __exit__(self,*args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) Provides the implementation of __exit__ for objects which implement IDisposable. """ pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __str__(self,*args): pass ContextMenuStrip=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the shortcut menu for the band. Get: ContextMenuStrip(self: DataGridViewBand) -> ContextMenuStrip Set: ContextMenuStrip(self: DataGridViewBand)=value """ DefaultCellStyle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the default cell style of the band. Get: DefaultCellStyle(self: DataGridViewBand) -> DataGridViewCellStyle Set: DefaultCellStyle(self: DataGridViewBand)=value """ DefaultHeaderCellType=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the run-time type of the default header cell. Get: DefaultHeaderCellType(self: DataGridViewBand) -> Type Set: DefaultHeaderCellType(self: DataGridViewBand)=value """ Displayed=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether the band is currently displayed onscreen. Get: Displayed(self: DataGridViewBand) -> bool """ Frozen=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the band will move when a user scrolls through the System.Windows.Forms.DataGridView. Get: Frozen(self: DataGridViewBand) -> bool Set: Frozen(self: DataGridViewBand)=value """ HasDefaultCellStyle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether the System.Windows.Forms.DataGridViewBand.DefaultCellStyle property has been set. Get: HasDefaultCellStyle(self: DataGridViewBand) -> bool """ HeaderCellCore=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the header cell of the System.Windows.Forms.DataGridViewBand. """ Index=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the relative position of the band within the System.Windows.Forms.DataGridView control. Get: Index(self: DataGridViewBand) -> int """ InheritedStyle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the cell style in effect for the current band,taking into account style inheritance. Get: InheritedStyle(self: DataGridViewBand) -> DataGridViewCellStyle """ IsRow=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether the band represents a row. """ ReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the user can edit the band's cells. Get: ReadOnly(self: DataGridViewBand) -> bool Set: ReadOnly(self: DataGridViewBand)=value """ Resizable=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the band can be resized in the user interface (UI). Get: Resizable(self: DataGridViewBand) -> DataGridViewTriState Set: Resizable(self: DataGridViewBand)=value """ Selected=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the band is in a selected user interface (UI) state. Get: Selected(self: DataGridViewBand) -> bool Set: Selected(self: DataGridViewBand)=value """ Tag=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the object that contains data to associate with the band. Get: Tag(self: DataGridViewBand) -> object Set: Tag(self: DataGridViewBand)=value """ Visible=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the band is visible to the user. Get: Visible(self: DataGridViewBand) -> bool Set: Visible(self: DataGridViewBand)=value """
openbook_communities/migrations/0030_communitynotificationssubscription_new_posts_notifications.py
TamaraAbells/okuna-api
164
11080907
# Generated by Django 2.2.5 on 2019-12-06 13:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('openbook_communities', '0029_communitynotificationssubscription'), ] operations = [ migrations.AddField( model_name='communitynotificationssubscription', name='new_posts_notifications', field=models.BooleanField(default=True), ), ]
hooks/post_gen_project.py
devalexanderdaza/amazing-github-template
146
11080955
<filename>hooks/post_gen_project.py #!/usr/bin/env python import os import shutil PROJECT_DIRECTORY = os.path.realpath(os.path.curdir) def remove(filepath): fullpath = os.path.join(PROJECT_DIRECTORY, filepath) if os.path.isfile(fullpath): os.remove(fullpath) else: shutil.rmtree(fullpath, ignore_errors=True) if __name__ == "__main__": if "{{ cookiecutter.open_source_license }}" == "Not open source": remove(".github/workflows/codeql.yml") # codeql is available for free only for OSS remove("LICENSE") if "{{ cookiecutter.include_logo }}" != "y": remove("docs/images/logo.svg") if "{{ cookiecutter.include_screenshots }}" != "y": remove("docs/images/screenshot.png") if "{{ cookiecutter.include_security }}" != "y": remove("docs/SECURITY.md") if "{{ cookiecutter.include_code_of_conduct }}" != "y": remove("docs/CODE_OF_CONDUCT.md") if "{{ cookiecutter.include_workflows }}" != "y": remove(".github/workflows") remove(".github/labels.yml") if "{{ cookiecutter.use_github_discussions }}" == "y": remove(".github/ISSUE_TEMPLATE/04_SUPPORT_QUESTION.md") if "{{ cookiecutter.use_codeql }}" != "y": remove(".github/workflows/codeql.yml")
Anaconda-files/Program_01c.py
arvidl/dynamical-systems-with-applications-using-python
106
11080959
<filename>Anaconda-files/Program_01c.py # Program 01c: Two curves on one plot. # See Figure 1.14. import matplotlib.pyplot as plt import numpy as np t = np.arange(0.0, 2.0, 0.01) c = 1 + np.cos(2*np.pi*t) s = 1 + np.sin(2*np.pi*t) plt.plot(t, s, 'r--', t, c, 'b-.') plt.xlabel('time (s)') plt.ylabel('voltage (mV)') plt.title('Voltage-time plot') plt.grid(True) plt.savefig('Voltage-Time Plot.png') plt.show()
backpack/hessianfree/hvp.py
jabader97/backpack
395
11080970
<filename>backpack/hessianfree/hvp.py import torch from .rop import R_op def hessian_vector_product(f, params, v, grad_params=None, detach=True): """ Multiplies the vector `v` with the Hessian, `v = H @ v` where `H` is the Hessian of `f` w.r.t. `params`. Example usage: ``` X, Y = data() model = torch.nn.Linear(784, 10) lossfunc = torch.nn.CrossEntropyLoss() loss = lossfunc(output, Y) v = list([torch.randn_like(p) for p in model.parameters]) Hv = hessian_vector_product(loss, list(model.parameters()), v) ``` Parameters: ----------- f: torch.Tensor params: torch.Tensor or [torch.Tensor] v: torch.Tensor or [torch.Tensor] Shapes must match `params` grad_params: torch.Tensor or [torch.Tensor], optional Gradient of `f` w.r.t. `params`. If the gradients have already been computed elsewhere, the first of two backpropagations can be saved. `grad_params` must have been computed with `create_graph = True` to not destroy the computation graph for the second backward pass. detach: Bool, optional Whether to detach the output from the computation graph (default: True) """ if grad_params is not None: df_dx = tuple(grad_params) else: df_dx = torch.autograd.grad(f, params, create_graph=True, retain_graph=True) Hv = R_op(df_dx, params, v) if detach: return tuple(j.detach() for j in Hv) else: return Hv
mayan/apps/duplicates/icons.py
nattangwiwat/Mayan-EDMS-recitation
343
11080976
from mayan.apps.appearance.classes import Icon icon_duplicated_document_list = Icon( driver_name='fontawesome', symbol='clone' ) icon_duplicated_document_scan = Icon( driver_name='fontawesome-dual-classes', primary_class='fas fa-clone', secondary_class='far fa-eye' )
client/verta/verta/_swagger/_public/modeldb/api/DatasetServiceApi.py
CaptEmulation/modeldb
835
11081009
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT class DatasetServiceApi: def __init__(self, client, base_path = "/v1"): self.client = client self.base_path = base_path def addDatasetAttributes(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/addDatasetAttributes" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbAddDatasetAttributesResponse import ModeldbAddDatasetAttributesResponse ret = ModeldbAddDatasetAttributesResponse.from_json(ret) return ret def addDatasetTags(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/addDatasetTags" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbAddDatasetTagsResponse import ModeldbAddDatasetTagsResponse ret = ModeldbAddDatasetTagsResponse.from_json(ret) return ret def createDataset(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/createDataset" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbCreateDatasetResponse import ModeldbCreateDatasetResponse ret = ModeldbCreateDatasetResponse.from_json(ret) return ret def deleteDataset(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/deleteDataset" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("DELETE", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbDeleteDatasetResponse import ModeldbDeleteDatasetResponse ret = ModeldbDeleteDatasetResponse.from_json(ret) return ret def deleteDatasetAttributes(self, id=None, attribute_keys=None, delete_all=None): __query = { "id": client.to_query(id), "attribute_keys": client.to_query(attribute_keys), "delete_all": client.to_query(delete_all) } body = None format_args = {} path = "/dataset/deleteDatasetAttributes" if "$id" in path: path = path.replace("$id", "%(id)s") format_args["id"] = id if "$attribute_keys" in path: path = path.replace("$attribute_keys", "%(attribute_keys)s") format_args["attribute_keys"] = attribute_keys if "$delete_all" in path: path = path.replace("$delete_all", "%(delete_all)s") format_args["delete_all"] = delete_all ret = self.client.request("DELETE", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbDeleteDatasetAttributesResponse import ModeldbDeleteDatasetAttributesResponse ret = ModeldbDeleteDatasetAttributesResponse.from_json(ret) return ret def deleteDatasetTags(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/deleteDatasetTags" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("DELETE", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbDeleteDatasetTagsResponse import ModeldbDeleteDatasetTagsResponse ret = ModeldbDeleteDatasetTagsResponse.from_json(ret) return ret def deleteDatasets(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/deleteDatasets" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("DELETE", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbDeleteDatasetsResponse import ModeldbDeleteDatasetsResponse ret = ModeldbDeleteDatasetsResponse.from_json(ret) return ret def findDatasets(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/findDatasets" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbFindDatasetsResponse import ModeldbFindDatasetsResponse ret = ModeldbFindDatasetsResponse.from_json(ret) return ret def getAllDatasets(self, page_number=None, page_limit=None, ascending=None, sort_key=None, workspace_name=None): __query = { "page_number": client.to_query(page_number), "page_limit": client.to_query(page_limit), "ascending": client.to_query(ascending), "sort_key": client.to_query(sort_key), "workspace_name": client.to_query(workspace_name) } body = None format_args = {} path = "/dataset/getAllDatasets" if "$page_number" in path: path = path.replace("$page_number", "%(page_number)s") format_args["page_number"] = page_number if "$page_limit" in path: path = path.replace("$page_limit", "%(page_limit)s") format_args["page_limit"] = page_limit if "$ascending" in path: path = path.replace("$ascending", "%(ascending)s") format_args["ascending"] = ascending if "$sort_key" in path: path = path.replace("$sort_key", "%(sort_key)s") format_args["sort_key"] = sort_key if "$workspace_name" in path: path = path.replace("$workspace_name", "%(workspace_name)s") format_args["workspace_name"] = workspace_name ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetAllDatasetsResponse import ModeldbGetAllDatasetsResponse ret = ModeldbGetAllDatasetsResponse.from_json(ret) return ret def getDatasetAttributes(self, id=None, attribute_keys=None, get_all=None): __query = { "id": client.to_query(id), "attribute_keys": client.to_query(attribute_keys), "get_all": client.to_query(get_all) } body = None format_args = {} path = "/dataset/getDatasetAttributes" if "$id" in path: path = path.replace("$id", "%(id)s") format_args["id"] = id if "$attribute_keys" in path: path = path.replace("$attribute_keys", "%(attribute_keys)s") format_args["attribute_keys"] = attribute_keys if "$get_all" in path: path = path.replace("$get_all", "%(get_all)s") format_args["get_all"] = get_all ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetAttributesResponse import ModeldbGetAttributesResponse ret = ModeldbGetAttributesResponse.from_json(ret) return ret def getDatasetById(self, id=None): __query = { "id": client.to_query(id) } body = None format_args = {} path = "/dataset/getDatasetById" if "$id" in path: path = path.replace("$id", "%(id)s") format_args["id"] = id ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetDatasetByIdResponse import ModeldbGetDatasetByIdResponse ret = ModeldbGetDatasetByIdResponse.from_json(ret) return ret def getDatasetByName(self, name=None, workspace_name=None): __query = { "name": client.to_query(name), "workspace_name": client.to_query(workspace_name) } body = None format_args = {} path = "/dataset/getDatasetByName" if "$name" in path: path = path.replace("$name", "%(name)s") format_args["name"] = name if "$workspace_name" in path: path = path.replace("$workspace_name", "%(workspace_name)s") format_args["workspace_name"] = workspace_name ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetDatasetByNameResponse import ModeldbGetDatasetByNameResponse ret = ModeldbGetDatasetByNameResponse.from_json(ret) return ret def getDatasetTags(self, id=None): __query = { "id": client.to_query(id) } body = None format_args = {} path = "/dataset/getDatasetTags" if "$id" in path: path = path.replace("$id", "%(id)s") format_args["id"] = id ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetTagsResponse import ModeldbGetTagsResponse ret = ModeldbGetTagsResponse.from_json(ret) return ret def getExperimentRunByDataset(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/getExperimentRunByDataset" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbGetExperimentRunByDatasetResponse import ModeldbGetExperimentRunByDatasetResponse ret = ModeldbGetExperimentRunByDatasetResponse.from_json(ret) return ret def getLastExperimentByDatasetId(self, dataset_id=None): __query = { "dataset_id": client.to_query(dataset_id) } body = None format_args = {} path = "/dataset/getLastExperimentByDatasetId" if "$dataset_id" in path: path = path.replace("$dataset_id", "%(dataset_id)s") format_args["dataset_id"] = dataset_id ret = self.client.request("GET", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbLastExperimentByDatasetIdResponse import ModeldbLastExperimentByDatasetIdResponse ret = ModeldbLastExperimentByDatasetIdResponse.from_json(ret) return ret def setDatasetVisibility(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/setDatasetVisibility" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbSetDatasetVisibiltyResponse import ModeldbSetDatasetVisibiltyResponse ret = ModeldbSetDatasetVisibiltyResponse.from_json(ret) return ret def setDatasetWorkspace(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/setDatasetWorkspace" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbSetDatasetWorkspaceResponse import ModeldbSetDatasetWorkspaceResponse ret = ModeldbSetDatasetWorkspaceResponse.from_json(ret) return ret def updateDatasetAttributes(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/updateDatasetAttributes" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbUpdateDatasetAttributesResponse import ModeldbUpdateDatasetAttributesResponse ret = ModeldbUpdateDatasetAttributesResponse.from_json(ret) return ret def updateDatasetDescription(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/updateDatasetDescription" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbUpdateDatasetDescriptionResponse import ModeldbUpdateDatasetDescriptionResponse ret = ModeldbUpdateDatasetDescriptionResponse.from_json(ret) return ret def updateDatasetName(self, body=None): __query = { } if body is None: raise Exception("Missing required parameter \"body\"") format_args = {} path = "/dataset/updateDatasetName" if "$body" in path: path = path.replace("$body", "%(body)s") format_args["body"] = body ret = self.client.request("POST", self.base_path + path % format_args, __query, body) if ret is not None: from ..model.ModeldbUpdateDatasetNameResponse import ModeldbUpdateDatasetNameResponse ret = ModeldbUpdateDatasetNameResponse.from_json(ret) return ret
model/ldam_drw_resnets/ea_resnet_cifar.py
frank-xwang/RIDE-LongTailRecognition
175
11081030
<gh_stars>100-1000 # From https://github.com/kaidic/LDAM-DRW/blob/master/models/resnet_cifar.py ''' Properly implemented ResNet for CIFAR10 as described in paper [1]. The implementation and structure of this file is hugely influenced by [2] which is implemented for ImageNet and doesn't have option A for identity. Moreover, most of the implementations on the web is copy-paste from torchvision's resnet and has wrong number of params. Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following number of layers and parameters: name | layers | params ResNet20 | 20 | 0.27M ResNet32 | 32 | 0.46M ResNet44 | 44 | 0.66M ResNet56 | 56 | 0.85M ResNet110 | 110 | 1.7M ResNet1202| 1202 | 19.4m which this implementation indeed has. Reference: [1] <NAME>, <NAME>, <NAME>, <NAME> Deep Residual Learning for Image Recognition. arXiv:1512.03385 [2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py If you use this implementation in you work, please don't forget to mention the author, <NAME>. ''' import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from torch.nn import Parameter __all__ = ['ResNet_s', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202'] def _weights_init(m): classname = m.__class__.__name__ if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight) class NormedLinear(nn.Module): def __init__(self, in_features, out_features): super(NormedLinear, self).__init__() self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5) def forward(self, x): out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0)) return out class LambdaLayer(nn.Module): def __init__(self, lambd): super(LambdaLayer, self).__init__() self.lambd = lambd def forward(self, x): return self.lambd(x) class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, option='A'): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != planes: if option == 'A': """ For CIFAR10 ResNet paper uses option A. """ self.planes = planes self.in_planes = in_planes # self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:db20:35b:7399::5, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0)) self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:db20:35b:7399::5, ::2], (0, 0, 0, 0, (planes - in_planes) // 2, (planes - in_planes) // 2), "constant", 0)) elif option == 'B': self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class ResNet_s(nn.Module): def __init__(self, block, num_blocks, num_experts, num_classes=10, reduce_dimension=False, layer2_output_dim=None, layer3_output_dim=None, top_choices_num=5, pos_weight=20, share_expert_help_pred_fc=True, force_all=False, use_norm=False, s=30): super(ResNet_s, self).__init__() self.in_planes = 16 self.num_experts = num_experts self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1) self.in_planes = self.next_in_planes if layer2_output_dim is None: if reduce_dimension: layer2_output_dim = 24 else: layer2_output_dim = 32 if layer3_output_dim is None: if reduce_dimension: layer3_output_dim = 48 else: layer3_output_dim = 64 self.layer2s = nn.ModuleList([self._make_layer(block, layer2_output_dim, num_blocks[1], stride=2) for _ in range(num_experts)]) self.in_planes = self.next_in_planes self.layer3s = nn.ModuleList([self._make_layer(block, layer3_output_dim, num_blocks[2], stride=2) for _ in range(num_experts)]) self.in_planes = self.next_in_planes if use_norm: self.linears = nn.ModuleList([NormedLinear(layer3_output_dim, num_classes) for _ in range(num_experts)]) else: s = 1 self.linears = nn.ModuleList([nn.Linear(layer3_output_dim, num_classes) for _ in range(num_experts)]) self.num_classes = num_classes self.top_choices_num = top_choices_num self.share_expert_help_pred_fc = share_expert_help_pred_fc self.layer3_feat = True expert_hidden_fc_output_dim = 16 self.expert_help_pred_hidden_fcs = nn.ModuleList([nn.Linear((layer3_output_dim if self.layer3_feat else layer2_output_dim) * block.expansion, expert_hidden_fc_output_dim) for _ in range(self.num_experts - 1)]) if self.share_expert_help_pred_fc: self.expert_help_pred_fc = nn.Linear(expert_hidden_fc_output_dim + self.top_choices_num, 1) else: self.expert_help_pred_fcs = nn.ModuleList([nn.Linear(expert_hidden_fc_output_dim + self.top_choices_num, 1) for _ in range(self.num_experts - 1)]) self.pos_weight = pos_weight self.s = s self.force_all = force_all if not force_all: for name, param in self.named_parameters(): if "expert_help_pred" in name: param.requires_grad_(True) else: param.requires_grad_(False) self.apply(_weights_init) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1]*(num_blocks-1) layers = [] self.next_in_planes = self.in_planes for stride in strides: layers.append(block(self.next_in_planes, planes, stride)) self.next_in_planes = planes * block.expansion return nn.Sequential(*layers) def _hook_before_iter(self): assert self.training, "_hook_before_iter should be called at training time only, after train() is called" count = 0 for module in self.modules(): if isinstance(module, nn.BatchNorm2d): if module.weight.requires_grad == False: module.eval() count += 1 if count > 0: print("Warning: detected at least one frozen BN, set them to eval state. Count:", count) def _separate_part(self, x, ind): out = x out = (self.layer2s[ind])(out) out = (self.layer3s[ind])(out) self.feat = out out = F.avg_pool2d(out, out.size()[3]) out = out.view(out.size(0), -1) out = (self.linears[ind])(out) out = out * self.s # This hyperparam s is originally in the loss function, but we moved it here to prevent using s multiple times in distillation. return out def pred_expert_help(self, input_part, i): feature, logits = input_part feature = F.adaptive_avg_pool2d(feature, (1, 1)).flatten(1) feature = feature / feature.norm(dim=1, keepdim=True) feature = F.relu((self.expert_help_pred_hidden_fcs[i])(feature)) topk, _ = torch.topk(logits, k=self.top_choices_num, dim=1) confidence_input = torch.cat((topk, feature), dim=1) if self.share_expert_help_pred_fc: expert_help_pred = self.expert_help_pred_fc(confidence_input) else: expert_help_pred = (self.expert_help_pred_fcs[i])(confidence_input) return expert_help_pred def forward(self, x, target=None): out = F.relu(self.bn1(self.conv1(x))) shared_part = self.layer1(out) if target is not None: # training time output = shared_part.new_zeros((shared_part.size(0), self.num_classes)) expert_help_preds = output.new_zeros((output.size(0), self.num_experts - 1), dtype=torch.float) # first column: correctness of the first model, second: correctness of expert of the first and second, etc. correctness = output.new_zeros((output.size(0), self.num_experts), dtype=torch.bool) loss = output.new_zeros((1,)) for i in range(self.num_experts): output += self._separate_part(shared_part, i) correctness[:, i] = output.argmax(dim=1) == target # Or: just helpful, predict 1 if i != self.num_experts - 1: expert_help_preds[:, i] = self.pred_expert_help((self.feat, output / (i+1)), i).view((-1,)) for i in range(self.num_experts - 1): expert_help_target = (~correctness[:, i]) & correctness[:, i+1:].any(dim=1) expert_help_pred = expert_help_preds[:, i] loss += F.binary_cross_entropy_with_logits(expert_help_pred, expert_help_target.float(), pos_weight=expert_help_pred.new_tensor([self.pos_weight])) # output with all experts return output / self.num_experts, loss / (self.num_experts - 1) else: # test time expert_next = shared_part.new_ones((shared_part.size(0),), dtype=torch.bool) num_experts_for_each_sample = shared_part.new_ones((shared_part.size(0), 1), dtype=torch.long) output = self._separate_part(shared_part, 0) for i in range(1, self.num_experts): expert_help_pred = self.pred_expert_help((self.feat, output[expert_next] / i), i-1).view((-1,)) if not self.force_all: expert_next[expert_next.clone()] = torch.sigmoid(expert_help_pred) > 0.5 print("expert ({}):".format(i), expert_next.sum().item() / expert_next.size(0)) if not expert_next.any(): break output[expert_next] += self._separate_part(shared_part[expert_next], i) num_experts_for_each_sample[expert_next] += 1 return output / num_experts_for_each_sample, num_experts_for_each_sample def resnet20(): return ResNet_s(BasicBlock, [3, 3, 3]) def resnet32(num_classes=10, use_norm=False): return ResNet_s(BasicBlock, [5, 5, 5], num_classes=num_classes, use_norm=use_norm) def resnet44(): return ResNet_s(BasicBlock, [7, 7, 7]) def resnet56(): return ResNet_s(BasicBlock, [9, 9, 9]) def resnet110(): return ResNet_s(BasicBlock, [18, 18, 18]) def resnet1202(): return ResNet_s(BasicBlock, [200, 200, 200]) def test(net): import numpy as np total_params = 0 for x in filter(lambda p: p.requires_grad, net.parameters()): total_params += np.prod(x.data.numpy().shape) print("Total number of params", total_params) print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters())))) if __name__ == "__main__": for net_name in __all__: if net_name.startswith('resnet'): print(net_name) test(globals()[net_name]()) print()
policy/openbot/server/zeroconf.py
januxnet/OpenBot
1,971
11081072
<reponame>januxnet/OpenBot<gh_stars>1000+ """ Example of announcing a service (in this case, a fake HTTP server) """ import asyncio import logging import os import socket import sys from aiohttp import web from aiozeroconf import ServiceInfo, Zeroconf from netifaces import interfaces, ifaddresses, AF_INET SERVICE_TYPE = "_openbot-server._tcp.local." loop = asyncio.get_event_loop() zc = Zeroconf(loop) async def register(app: web.Application): await run_test(zc) app.on_shutdown.append(on_shutdown) async def run_test(zc): desc = {} local_ip = ip4_address() name = ( os.getenv("OPENBOT_NAME", socket.gethostname()) .replace(".local", "") .replace(".", "-") ) info = ServiceInfo( SERVICE_TYPE, f"{name}.{SERVICE_TYPE}", address=socket.inet_aton(local_ip), port=8000, weight=0, priority=0, properties=desc, ) print("Registration of the service with name:", name) await zc.register_service(info) def ip4_address(): for interface in interfaces(): addresses = ifaddresses(interface) if AF_INET not in addresses: continue for link in addresses[AF_INET]: if "addr" not in link: continue ip4 = link["addr"] if ip4.startswith("127.") or ip4.startswith("10."): print(f"Skip address {ip4} @ interface {interface}") continue print(f"Found address {ip4} @ interface {interface}") return ip4 async def do_close(zc): global info await zc.unregister_service(info) await zc.close() async def on_shutdown(app): print("Unregistering...") await do_close(zc) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) if len(sys.argv) > 1: assert sys.argv[1:] == ["--debug"] logging.getLogger("aiozeroconf").setLevel(logging.DEBUG) try: xx = loop.create_task(run_test(zc)) loop.run_forever() except KeyboardInterrupt: print("Unregistering...") loop.run_until_complete(do_close(zc)) finally: loop.close()
tests/patterns/test_SequencePattern.py
josiah-wolf-oberholtzer/supriya
191
11081089
import pytest from supriya.patterns import SequencePattern from supriya.patterns.testutils import run_pattern_test @pytest.mark.parametrize( "stop_at, sequence, iterations, expected, is_infinite", [ (None, [1, 2, 3], None, [1, 2, 3], True), (None, [1, 2, 3], 1, [1, 2, 3], False), (None, [1, 2, 3], 2, [1, 2, 3, 1, 2, 3], False), (None, [1, 2, 3, SequencePattern(["a", "b"])], 1, [1, 2, 3, "a", "b"], False), ( None, [1, 2, 3, SequencePattern(["a", "b"], None)], 1, [1, 2, 3, "a", "b"], True, ), ( None, [SequencePattern([1, 2, 3]), SequencePattern(["a", "b"])], 1, [1, 2, 3, "a", "b"], False, ), ( None, [SequencePattern([1, 2, 3]), SequencePattern(["a", "b"])], 2, [1, 2, 3, "a", "b", 1, 2, 3, "a", "b"], False, ), ( None, [SequencePattern([1, 2, 3], None), SequencePattern(["a", "b"])], 1, [1, 2, 3], True, ), ( None, [SequencePattern([1, 2, 3], None), SequencePattern(["a", "b"])], None, [1, 2, 3], True, ), ], ) def test(stop_at, sequence, iterations, expected, is_infinite): pattern = SequencePattern(sequence, iterations=iterations) run_pattern_test(pattern, expected, is_infinite, stop_at) @pytest.mark.parametrize( "sequence, iterations, raises", [ ([1, 2, 3], 1, None), ([1, 2, 3], 10, None), ([1, 2, 3], None, None), ([1, 2, 3], 0, ValueError), (23, 1, ValueError), ], ) def test___init__(sequence, iterations, raises): if raises: with pytest.raises(raises): SequencePattern(sequence, iterations) else: SequencePattern(sequence, iterations)
models/se_resnet.py
calmisential/Basic_CNNs_TensorFlow2.0
497
11081104
<gh_stars>100-1000 import tensorflow as tf from configuration import NUM_CLASSES class SEBlock(tf.keras.layers.Layer): def __init__(self, input_channels, r=16): super(SEBlock, self).__init__() self.pool = tf.keras.layers.GlobalAveragePooling2D() self.fc1 = tf.keras.layers.Dense(units=input_channels // r) self.fc2 = tf.keras.layers.Dense(units=input_channels) def call(self, inputs, **kwargs): branch = self.pool(inputs) branch = self.fc1(branch) branch = tf.nn.relu(branch) branch = self.fc2(branch) branch = tf.nn.sigmoid(branch) branch = tf.expand_dims(input=branch, axis=1) branch = tf.expand_dims(input=branch, axis=1) output = tf.keras.layers.multiply(inputs=[inputs, branch]) return output class BottleNeck(tf.keras.layers.Layer): def __init__(self, filter_num, stride=1): super(BottleNeck, self).__init__() self.conv1 = tf.keras.layers.Conv2D(filters=filter_num, kernel_size=(1, 1), strides=1, padding='same') self.bn1 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Conv2D(filters=filter_num, kernel_size=(3, 3), strides=stride, padding='same') self.bn2 = tf.keras.layers.BatchNormalization() self.conv3 = tf.keras.layers.Conv2D(filters=filter_num * 4, kernel_size=(1, 1), strides=1, padding='same') self.bn3 = tf.keras.layers.BatchNormalization() self.se = SEBlock(input_channels=filter_num * 4) self.downsample = tf.keras.Sequential() self.downsample.add(tf.keras.layers.Conv2D(filters=filter_num * 4, kernel_size=(1, 1), strides=stride)) self.downsample.add(tf.keras.layers.BatchNormalization()) def call(self, inputs, training=None): identity = self.downsample(inputs) x = self.conv1(inputs) x = self.bn1(x, training=training) x = tf.nn.relu(x) x = self.conv2(x) x = self.bn2(x, training=training) x = tf.nn.relu(x) x = self.conv3(x) x = self.bn3(x, training=training) x = self.se(x) output = tf.nn.relu(tf.keras.layers.add([identity, x])) return output class SEResNet(tf.keras.Model): def __init__(self, block_num, model_name): super(SEResNet, self).__init__() self.model_name = model_name self.pre1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(7, 7), strides=2, padding='same') self.pre2 = tf.keras.layers.BatchNormalization() self.pre3 = tf.keras.layers.Activation(tf.keras.activations.relu) self.pre4 = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=2) self.layer1 = self._make_res_block(filter_num=64, blocks=block_num[0]) self.layer2 = self._make_res_block(filter_num=128, blocks=block_num[1], stride=2) self.layer3 = self._make_res_block(filter_num=256, blocks=block_num[2], stride=2) self.layer4 = self._make_res_block(filter_num=512, blocks=block_num[3], stride=2) self.avgpool = tf.keras.layers.GlobalAveragePooling2D() self.fc = tf.keras.layers.Dense(units=NUM_CLASSES, activation=tf.keras.activations.softmax) def _make_res_block(self, filter_num, blocks, stride=1): res_block = tf.keras.Sequential() res_block.add(BottleNeck(filter_num, stride=stride)) for _ in range(1, blocks): res_block.add(BottleNeck(filter_num, stride=1)) return res_block def call(self, inputs, training=None, mask=None): pre1 = self.pre1(inputs) pre2 = self.pre2(pre1, training=training) pre3 = self.pre3(pre2) pre4 = self.pre4(pre3) l1 = self.layer1(pre4, training=training) l2 = self.layer2(l1, training=training) l3 = self.layer3(l2, training=training) l4 = self.layer4(l3, training=training) avgpool = self.avgpool(l4) out = self.fc(avgpool) return out def __repr__(self): return "SE_ResNet_{}".format(self.model_name) def se_resnet_50(): return SEResNet(block_num=[3, 4, 6, 3], model_name="50") def se_resnet_101(): return SEResNet(block_num=[3, 4, 23, 3], model_name="101") def se_resnet_152(): return SEResNet(block_num=[3, 8, 36, 3], model_name="152")
accounting/apps/connect/middlewares.py
Abdur-rahmaanJ/django-accounting
127
11081107
class ForceGettingStartedMiddleware(object): def process_request(self, request): pass
security/pwntools/ex03/soluce.py
Franco227/Workshops
333
11081112
#!/usr/bin/env python3 from pwn import * PATH = './challenge' def get_offset(): p = process(PATH) c = cyclic(50000) p.sendline(c) p.recvuntil('[check] 0x') check = p.recvline().strip() sequence = unhex(check) # extract cyclic from hex ptr little_endian = u32(sequence, endian='big') # from big to little return cyclic_find(little_endian) # find offset offset = get_offset() p = process(PATH) p.sendline(b'A' * offset + pack(0xdeadbeef)) p.recvuntil('[check]') print(p.recvall())
sshtest.py
VCStardust/K8tools
4,611
11081123
<reponame>VCStardust/K8tools # C:\Users\null\Desktop\ssh>python ssh.py 172.16.58.3 22 root k8gege # 172.16.58.3 22 root k8gege LoginOK import paramiko import sys ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) def checkSSH(): try: ssh.connect(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) print sys.argv[1]+' '+sys.argv[2]+' '+sys.argv[3]+' '+sys.argv[4]+' LoginOK' except: pass checkSSH()
rdkit/ML/Cluster/ClusterVis.py
kazuyaujihara/rdkit
1,609
11081125
# $Id$ # # Copyright (C) 2001-2006 <NAME> and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """Cluster tree visualization using Sping """ try: from rdkit.sping import pid piddle = pid except ImportError: from rdkit.piddle import piddle import numpy from . import ClusterUtils class VisOpts(object): """ stores visualization options for cluster viewing **Instance variables** - x/yOffset: amount by which the drawing is offset from the edges of the canvas - lineColor: default color for drawing the cluster tree - lineWidth: the width of the lines used to draw the tree """ xOffset = 20 yOffset = 20 lineColor = piddle.Color(0, 0, 0) hideColor = piddle.Color(.8, .8, .8) terminalColors = [piddle.Color(1, 0, 0), piddle.Color(0, 0, 1), piddle.Color(1, 1, 0), piddle.Color(0, .5, .5), piddle.Color(0, .8, 0), piddle.Color(.5, .5, .5), piddle.Color(.8, .3, .3), piddle.Color(.3, .3, .8), piddle.Color(.8, .8, .3), piddle.Color(.3, .8, .8)] lineWidth = 2 hideWidth = 1.1 nodeRad = 15 nodeColor = piddle.Color(1., .4, .4) highlightColor = piddle.Color(1., 1., .4) highlightRad = 10 def _scaleMetric(val, power=2, min=1e-4): val = float(val) nval = pow(val, power) if nval < min: return 0.0 else: return numpy.log(nval / min) class ClusterRenderer(object): def __init__(self, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1, stopAtCentroids=0, logScale=0, tooClose=-1): self.canvas = canvas self.size = size self.ptColors = ptColors self.lineWidth = lineWidth self.showIndices = showIndices self.showNodes = showNodes self.stopAtCentroids = stopAtCentroids self.logScale = logScale self.tooClose = tooClose def _AssignPointLocations(self, cluster, terminalOffset=4): self.pts = cluster.GetPoints() self.nPts = len(self.pts) self.xSpace = float(self.size[0] - 2 * VisOpts.xOffset) / float(self.nPts - 1) ySize = self.size[1] for i in range(self.nPts): pt = self.pts[i] if self.logScale > 0: v = _scaleMetric(pt.GetMetric(), self.logScale) else: v = float(pt.GetMetric()) pt._drawPos = (VisOpts.xOffset + i * self.xSpace, ySize - (v * self.ySpace + VisOpts.yOffset) + terminalOffset) def _AssignClusterLocations(self, cluster): # first get the search order (top down) toDo = [cluster] examine = cluster.GetChildren()[:] while len(examine): node = examine.pop(0) children = node.GetChildren() if len(children): toDo.append(node) for child in children: if not child.IsTerminal(): examine.append(child) # and reverse it (to run from bottom up) toDo.reverse() for node in toDo: if self.logScale > 0: v = _scaleMetric(node.GetMetric(), self.logScale) else: v = float(node.GetMetric()) # average our children's x positions childLocs = [x._drawPos[0] for x in node.GetChildren()] if len(childLocs): xp = sum(childLocs) / float(len(childLocs)) yp = self.size[1] - (v * self.ySpace + VisOpts.yOffset) node._drawPos = (xp, yp) def _DrawToLimit(self, cluster): """ we assume that _drawPos settings have been done already """ if self.lineWidth is None: lineWidth = VisOpts.lineWidth else: lineWidth = self.lineWidth examine = [cluster] while len(examine): node = examine.pop(0) xp, yp = node._drawPos children = node.GetChildren() if abs(children[1]._drawPos[0] - children[0]._drawPos[0]) > self.tooClose: # draw the horizontal line connecting things drawColor = VisOpts.lineColor self.canvas.drawLine(children[0]._drawPos[0], yp, children[-1]._drawPos[0], yp, drawColor, lineWidth) # and draw the lines down to the children for child in children: if self.ptColors and child.GetData() is not None: drawColor = self.ptColors[child.GetData()] else: drawColor = VisOpts.lineColor cxp, cyp = child._drawPos self.canvas.drawLine(cxp, yp, cxp, cyp, drawColor, lineWidth) if not child.IsTerminal(): examine.append(child) else: if self.showIndices and not self.stopAtCentroids: try: txt = str(child.GetName()) except Exception: txt = str(child.GetIndex()) self.canvas.drawString(txt, cxp - self.canvas.stringWidth(txt) / 2, cyp) else: # draw a "hidden" line to the bottom self.canvas.drawLine(xp, yp, xp, self.size[1] - VisOpts.yOffset, VisOpts.hideColor, lineWidth) def DrawTree(self, cluster, minHeight=2.0): if self.logScale > 0: v = _scaleMetric(cluster.GetMetric(), self.logScale) else: v = float(cluster.GetMetric()) if v <= 0: v = minHeight self.ySpace = float(self.size[1] - 2 * VisOpts.yOffset) / v self._AssignPointLocations(cluster) self._AssignClusterLocations(cluster) if not self.stopAtCentroids: self._DrawToLimit(cluster) else: raise NotImplementedError('stopAtCentroids drawing not yet implemented') def DrawClusterTree(cluster, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1, stopAtCentroids=0, logScale=0, tooClose=-1): """ handles the work of drawing a cluster tree on a Sping canvas **Arguments** - cluster: the cluster tree to be drawn - canvas: the Sping canvas on which to draw - size: the size of _canvas_ - ptColors: if this is specified, the _colors_ will be used to color the terminal nodes of the cluster tree. (color == _pid.Color_) - lineWidth: if specified, it will be used for the widths of the lines used to draw the tree **Notes** - _Canvas_ is neither _save_d nor _flush_ed at the end of this - if _ptColors_ is the wrong length for the number of possible terminal node types, this will throw an IndexError - terminal node types are determined using their _GetData()_ methods """ renderer = ClusterRenderer(canvas, size, ptColors, lineWidth, showIndices, showNodes, stopAtCentroids, logScale, tooClose) renderer.DrawTree(cluster) def _DrawClusterTree(cluster, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1, stopAtCentroids=0, logScale=0, tooClose=-1): """ handles the work of drawing a cluster tree on a Sping canvas **Arguments** - cluster: the cluster tree to be drawn - canvas: the Sping canvas on which to draw - size: the size of _canvas_ - ptColors: if this is specified, the _colors_ will be used to color the terminal nodes of the cluster tree. (color == _pid.Color_) - lineWidth: if specified, it will be used for the widths of the lines used to draw the tree **Notes** - _Canvas_ is neither _save_d nor _flush_ed at the end of this - if _ptColors_ is the wrong length for the number of possible terminal node types, this will throw an IndexError - terminal node types are determined using their _GetData()_ methods """ if lineWidth is None: lineWidth = VisOpts.lineWidth pts = cluster.GetPoints() nPts = len(pts) if nPts <= 1: return xSpace = float(size[0] - 2 * VisOpts.xOffset) / float(nPts - 1) if logScale > 0: v = _scaleMetric(cluster.GetMetric(), logScale) else: v = float(cluster.GetMetric()) ySpace = float(size[1] - 2 * VisOpts.yOffset) / v for i in range(nPts): pt = pts[i] if logScale > 0: v = _scaleMetric(pt.GetMetric(), logScale) else: v = float(pt.GetMetric()) pt._drawPos = (VisOpts.xOffset + i * xSpace, size[1] - (v * ySpace + VisOpts.yOffset)) # if not stopAtCentroids or not hasattr(pt, '_isCentroid'): # allNodes.remove(pt) # allNodes not defined if not stopAtCentroids: allNodes = ClusterUtils.GetNodeList(cluster) else: allNodes = ClusterUtils.GetNodesDownToCentroids(cluster) while len(allNodes): node = allNodes.pop(0) children = node.GetChildren() if len(children): if logScale > 0: v = _scaleMetric(node.GetMetric(), logScale) else: v = float(node.GetMetric()) yp = size[1] - (v * ySpace + VisOpts.yOffset) childLocs = [x._drawPos[0] for x in children] xp = sum(childLocs) / float(len(childLocs)) node._drawPos = (xp, yp) if not stopAtCentroids or node._aboveCentroid > 0: for child in children: if ptColors != [] and child.GetData() is not None: drawColor = ptColors[child.GetData()] else: drawColor = VisOpts.lineColor if showNodes and hasattr(child, '_isCentroid'): canvas.drawLine(child._drawPos[0], child._drawPos[1] - VisOpts.nodeRad / 2, child._drawPos[0], node._drawPos[1], drawColor, lineWidth) else: canvas.drawLine(child._drawPos[0], child._drawPos[1], child._drawPos[0], node._drawPos[1], drawColor, lineWidth) canvas.drawLine(children[0]._drawPos[0], node._drawPos[1], children[-1]._drawPos[0], node._drawPos[1], VisOpts.lineColor, lineWidth) else: for child in children: drawColor = VisOpts.hideColor canvas.drawLine(child._drawPos[0], child._drawPos[1], child._drawPos[0], node._drawPos[1], drawColor, VisOpts.hideWidth) canvas.drawLine(children[0]._drawPos[0], node._drawPos[1], children[-1]._drawPos[0], node._drawPos[1], VisOpts.hideColor, VisOpts.hideWidth) if showIndices and (not stopAtCentroids or node._aboveCentroid >= 0): txt = str(node.GetIndex()) if hasattr(node, '_isCentroid'): txtColor = piddle.Color(1, .2, .2) else: txtColor = piddle.Color(0, 0, 0) canvas.drawString(txt, node._drawPos[0] - canvas.stringWidth(txt) / 2, node._drawPos[1] + canvas.fontHeight() / 4, color=txtColor) if showNodes and hasattr(node, '_isCentroid'): rad = VisOpts.nodeRad canvas.drawEllipse(node._drawPos[0] - rad / 2, node._drawPos[1] - rad / 2, node._drawPos[0] + rad / 2, node._drawPos[1] + rad / 2, piddle.transparent, fillColor=VisOpts.nodeColor) txt = str(node._clustID) canvas.drawString(txt, node._drawPos[0] - canvas.stringWidth(txt) / 2, node._drawPos[1] + canvas.fontHeight() / 4, color=piddle.Color(0, 0, 0)) if showIndices and not stopAtCentroids: for pt in pts: txt = str(pt.GetIndex()) canvas.drawString( str(pt.GetIndex()), pt._drawPos[0] - canvas.stringWidth(txt) / 2, pt._drawPos[1]) def ClusterToPDF(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0, stopAtCentroids=0, logScale=0): """ handles the work of drawing a cluster tree to an PDF file **Arguments** - cluster: the cluster tree to be drawn - fileName: the name of the file to be created - size: the size of output canvas - ptColors: if this is specified, the _colors_ will be used to color the terminal nodes of the cluster tree. (color == _pid.Color_) - lineWidth: if specified, it will be used for the widths of the lines used to draw the tree **Notes** - if _ptColors_ is the wrong length for the number of possible terminal node types, this will throw an IndexError - terminal node types are determined using their _GetData()_ methods """ try: from rdkit.sping.PDF import pidPDF except ImportError: from rdkit.piddle import piddlePDF pidPDF = piddlePDF canvas = pidPDF.PDFCanvas(size, fileName) if lineWidth is None: lineWidth = VisOpts.lineWidth DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth, showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale) if fileName: canvas.save() return canvas def ClusterToSVG(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0, stopAtCentroids=0, logScale=0): """ handles the work of drawing a cluster tree to an SVG file **Arguments** - cluster: the cluster tree to be drawn - fileName: the name of the file to be created - size: the size of output canvas - ptColors: if this is specified, the _colors_ will be used to color the terminal nodes of the cluster tree. (color == _pid.Color_) - lineWidth: if specified, it will be used for the widths of the lines used to draw the tree **Notes** - if _ptColors_ is the wrong length for the number of possible terminal node types, this will throw an IndexError - terminal node types are determined using their _GetData()_ methods """ try: from rdkit.sping.SVG import pidSVG except ImportError: from rdkit.piddle.piddleSVG import piddleSVG pidSVG = piddleSVG canvas = pidSVG.SVGCanvas(size, fileName) if lineWidth is None: lineWidth = VisOpts.lineWidth DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth, showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale) if fileName: canvas.save() return canvas def ClusterToImg(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0, stopAtCentroids=0, logScale=0): """ handles the work of drawing a cluster tree to an image file **Arguments** - cluster: the cluster tree to be drawn - fileName: the name of the file to be created - size: the size of output canvas - ptColors: if this is specified, the _colors_ will be used to color the terminal nodes of the cluster tree. (color == _pid.Color_) - lineWidth: if specified, it will be used for the widths of the lines used to draw the tree **Notes** - The extension on _fileName_ determines the type of image file created. All formats supported by PIL can be used. - if _ptColors_ is the wrong length for the number of possible terminal node types, this will throw an IndexError - terminal node types are determined using their _GetData()_ methods """ try: from rdkit.sping.PIL import pidPIL except ImportError: from rdkit.piddle import piddlePIL pidPIL = piddlePIL canvas = pidPIL.PILCanvas(size, fileName) if lineWidth is None: lineWidth = VisOpts.lineWidth DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth, showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale) if fileName: canvas.save() return canvas
djstripe/migrations/0008_2_5.py
ExtraE113/dj-stripe
937
11081140
# Generated by Django 3.2.3 on 2021-05-30 23:47 import django.db.models.deletion from django.conf import settings from django.db import migrations, models import djstripe.enums import djstripe.fields class Migration(migrations.Migration): dependencies = [ ("djstripe", "0007_2_4"), ] operations = [ migrations.RemoveField( model_name="subscription", name="tax_percent", ), migrations.RemoveField( model_name="countryspec", name="djstripe_owner_account", ), migrations.AddField( model_name="card", name="account", field=djstripe.fields.StripeForeignKey( blank=True, help_text="The external account the charge was made on behalf of. Null here indicates that this value was never set.", null=True, on_delete=django.db.models.deletion.PROTECT, related_name="cards", to="djstripe.account", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), migrations.AddField( model_name="card", name="default_for_currency", field=models.BooleanField( help_text="Whether this external account (Card) is the default account for its currency.", null=True, ), ), migrations.AlterField( model_name="bankaccount", name="account", field=djstripe.fields.StripeForeignKey( blank=True, help_text="The external account the charge was made on behalf of. Null here indicates that this value was never set.", null=True, on_delete=django.db.models.deletion.PROTECT, related_name="bank_accounts", to="djstripe.account", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), migrations.AlterField( model_name="bankaccount", name="default_for_currency", field=models.BooleanField( help_text="Whether this external account (BankAccount) is the default account for its currency.", null=True, ), ), migrations.RenameModel( old_name="FileUpload", new_name="File", ), migrations.CreateModel( name="FileLink", fields=[ ("djstripe_created", models.DateTimeField(auto_now_add=True)), ("djstripe_updated", models.DateTimeField(auto_now=True)), ( "djstripe_id", models.BigAutoField( primary_key=True, serialize=False, verbose_name="ID" ), ), ("id", djstripe.fields.StripeIdField(max_length=255, unique=True)), ( "livemode", models.BooleanField( blank=True, default=None, help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.", null=True, ), ), ( "created", djstripe.fields.StripeDateTimeField( blank=True, help_text="The datetime this object was created in stripe.", null=True, ), ), ( "metadata", djstripe.fields.JSONField( blank=True, help_text="A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.", null=True, ), ), ( "description", models.TextField( blank=True, help_text="A description of this object.", null=True ), ), ( "expires_at", djstripe.fields.StripeDateTimeField( blank=True, help_text="Time at which the link expires.", null=True, ), ), ( "url", models.URLField( help_text="The publicly accessible URL to download the file." ), ), ( "djstripe_owner_account", djstripe.fields.StripeForeignKey( blank=True, help_text="The Stripe Account this object belongs to.", null=True, on_delete=django.db.models.deletion.CASCADE, to="djstripe.account", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), ( "file", djstripe.fields.StripeForeignKey( on_delete=django.db.models.deletion.CASCADE, to="djstripe.file", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), ], options={ "get_latest_by": "created", "abstract": False, }, ), migrations.CreateModel( name="Mandate", fields=[ ("djstripe_created", models.DateTimeField(auto_now_add=True)), ("djstripe_updated", models.DateTimeField(auto_now=True)), ( "djstripe_id", models.BigAutoField( primary_key=True, serialize=False, verbose_name="ID" ), ), ("id", djstripe.fields.StripeIdField(max_length=255, unique=True)), ( "livemode", models.BooleanField( blank=True, default=None, help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.", null=True, ), ), ( "created", djstripe.fields.StripeDateTimeField( blank=True, help_text="The datetime this object was created in stripe.", null=True, ), ), ( "metadata", djstripe.fields.JSONField( blank=True, help_text="A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.", null=True, ), ), ( "description", models.TextField( blank=True, help_text="A description of this object.", null=True ), ), ( "customer_acceptance", djstripe.fields.JSONField( help_text="Details about the customer's acceptance of the mandate." ), ), ( "payment_method_details", djstripe.fields.JSONField( help_text="Additional mandate information specific to the payment method type." ), ), ( "status", djstripe.fields.StripeEnumField( enum=djstripe.enums.MandateStatus, help_text="The status of the mandate, which indicates whether it can be used to initiate a payment.", max_length=8, ), ), ( "type", djstripe.fields.StripeEnumField( enum=djstripe.enums.MandateType, help_text="The status of the mandate, which indicates whether it can be used to initiate a payment.", max_length=10, ), ), ( "multi_use", djstripe.fields.JSONField( blank=True, help_text="If this is a `multi_use` mandate, this hash contains details about the mandate.", null=True, ), ), ( "single_use", djstripe.fields.JSONField( blank=True, help_text="If this is a `single_use` mandate, this hash contains details about the mandate.", null=True, ), ), ( "djstripe_owner_account", djstripe.fields.StripeForeignKey( blank=True, help_text="The Stripe Account this object belongs to.", null=True, on_delete=django.db.models.deletion.CASCADE, to="djstripe.account", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), ( "payment_method", djstripe.fields.StripeForeignKey( on_delete=django.db.models.deletion.CASCADE, to="djstripe.paymentmethod", to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD, ), ), ], options={ "get_latest_by": "created", "abstract": False, }, ), migrations.AlterField( model_name="charge", name="source", field=djstripe.fields.PaymentMethodForeignKey( blank=True, help_text="The source used for this charge.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="charges", to="djstripe.djstripepaymentmethod", ), ), migrations.AlterField( model_name="customer", name="default_source", field=djstripe.fields.PaymentMethodForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="customers", to="djstripe.djstripepaymentmethod", ), ), ]
talk/src/_4_magics_good.py
zangyuchen2008/Clean-Code-in-Python-Second-Edition
133
11081162
""" magic methods: __contains__ Reimplementation of the request function, but this time calling the magic method implemented in the class, to make the code more readable. """ def request_product_for_customer(customer, product, current_stock): if product in current_stock: requested_product = current_stock.request(product) customer.assign_product(requested_product) else: return "Product not available"
ambari-server/src/test/python/TestSetupSso.py
samyzh/ambari
1,664
11081165
''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import os import platform import sys import unittest import StringIO from mock.mock import patch, MagicMock from only_for_platform import os_distro_value from ambari_commons import os_utils from urllib2 import HTTPError import shutil # Mock classes for reading from a file class MagicFile(object): def __init__(self, data): self.data = data def read(self): return self.data def __exit__(self, exc_type, exc_val, exc_tb): pass def __enter__(self): return self pass project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../")) shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties") # We have to use this import HACK because the filename contains a dash _search_file = os_utils.search_file def search_file_proxy(filename, searchpatch, pathsep=os.pathsep): global _search_file if "ambari.properties" in filename: return "/tmp/ambari.properties" return _search_file(filename, searchpatch, pathsep) os_utils.search_file = search_file_proxy with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))): with patch("os.path.isdir", return_value = MagicMock(return_value=True)): with patch("os.access", return_value = MagicMock(return_value=True)): with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}): with patch("platform.linux_distribution", return_value = os_distro_value): with patch("os.symlink"): with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']): _ambari_server_ = __import__('ambari-server') with patch("__builtin__.open"): from ambari_commons.exceptions import FatalException, NonFatalException from ambari_server.properties import Properties from ambari_server.setupSso import setup_sso, AMBARI_SSO_AUTH_ENABLED, \ SSO_PROVIDER_URL, SSO_CERTIFICATE, JWT_COOKIE_NAME, JWT_AUDIENCES, \ SSO_ENABLED_SERVICES, SSO_MANAGE_SERVICES class TestSetupSso(unittest.TestCase): @patch("ambari_server.setupSso.is_server_runing") def test_sso_setup_should_fail_if_server_is_not_running(self, is_server_runing_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (False, 0) options = self._create_empty_options_mock() try: setup_sso(options) self.fail("Should fail with non-fatal exception") except FatalException as e: self.assertTrue("Ambari Server is not running" in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") def test_silent_mode_is_not_allowed(self, is_server_runing_mock, get_silent_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = True options = self._create_empty_options_mock() try: setup_sso(options) self.fail("Should fail with fatal exception") except NonFatalException as e: self.assertTrue("setup-sso is not enabled in silent mode." in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") def test_invalid_sso_enabled_cli_option_should_result_in_error(self, is_server_runing_mock, get_silent_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False options = self._create_empty_options_mock() options.sso_enabled = 'not_true_or_false' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("--sso-enabled should be to either 'true' or 'false'" in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") def test_missing_sso_provider_url_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False options = self._create_empty_options_mock() options.sso_enabled = 'true' options.sso_provider_url = '' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("Missing option: --sso-provider-url" in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") def test_missing_sso_public_cert_file_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False options = self._create_empty_options_mock() options.sso_enabled = 'true' options.sso_public_cert_file = '' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("Missing option: --sso-public-cert-file" in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") def test_invalid_sso_provider_url_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock): out = StringIO.StringIO() sys.stdout = out is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False options = self._create_empty_options_mock() options.sso_enabled = 'true' options.sso_provider_url = '!invalidHost:invalidPort' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("Invalid --sso-provider-url" in e.reason) pass options.sso_provider_url = 'The SSO provider URL is https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("Invalid --sso-provider-url" in e.reason) pass options.sso_provider_url = 'https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso is the SSO provider URL' try: setup_sso(options) self.fail("Should fail with fatal exception") except FatalException as e: self.assertTrue("Invalid --sso-provider-url" in e.reason) pass sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.perform_changes_via_rest_api") @patch("ambari_server.setupSso.get_ambari_properties") @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") @patch("ambari_server.setupSso.get_json_via_rest_api") @patch('__builtin__.open') def test_all_cli_options_are_collected_when_enabling_sso(self, open_mock, get_json_via_rest_api_mock, is_server_runing_mock, get_silent_mock, get_ambari_properties_mock, perform_changes_via_rest_api_mock): out = StringIO.StringIO() sys.stdout = out certificate_data = '-----BEGIN CERTIFICATE-----\n' \ 'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \ '................................................................\n' \ 'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \ '-----END CERTIFICATE-----' mock_file = MagicFile(certificate_data) open_mock.side_effect = [mock_file] get_json_via_rest_api_mock.return_value = (200, {}) is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False properties = Properties() get_ambari_properties_mock.return_value = properties sso_enabled = 'true' sso_enabled_services = 'Ambari, SERVICE1, SERVICE2' sso_provider_url = 'https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso' sso_public_cert_file = '/test/file/path' sso_jwt_cookie_name = 'test_cookie' sso_jwt_audience_list = 'test, audience, list' options = self._create_empty_options_mock() options.sso_enabled = sso_enabled options.sso_enabled_ambari = 'true' options.sso_manage_services = 'true' options.sso_provider_url = sso_provider_url options.sso_public_cert_file = sso_public_cert_file options.sso_jwt_cookie_name = sso_jwt_cookie_name options.sso_jwt_audience_list = sso_jwt_audience_list options.sso_enabled_services = sso_enabled_services setup_sso(options) self.assertTrue(perform_changes_via_rest_api_mock.called) requestCall = perform_changes_via_rest_api_mock.call_args_list[0] args, kwargs = requestCall requestData = args[5] self.assertTrue(isinstance(requestData, dict)) ssoProperties = requestData['Configuration']['properties'] self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled) self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url) self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data) self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name) self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list) sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.perform_changes_via_rest_api") @patch("ambari_server.setupSso.get_ambari_properties") @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") @patch("ambari_server.setupSso.get_json_via_rest_api") @patch('__builtin__.open') def test_only_sso_enabled_cli_option_is_collected_when_disabling_sso(self, open_mock, get_json_via_rest_api_mock, is_server_runing_mock, get_silent_mock, get_ambari_properties_mock, perform_changes_via_rest_api_mock): out = StringIO.StringIO() sys.stdout = out certificate_data = '-----BEGIN CERTIFICATE-----\n' \ 'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \ '................................................................\n' \ 'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \ '-----END CERTIFICATE-----' mock_file = MagicFile(certificate_data) open_mock.side_effect = [mock_file] get_json_via_rest_api_mock.return_value = (200, {}) is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False properties = Properties() get_ambari_properties_mock.return_value = properties sso_enabled = 'false' sso_provider_url = 'http://testHost:8080' sso_public_cert_file = '/test/file/path' sso_jwt_cookie_name = 'test_cookie' sso_jwt_audience_list = 'test, audience, list' options = self._create_empty_options_mock() options.sso_enabled = sso_enabled options.sso_provider_url = sso_provider_url options.sso_public_cert_file = sso_public_cert_file options.sso_jwt_cookie_name = sso_jwt_cookie_name options.sso_jwt_audience_list = sso_jwt_audience_list setup_sso(options) self.assertTrue(perform_changes_via_rest_api_mock.called) requestCall = perform_changes_via_rest_api_mock.call_args_list[0] args, kwargs = requestCall requestMethod = args[4] self.assertTrue(isinstance(requestMethod, str)) self.assertEqual(requestMethod, "DELETE") sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.perform_changes_via_rest_api") @patch("ambari_server.setupSso.get_YN_input") @patch("ambari_server.setupSso.get_ambari_properties") @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") @patch("ambari_server.setupSso.get_json_via_rest_api") @patch('__builtin__.open') def test_sso_is_enabled_for_all_services_via_user_input(self, open_mock, get_json_via_rest_api_mock, is_server_runing_mock, get_silent_mock, get_ambari_properties_mock, get_YN_input_mock, perform_changes_via_rest_api_mock): out = StringIO.StringIO() sys.stdout = out certificate_data = '-----BEGIN CERTIFICATE-----\n' \ 'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \ '................................................................\n' \ 'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \ '-----END CERTIFICATE-----' mock_file = MagicFile(certificate_data) open_mock.side_effect = [mock_file] get_json_via_rest_api_mock.return_value = (200, {}) is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False get_ambari_properties_mock.return_value = Properties() def yn_input_side_effect(*args, **kwargs): if 'Manage SSO configurations' in args[0]: return True elif 'Manage SSO configurations' in args[0]: return True elif 'all services' in args[0]: return True else: raise Exception("ShouldNotBeInvoked") # only the 'Use SSO for all services' question should be asked for now get_YN_input_mock.side_effect = yn_input_side_effect sso_enabled = 'true' sso_provider_url = 'http://testHost:8080' sso_public_cert_file = '/test/file/path' sso_jwt_cookie_name = 'test_cookie' sso_jwt_audience_list = 'test, audience, list' options = self._create_empty_options_mock() options.sso_enabled = sso_enabled options.sso_enabled_ambari = 'true' options.sso_manage_services = 'true' options.sso_provider_url = sso_provider_url options.sso_public_cert_file = sso_public_cert_file options.sso_jwt_cookie_name = sso_jwt_cookie_name options.sso_jwt_audience_list = sso_jwt_audience_list setup_sso(options) self.assertTrue(perform_changes_via_rest_api_mock.called) requestCall = perform_changes_via_rest_api_mock.call_args_list[0] args, kwargs = requestCall requestData = args[5] self.assertTrue(isinstance(requestData, dict)) ssoProperties = requestData['Configuration']['properties'] self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled) self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url) self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data) self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name) self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list) self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true") self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "*") sys.stdout = sys.__stdout__ pass @patch("ambari_server.setupSso.perform_changes_via_rest_api") @patch("urllib2.urlopen") @patch("ambari_server.setupSso.get_cluster_name") @patch("ambari_server.setupSso.get_YN_input") @patch("ambari_server.setupSso.get_ambari_properties") @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") @patch('__builtin__.open') def test_setup_sso_should_not_fail_when_sso_config_cannot_be_loaded_due_to_404_error(self, open_mock, is_server_runing_mock, get_silent_mock, get_ambari_properties_mock, get_YN_input_mock, get_cluster_name_mock, urlopen_mock, perform_changes_via_rest_api_mock): out = StringIO.StringIO() sys.stdout = out certificate_data = '-----BEGIN CERTIFICATE-----\n' \ 'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \ '................................................................\n' \ 'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \ '-----END CERTIFICATE-----' mock_file = MagicFile(certificate_data) open_mock.side_effect = [mock_file] is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False get_ambari_properties_mock.return_value = Properties() get_cluster_name_mock.return_value = 'cluster1' get_YN_input_mock.__return_value = True urlopen_mock.side_effect = HTTPError(MagicMock(status=404), 404, 'not found', None, None) sso_enabled = 'true' sso_provider_url = 'http://testHost:8080' sso_public_cert_file = '/test/file/path' sso_jwt_cookie_name = 'test_cookie' sso_jwt_audience_list = 'test, audience, list' options = self._create_empty_options_mock() options.sso_enabled = sso_enabled options.sso_enabled_ambari = sso_enabled options.sso_manage_services = 'true' options.sso_provider_url = sso_provider_url options.sso_public_cert_file = sso_public_cert_file options.sso_jwt_cookie_name = sso_jwt_cookie_name options.sso_jwt_audience_list = sso_jwt_audience_list setup_sso(options) self.assertTrue(perform_changes_via_rest_api_mock.called) requestCall = perform_changes_via_rest_api_mock.call_args_list[0] args, kwargs = requestCall requestData = args[5] self.assertTrue(isinstance(requestData, dict)) ssoProperties = requestData['Configuration']['properties'] self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled) self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url) self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data) self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name) self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list) self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true") self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "*") @patch("urllib2.urlopen") @patch("ambari_server.setupSso.perform_changes_via_rest_api") @patch("ambari_server.setupSso.get_cluster_name") @patch("ambari_server.setupSso.get_YN_input") @patch("ambari_server.setupSso.get_ambari_properties") @patch("ambari_server.setupSso.get_silent") @patch("ambari_server.setupSso.is_server_runing") @patch("ambari_server.setupSso.get_json_via_rest_api") @patch('__builtin__.open') def test_sso_enabled_services_are_collected_via_user_input(self, open_mock, get_json_via_rest_api_mock, is_server_runing_mock, get_silent_mock, get_ambari_properties_mock, get_YN_input_mock, get_cluster_name_mock, perform_changes_via_rest_api_mock, urlopen_mock): out = StringIO.StringIO() sys.stdout = out certificate_data = '-----BEGIN CERTIFICATE-----\n' \ 'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \ '................................................................\n' \ 'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \ '-----END CERTIFICATE-----' mock_file = MagicFile(certificate_data) open_mock.side_effect = [mock_file] eligible_services = \ """ { "href": "http://c7401:8080/api/v1/clusters/cluster1/services?ServiceInfo/sso_integration_supported=true", "items": [ { "href": "http://c7401:8080/api/v1/clusters/cluster1/services/HDFS", "ServiceInfo": { "cluster_name": "cluster1", "service_name": "HDFS", "sso_integration_supported": true, "sso_integration_requires_kerberos": false, "kerberos_enabled": false } }, { "href": "http://c7401:8080/api/v1/clusters/cluster1/services/ZOOKEPER", "ServiceInfo": { "cluster_name": "cluster1", "service_name": "ZOOKEPER", "sso_integration_supported": true, "sso_integration_requires_kerberos": false, "kerberos_enabled": false } } ] } """ eligible_services_json = { "href": "http://c7401:8080/api/v1/clusters/cluster1/services?ServiceInfo/sso_integration_supported=true", "items": [ { "href": "http://c7401:8080/api/v1/clusters/cluster1/services/HDFS", "ServiceInfo": { "cluster_name": "cluster1", "service_name": "HDFS", "sso_integration_supported": True, "sso_integration_requires_kerberos": False, "kerberos_enabled": False } }, { "href": "http://c7401:8080/api/v1/clusters/cluster1/services/ZOOKEPER", "ServiceInfo": { "cluster_name": "cluster1", "service_name": "ZOOKEPER", "sso_integration_supported": True, "sso_integration_requires_kerberos": False, "kerberos_enabled": False } } ] } get_json_via_rest_api_mock.return_value = (200, {}) get_json_via_rest_api_mock.return_value = (200, eligible_services_json) is_server_runing_mock.return_value = (True, 0) get_silent_mock.return_value = False get_ambari_properties_mock.return_value = Properties() get_cluster_name_mock.return_value = 'cluster1' def yn_input_side_effect(*args, **kwargs): if 'all services' in args[0]: return False else: return True get_YN_input_mock.side_effect = yn_input_side_effect response = MagicMock() response.getcode.return_value = 200 response.read.return_value = eligible_services urlopen_mock.return_value = response options = self._create_empty_options_mock() options.sso_enabled = 'true' options.sso_enabled_ambari = 'true' options.sso_manage_services = 'true' options.sso_provider_url = 'http://testHost:8080' options.sso_public_cert_file = '/test/file/path' options.sso_jwt_cookie_name = 'test_cookie' options.sso_jwt_audience_list = 'test, audience, list' setup_sso(options) self.assertTrue(perform_changes_via_rest_api_mock.called) requestCall = perform_changes_via_rest_api_mock.call_args_list[0] args, kwargs = requestCall requestData = args[5] self.assertTrue(isinstance(requestData, dict)) ssoProperties = requestData['Configuration']['properties'] self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true") self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "HDFS,ZOOKEPER") sys.stdout = sys.__stdout__ pass def _create_empty_options_mock(self): options = MagicMock() options.sso_enabled = None options.sso_enabled_ambari = None options.sso_manage_services = None options.sso_enabled_services = None options.sso_provider_url = None options.sso_public_cert_file = None options.sso_jwt_cookie_name = None options.sso_jwt_audience_list = None return options
trench/command/activate_mfa_method.py
eriol/django-trench
190
11081170
<reponame>eriol/django-trench<gh_stars>100-1000 from typing import Callable, Set, Type from trench.backends.provider import get_mfa_handler from trench.command.generate_backup_codes import generate_backup_codes_command from trench.command.replace_mfa_method_backup_codes import ( regenerate_backup_codes_for_mfa_method_command, ) from trench.exceptions import MFAMethodDoesNotExistError from trench.models import MFAMethod from trench.utils import get_mfa_model class ActivateMFAMethodCommand: def __init__( self, mfa_model: Type[MFAMethod], backup_codes_generator: Callable ) -> None: self._mfa_model = mfa_model self._backup_codes_generator = backup_codes_generator def execute(self, user_id: int, name: str, code: str) -> Set[str]: mfa = self._mfa_model.objects.get_by_name(user_id=user_id, name=name) get_mfa_handler(mfa).confirm_activation(code) rows_affected = self._mfa_model.objects.filter( user_id=user_id, name=name ).update( is_active=True, is_primary=not self._mfa_model.objects.primary_exists(user_id=user_id), ) if rows_affected < 1: raise MFAMethodDoesNotExistError() backup_codes = regenerate_backup_codes_for_mfa_method_command( user_id=user_id, name=name, ) return backup_codes activate_mfa_method_command = ActivateMFAMethodCommand( mfa_model=get_mfa_model(), backup_codes_generator=generate_backup_codes_command, ).execute
Chapter03/Chapter_3_decomposition.py
stciaischoolrnn/Practical-Time-Series-Analysis
267
11081179
<reponame>stciaischoolrnn/Practical-Time-Series-Analysis<gh_stars>100-1000 # Import modules import requests import statsmodels.api as sm import io import pandas as pd # Load Dataset DATA_URL="http://robjhyndman.com/tsdldata/data/nybirths.dat" fopen = requests.get(DATA_URL).content ds=pd.read_csv(io.StringIO(fopen.decode('utf-8')), header=None, names=['birthcount']) print(ds.head()) # Add time index date=pd.date_range("1946-01-01", "1959-12-31", freq="1M") ds['Date']=pd.DataFrame(date) ds = ds.set_index('Date') # decompose dataset res = sm.tsa.seasonal_decompose(ds.birthcount, model="multiplicative") resplot = res.plot()
sphinxtogithub/tests/filehandler.py
davvid/sphinx-to-github
117
11081201
import unittest import sphinxtogithub class MockFileObject(object): before = """ <title>Breathe's documentation &mdash; BreatheExample v0.0.1 documentation</title> <link rel="stylesheet" href="_static/default.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" /> """ after = """ <title>Breathe's documentation &mdash; BreatheExample v0.0.1 documentation</title> <link rel="stylesheet" href="static/default.css" type="text/css" /> <link rel="stylesheet" href="static/pygments.css" type="text/css" /> """ def read(self): return self.before def write(self, text): self.written = text class MockOpener(object): def __init__(self): self.file_object = MockFileObject() def __call__(self, name, readmode="r"): self.name = name return self.file_object class TestFileHandler(unittest.TestCase): def testProcess(self): filepath = "filepath" opener = MockOpener() file_handler = sphinxtogithub.FileHandler(filepath, [], opener) file_handler.process() self.assertEqual(opener.file_object.written, MockFileObject.before) self.assertEqual(opener.name, filepath) def testProcessWithReplacers(self): filepath = "filepath" replacers = [] replacers.append(sphinxtogithub.Replacer("_static/default.css", "static/default.css")) replacers.append(sphinxtogithub.Replacer("_static/pygments.css", "static/pygments.css")) opener = MockOpener() file_handler = sphinxtogithub.FileHandler(filepath, replacers, opener) file_handler.process() self.assertEqual(opener.file_object.written, MockFileObject.after) def testSuite(): suite = unittest.TestSuite() suite.addTest(TestFileHandler("testProcess")) suite.addTest(TestFileHandler("testProcessWithReplacers")) return suite
tests/st/networks/models/deeplabv3/src/miou_precision.py
GuoSuiming/mindspore
3,200
11081218
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """mIou.""" import numpy as np from mindspore.nn.metrics.metric import Metric def confuse_matrix(target, pred, n): k = (target >= 0) & (target < n) return np.bincount(n * target[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n) def iou(hist): denominator = hist.sum(1) + hist.sum(0) - np.diag(hist) res = np.diag(hist) / np.where(denominator > 0, denominator, 1) res = np.sum(res) / np.count_nonzero(denominator) return res class MiouPrecision(Metric): """Calculate miou precision.""" def __init__(self, num_class=21): super(MiouPrecision, self).__init__() if not isinstance(num_class, int): raise TypeError('num_class should be integer type, but got {}'.format(type(num_class))) if num_class < 1: raise ValueError('num_class must be at least 1, but got {}'.format(num_class)) self._num_class = num_class self._mIoU = [] self.clear() def clear(self): self._hist = np.zeros((self._num_class, self._num_class)) self._mIoU = [] def update(self, *inputs): if len(inputs) != 2: raise ValueError('Need 2 inputs (y_pred, y), but got {}'.format(len(inputs))) predict_in = self._convert_data(inputs[0]) label_in = self._convert_data(inputs[1]) if predict_in.shape[1] != self._num_class: raise ValueError('Class number not match, last input data contain {} classes, but current data contain {} ' 'classes'.format(self._num_class, predict_in.shape[1])) pred = np.argmax(predict_in, axis=1) label = label_in if len(label.flatten()) != len(pred.flatten()): print('Skipping: len(gt) = {:d}, len(pred) = {:d}'.format(len(label.flatten()), len(pred.flatten()))) raise ValueError('Class number not match, last input data contain {} classes, but current data contain {} ' 'classes'.format(self._num_class, predict_in.shape[1])) self._hist = confuse_matrix(label.flatten(), pred.flatten(), self._num_class) mIoUs = iou(self._hist) self._mIoU.append(mIoUs) def eval(self): """ Computes the mIoU categorical accuracy. """ mIoU = np.nanmean(self._mIoU) print('mIoU = {}'.format(mIoU)) return mIoU
tests/toranj/test-018-child-supervision.py
AdityaHPatwardhan/openthread
2,962
11081224
#!/usr/bin/env python3 # # Copyright (c) 2018, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import time import wpan from wpan import verify # ----------------------------------------------------------------------------------------------------------------------- # Test description: Child Supervision feature # # This test covers the behavior of Child Supervision feature. # # This test uses MAC allowlisting to emulate the situation where a child is # removed from parent's child table while the child continues to stay attached # to the parent (since data polls from child are acked at radio platform layer). # Specifically the test verifies that once supervision check is enabled on the # child, the child detects that it is no longer present in the parent's table # and tries to re-attach. # The test verifies the behavior of both parent and child, when supervision is # enabled. It verifies that parent is periodically sending supervision messages # to the child and that the child is monitoring the messages. # # This test also indirectly verifies the child timeout on parent. # test_name = __file__[:-3] if __file__.endswith('.py') else __file__ print('-' * 120) print('Starting \'{}\''.format(test_name)) # ----------------------------------------------------------------------------------------------------------------------- # Creating `wpan.Nodes` instances speedup = 2 wpan.Node.set_time_speedup_factor(speedup) parent = wpan.Node() child = wpan.Node() # ----------------------------------------------------------------------------------------------------------------------- # Init all nodes wpan.Node.init_all_nodes() # ----------------------------------------------------------------------------------------------------------------------- # Build network topology CHILD_TIMEOUT = 6 CHILD_SUPERVISION_CHECK_TIMEOUT = 2 PARENT_SUPERVISION_INTERVAL = 1 child.set(wpan.WPAN_POLL_INTERVAL, '500') child.set(wpan.WPAN_THREAD_CHILD_TIMEOUT, str(CHILD_TIMEOUT)) parent.form("child-sup") child.join_node(parent, wpan.JOIN_TYPE_SLEEPY_END_DEVICE) # ----------------------------------------------------------------------------------------------------------------------- # Test implementation # Disable child supervision on child and parent parent.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, '0') child.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, '0') verify(int(parent.get(wpan.WPAN_CHILD_SUPERVISION_INTERVAL), 0) == 0) verify(int(child.get(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT), 0) == 0) # Check that the child is associated and has correct timeout verify(child.is_associated()) verify(int(child.get(wpan.WPAN_THREAD_CHILD_TIMEOUT), 0) == CHILD_TIMEOUT) # Verify the child table on parent contains the child with correct timeout child_table = wpan.parse_child_table_result(parent.get(wpan.WPAN_THREAD_CHILD_TABLE)) verify(len(child_table) == 1) verify(int(child_table[0].timeout, 0) == CHILD_TIMEOUT) # Enabling allowlisting on parent # # Since child is not in parent's allowlist, the data polls from child # should be rejected and the child should be removed from parent's # child table after timeout. The child however should continue to # stay attached (since data polls are acked by radio driver) and # supervision check is disabled on the child. parent.set(wpan.WPAN_MAC_ALLOWLIST_ENABLED, '1') def check_child_is_removed_from_parent_child_table(): child_table = wpan.parse_child_table_result(parent.get(wpan.WPAN_THREAD_CHILD_TABLE)) verify(len(child_table) == 0) # wait till child is removed from parent's child table # after this child should still be associated wpan.verify_within(check_child_is_removed_from_parent_child_table, CHILD_TIMEOUT / speedup + 2) verify(child.is_associated()) # Enable supervision check on child and expect the child to # become detached after the check timeout child.set( wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT), ) def check_child_is_detached(): verify(not child.is_associated()) wpan.verify_within(check_child_is_detached, CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 8) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Enable child supervision on parent and disable allowlisting parent.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL)) parent.set(wpan.WPAN_MAC_ALLOWLIST_ENABLED, '0') # Wait for the child to attach back def check_child_is_attached(): verify(child.is_associated()) wpan.verify_within(check_child_is_attached, 5) # MAC counters are used to verify the child supervision behavior. parent_unicast_tx_count = int(parent.get("NCP:Counter:TX_PKT_UNICAST"), 0) time.sleep(PARENT_SUPERVISION_INTERVAL * 1.2 / speedup) # To verify that the parent is indeed sending empty "supervision" # messages to its child, MAC counter for number of unicast tx is # used. Note that supervision interval on parent is set to 1 sec. verify(int(parent.get("NCP:Counter:TX_PKT_UNICAST"), 0) >= parent_unicast_tx_count + 1) verify(child.is_associated()) # Disable child supervision on parent parent.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, '0') time.sleep(CHILD_SUPERVISION_CHECK_TIMEOUT * 3 / speedup) verify(child.is_associated()) # ----------------------------------------------------------------------------------------------------------------------- # Test finished wpan.Node.finalize_all_nodes() print('\'{}\' passed.'.format(test_name))
core/pyutil_test.py
Schweinepriester/oil
2,209
11081236
#!/usr/bin/env python2 """ pyutil_test.py: Tests for pyutil.py """ from __future__ import print_function import unittest import pyutil # module under test class PyUtilTest(unittest.TestCase): def testBackslashEscape(self): print(pyutil.BackslashEscape('foo', 'o')) if __name__ == '__main__': unittest.main()
backend_cmds.py
brandon-leapyear/SublimeHaskell
452
11081303
import threading import sublime import sublime_plugin import SublimeHaskell.internals.backend_mgr as BackendManager import SublimeHaskell.internals.utils as Utils import SublimeHaskell.sublime_haskell_common as Common import SublimeHaskell.internals.settings as Settings class SublimeHaskellStartBackend(sublime_plugin.WindowCommand): def __init__(self, window): super().__init__(window) self.busy = False def run(self, **_args): # Prevents the Python main thread from blocking. Utils.run_async(type(self).__name__ + '.do_startup', self.do_startup) def do_startup(self): backend_mgr = BackendManager.BackendManager() with Common.status_message_process('Starting up {0} backend'.format(backend_mgr.current_backend_name), priority=1): try: self.busy = True backend_mgr.set_state(BackendManager.BackendManager.INITIAL) backend_mgr.initialize() finally: self.busy = False cabal_project_status(self.window.active_view(), BackendManager.BackendManager()) def is_enabled(self): return not self.busy and BackendManager.BackendManager().is_inactive_state() class SublimeHaskellStopBackend(sublime_plugin.WindowCommand): def __init__(self, window): super().__init__(window) self.busy = False def run(self, **_args): # Prevents the Python main thread from blocking. Utils.run_async(type(self).__name__ + '.do_shutdown', self.do_shutdown) def do_shutdown(self): backend_mgr = BackendManager.BackendManager() with Common.status_message_process('Shutting down {0} backend'.format(backend_mgr.current_backend_name), priority=1): try: self.busy = True backend_mgr.shutdown_backend() finally: self.busy = False cabal_project_status(self.window.active_view(), BackendManager.BackendManager()) def is_enabled(self): return not (self.busy or BackendManager.BackendManager().is_inactive_state()) class SublimeHaskellRestartBackend(sublime_plugin.WindowCommand): def __init__(self, window): super().__init__(window) self.restart_ev = threading.Event() self.restart_ev.clear() def run(self, **_args): Utils.run_async('restarting backend', self.do_restart) def is_enabled(self): return not (self.restart_ev.is_set() or BackendManager.BackendManager().is_inactive_state()) def do_restart(self): self.restart_ev.set() try: SublimeHaskellStopBackend(self.window).do_shutdown() SublimeHaskellStartBackend(self.window).do_startup() finally: self.restart_ev.clear() cabal_project_status(self.window.active_view(), BackendManager.BackendManager()) class SublimeHaskellChooseBackend(sublime_plugin.WindowCommand): def __init__(self, window): super().__init__(window) self.backends = {} self.backend_names = [] def run(self, **_args): backend_mgr = BackendManager.BackendManager() # Rescan for backends to ensure we have the most up-to-date list... backend_mgr.possible_backends = backend_mgr.filter_possible(Settings.PLUGIN.backends) if backend_mgr.possible_backends: print('plugin \'backends\' {0}'.format([name for name in Settings.PLUGIN.backends])) print('Possible/usable \'backends\': {0}'.format([name for name in backend_mgr.possible_backends])) if len(backend_mgr.possible_backends) > 1: self.backend_names = [name for name in backend_mgr.possible_backends] self.backend_names.sort() self.window.show_quick_panel(self.backend_names, self.change_backend) elif len(backend_mgr.possible_backends) == 1: backend_name = list(backend_mgr.possible_backends)[0] sublime.message_dialog('Only one backend, \'{0}\', available -- starting it.'.format(backend_name)) self.start_new_backend(backend_name) else: backend_mgr.no_backends_available() def change_backend(self, idx): if idx >= 0: Utils.run_async('change backend: startup', self.start_new_backend, self.backend_names[idx]) def start_new_backend(self, backend_name): with Common.status_message_process('Changing backend to \'{0}\''.format(backend_name), priority=2): BackendManager.BackendManager().change_current_backend(backend_name) cabal_project_status(self.window.active_view(), BackendManager.BackendManager()) def cabal_project_status(view, backend_mgr): vsettings = view.settings() project_name = vsettings.get(Settings.SETTING_SUBHASK_PROJECT) if project_name is None: project_name = '_unknown_' active_backend = backend_mgr.active_backend() view.set_status('sublime_haskell_cabal', 'cabal: {0} [{1}]'.format(project_name, active_backend.backend_name()))
env/lib/python3.8/site-packages/_plotly_future_/__init__.py
acrucetta/Chicago_COVI_WebApp
11,750
11081308
import warnings import functools # Initialize _future_flags with all future flags that are now always in # effect. _future_flags = { "renderer_defaults", "template_defaults", "extract_chart_studio", "remove_deprecations", "v4_subplots", "orca_defaults", "timezones", "trace_uids", } def _assert_plotly_not_imported(): import sys if "plotly" in sys.modules: raise ImportError( """\ The _plotly_future_ module must be imported before the plotly module""" ) warnings.filterwarnings( "default", ".*?is deprecated, please use chart_studio*", DeprecationWarning ) def _chart_studio_warning(submodule): warnings.warn( "The plotly.{submodule} module is deprecated, " "please use chart_studio.{submodule} instead".format(submodule=submodule), DeprecationWarning, stacklevel=2, ) def _chart_studio_error(submodule): raise ImportError( """ The plotly.{submodule} module is deprecated, please install the chart-studio package and use the chart_studio.{submodule} module instead. """.format( submodule=submodule ) ) def _chart_studio_deprecation(fn): fn_name = fn.__name__ fn_module = fn.__module__ plotly_name = ".".join(["plotly"] + fn_module.split(".")[1:] + [fn_name]) chart_studio_name = ".".join( ["chart_studio"] + fn_module.split(".")[1:] + [fn_name] ) msg = """\ {plotly_name} is deprecated, please use {chart_studio_name}\ """.format( plotly_name=plotly_name, chart_studio_name=chart_studio_name ) @functools.wraps(fn) def wrapper(*args, **kwargs): warnings.warn(msg, DeprecationWarning, stacklevel=2) return fn(*args, **kwargs) return wrapper __all__ = ["_future_flags", "_chart_studio_error"]
anomalib/models/components/freia/__init__.py
openvinotoolkit/anomalib
689
11081309
"""Framework for Easily Invertible Architectures. Module to construct invertible networks with pytorch, based on a graph structure of operations. Link to the original repo: https://github.com/VLL-HD/FrEIA """ # Copyright (c) 2018-2022 <NAME>, Visual Learning Lab Heidelberg. # SPDX-License-Identifier: MIT # from .framework import SequenceINN from .modules import AllInOneBlock __all__ = ["SequenceINN", "AllInOneBlock"]
recipes/Python/577257_slugify_make_string_usable_URL_or/recipe-577257.py
tdiprima/code
2,023
11081395
<gh_stars>1000+ _slugify_strip_re = re.compile(r'[^\w\s-]') _slugify_hyphenate_re = re.compile(r'[-\s]+') def _slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. From Django's "django/template/defaultfilters.py". """ import unicodedata if not isinstance(value, unicode): value = unicode(value) value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(_slugify_strip_re.sub('', value).strip().lower()) return _slugify_hyphenate_re.sub('-', value)
tensornet/feature_column/category_column.py
ZhangYaoFu/tensornet
273
11081398
# Copyright (c) 2020, Qihoo, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- # Author: zhangyansheng <<EMAIL>> # Copyright(C) 360.cn, all rights reserved. # Date: 2020/02/10 import collections from tensorflow.python.feature_column import feature_column_v2 as fc from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.ops import parsing_ops class CategoryColumn(fc.CategoricalColumn, collections.namedtuple('CategoryColumn', ('key', 'bucket_size'))): def _is_v2_column(self): return True @property def name(self): return self.key @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.bucket_size def transform_feature(self, transformation_cache, state_manager): """mapping the values in the feature_column.""" input_tensor = transformation_cache.get(self.key, state_manager) if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError('CategoryColumn input must be a SparseTensor.') sparse_id_values = state_manager.get_feature_mapping_values(self.name) return sparse_tensor_lib.SparseTensor( input_tensor.indices, sparse_id_values, input_tensor.dense_shape) def parse_example_spec(self): return {self.key: parsing_ops.VarLenFeature(dtypes.string)} @property def parents(self): return [self.key] def get_sparse_tensors(self, transformation_cache, state_manager): return fc.CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) def _get_config(self): config = dict(zip(self._fields, self)) config['dtype'] = dtypes.string.name return config @classmethod def _from_config(cls, config, custom_objects=None, columns_by_name=None): fc._check_config_keys(config, cls._fields) kwargs = fc._standardize_and_copy_config(config) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) def category_column(key, bucket_size=1024): """Represents sparse feature where ids are set by hashing. """ return CategoryColumn(key, bucket_size)
awesome_autodl/utils/yaml.py
yashsmehta/Awesome-AutoDL
1,135
11081407
##################################################### # Copyright (c) <NAME> [GitHub D-X-Y], 2020.08 # ##################################################### import yaml from pathlib import Path def load_yaml(file_path): with open(str(file_path), "r") as cfile: data = yaml.safe_load(cfile) return data def dump_yaml(data, indent=2, path=None): class NoAliasSafeDumper(yaml.SafeDumper): def ignore_aliases(self, data): return True xstr = yaml.dump( data, None, allow_unicode=True, Dumper=NoAliasSafeDumper, indent=indent, sort_keys=False, ) if path is not None: parent_dir = Path(path).resolve().parent parent_dir.mkdir(parents=True, exist_ok=True) with open(str(path), "w") as cfile: cfile.write(xstr) return xstr
test_settings.py
simiotics/djangorestframework-queryfields
195
11081414
SECRET_KEY = "Something you tell everybody to tell nobody" INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'drf_queryfields', ) ROOT_URLCONF = 'tests.app.urls'
src/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py
pazamelin/openvino
2,406
11081431
<reponame>pazamelin/openvino<filename>src/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np import ngraph as ng from ngraph.impl import Type def test_scatter_update_props(): dtype = np.int8 parameter_r = ng.parameter([2, 3, 4], dtype=dtype, name="data") parameter_i = ng.parameter([2, 1], dtype=dtype, name="indices") parameter_u = ng.parameter([2, 2, 1, 4], dtype=dtype, name="updates") axis = np.array([1], dtype=np.int8) node = ng.scatter_update(parameter_r, parameter_i, parameter_u, axis) assert node.get_type_name() == "ScatterUpdate" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [2, 3, 4] assert node.get_output_element_type(0) == Type.i8 def test_scatter_update_elements_props(): dtype = np.int8 parameter_r = ng.parameter([2, 4, 5, 7], dtype=dtype, name="data") parameter_i = ng.parameter([2, 2, 2, 2], dtype=dtype, name="indices") parameter_u = ng.parameter([2, 2, 2, 2], dtype=dtype, name="updates") axis = np.array([1], dtype=np.int8) node = ng.scatter_elements_update(parameter_r, parameter_i, parameter_u, axis) assert node.get_type_name() == "ScatterElementsUpdate" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [2, 4, 5, 7] assert node.get_output_element_type(0) == Type.i8
python/cuxfilter/layouts/chart_views.py
ajschmidt8/cuxfilter
201
11081458
<reponame>ajschmidt8/cuxfilter<gh_stars>100-1000 import panel as pn def chart_view(*charts, **params): """ Parameters: ----------- - charts - **params Ouput: ------ layout view """ view = pn.layout.Card(**params, sizing_mode="stretch_both") for chart in charts: if chart is not None: view.append(chart) return view
src/templates/java_templating.py
jeff5/jython-whinchat
577
11081504
<filename>src/templates/java_templating.py # copyright 2004-2005 <NAME> import cStringIO import java_parser from java_parser import UnknownScheme, make as jast_make from java_parser import make_id,make_literal import java_nodes as jast from java_pretty import JavaPretty, NodeVisitTracker # templating JavaPretty class JavaTemplatePretty(JavaPretty): bindings = {} # -*- # placeholders, fragments def get_binding(self,placeholder_tok): return self.bindings.get(placeholder_tok.value[1:],None) def make_generic_plh_visit(to_expect,add_semicolon=[],needs_nl=0): def plh_visit(self,node,ctxt): plh = node.Placeholder.PLACEHOLDER binding = self.get_binding(plh) if binding: paren = [] if node.Placeholder.has('Fragments'): paren.append(node.Placeholder.PLHSTARTPARMS) args = [] frags = node.Placeholder.Fragments for j in range(0,len(frags),2): frag = frags[j] if isinstance(frag,jast.OneProtectedFragment): frag = frag.Fragment args.append(JavaTemplate(frag,bindings=self.bindings)) paren.append(node.Placeholder.RPAREN) else: args = None if not isinstance(node.children[0],jast.Placeholder): # CASE, IMPORT ... self.emit_tok(node.children[0],'ia') if paren: self.emit_tok(plh,'ia') self.emit_tok(paren[0],'i') else: self.emit_tok(plh,'i') kind,r = binding.tvisit(self,args=args,paren=paren, expect=to_expect, ctxt=ctxt) if paren: self.emit_tok(paren[1],'a') else: self.emit_tok(plh,'a') if node.has('SEMICOLON'): if kind in add_semicolon: self.emit_tok(node.SEMICOLON) else: self.emit_tok(node.SEMICOLON,'ia') return r else: r = self.default_visit(node,ctxt) if needs_nl: self.nl() return r return plh_visit visit_BlockStatementPlaceholder = make_generic_plh_visit( ('BlockStatements','Expression'), add_semicolon=('Expression',)) visit_StatementPlaceholder = make_generic_plh_visit( ('Statement','Expression'), add_semicolon=('Expression',)) visit_SwitchBlockStatementGroupInSeqPlaceholder = make_generic_plh_visit( ('SwitchBlockStatementGroupInSeq',),needs_nl=1) visit_ExpressionInSeqPlaceholder = make_generic_plh_visit( ('ExpressionInSeq',)) visit_ClassBodyDeclarationPlaceholder = make_generic_plh_visit( ('ClassBodyDeclarations',)) visit_InterfaceBodyDeclarationPlaceholder = make_generic_plh_visit( ('ClassBodyDeclarations',)) visit_FormalParameterInSeqPlaceholder = make_generic_plh_visit( ('FormalParameterInSeq',)) visit_IdentifierOptPlaceholder = make_generic_plh_visit( ('Identifier','Empty')) visit_IdentifierPlaceholder = make_generic_plh_visit( ('Identifier',)) visit_TypePlaceholder = make_generic_plh_visit( ('TypeOrVOID',)) visit_PrimaryPlaceholder = make_generic_plh_visit( ('Primary',)) visit_SelectorPlaceholder = make_generic_plh_visit( ('Selector',)) # - * - _recast_table = {} def recast(fragment,paren,expect): spec = fragment.spec for kind in expect: recast_func = _recast_table.get((spec,kind),None) if recast_func: node = recast_func(fragment,paren) if node is not None: return kind,node raise Exception,"cannot recast %s as %s" % (spec,"|".join(expect)) def fill_recast_table(): for name,func in globals().items(): if name.startswith('recast_'): spec,kind = name[len('recast_'):].split('__') if spec == '': spec = () else: spec = tuple(spec.split('_')) #print name,"->",spec,kind _recast_table[(spec,kind)] = func # fragment recasts def recast_Identifier__Identifier(frag,paren): return frag def recast_Literal__Primary(frag,paren): return jast_make(jast.Primary,Literal=frag) recast_Literal__ExpressionInSeq = recast_Literal__Primary def recast_Primary__Primary(frag,paren): return frag def recast_Primary__Expression(frag,paren): return frag def recast_Primary__ExpressionInSeq(frag,paren): return frag def recast_QualifiedIdentifier__TypeOrVOID(frag,paren): return jast_make(jast.Type, QualifiedIdentifier = frag, BracketsOpt = jast_make(jast.Brackets)) def recast_QualifiedIdentifier__Identifier(frag,paren): if len(frag.QualifiedIdentifier) == 1: return frag.QualifiedIdentifier[0] def recast_QualifiedIdentifier__Primary(frag,paren): if len(frag.QualifiedIdentifier) == 1: return jast_make(jast.Primary,Identifier=frag.QualifiedIdentifier[0],ArgumentsOpt=None) def recast_QualifiedIdentifier__ExpressionInSeq(frag,paren): if len(frag.QualifiedIdentifier) == 1: return jast_make(jast.Primary,Identifier=frag.QualifiedIdentifier[0],ArgumentsOpt=None) def recast_Placeholder_Selectors__BlockStatements(frag,paren): if not frag.Selectors: # !!! return jast_make(jast.BlockStatements, jast_make(jast.BlockStatementPlaceholder,frag.Placeholder)) def recast_Placeholder_Selectors__Identifier(frag,paren): if not frag.Selectors: # !!! return jast_make(jast.IdentifierPlaceholder,frag.Placeholder) def recast___Identifier(frag,paren): return make_id("") def recast___BlockStatements(frag,paren): return jast_make(jast.BlockStatements) def recast_BlockStatements__BlockStatements(frag,paren): return frag.BlockStatements def recast_Expressions__Expression(frag,paren): if len(frag.Expressions) == 1: return frag.Expressions[0] def recast_Expression__Primary(frag,paren): return jast_make(jast.Primary,Expression=frag.Expression) def recast___ExpressionInSeq(frag,paren): return jast_make(jast.Expressions) def recast_Placeholder_Selectors__ExpressionInSeq(frag,paren): if not frag.Selectors: # !!! return jast_make(jast.ExpressionInSeqPlaceholder,frag.Placeholder) def recast_Expression__ExpressionInSeq(frag,paren): return frag.Expression def recast_Expressions__ExpressionInSeq(frag,paren): return frag.Expressions def recast___ClassBodyDeclarations(frag,paren): return jast_make(jast.ClassBodyDeclarations) def recast_CLASS_LBRACE_ClassBodyDeclarations_RBRACE__ClassBodyDeclarations(frag,paren): # comments !!! return frag.ClassBodyDeclarations def recast_VOID_LPAREN_FormalParameterListOpt_RPAREN__FormalParameterInSeq(frag,paren): # comments !!! return frag.FormalParameterListOpt def recast_Placeholder_Selectors__FormalParameterInSeq(frag,paren): if not frag.Selectors: # !!! return jast_make(jast.FormalParameterInSeqPlaceholder,frag.Placeholder) def recast___SwitchBlockStatementGroupInSeq(frag,paren): return jast_make(jast.SwitchBlockStatementGroups) def recast_SwitchBlockStatementGroups__SwitchBlockStatementGroupInSeq(frag,paren): return frag.SwitchBlockStatementGroups fill_recast_table() # - * - class JavaTemplate: def __init__(self,frag,parms='',bindings=None,start='Fragment'): if isinstance(frag,java_parser.Node): fragment = frag else: #print "parsing... <<" #print frag try: fragment = java_parser.parse(frag,start=start) except java_parser.JavaSyntaxError,e: print frag raise #print ">>" if (not isinstance(fragment,jast.Fragment) and not isinstance(fragment,jast.PlaceholderFragment)): child_name = fragment.__class__.__name__ if child_name == 'FormalParameterList': # !!! child_name = 'FormalParameterListOpt' try: fragment = jast_make(jast.Fragment,**{child_name: fragment}) except UnknownScheme: fragment = jast.Fragment((child_name,),[fragment]) self.fragment = fragment if not parms: self.parms = [] else: if isinstance(parms,str): self.parms = parms.split(':') else: self.parms = parms if bindings is None: self.bindings = {} else: self.bindings = bindings def _getfirstnt(self): i = 0 for child in self.fragment.children: if not isinstance(child,java_parser.Token): return child,i i += 1 raise Exception,"at least a non-terminal expected" def _getseqnode(self): for child in self.fragment.children: if isinstance(child,java_parser.Seq): return child return None def __add__(self,other): if not isinstance(other,JavaTemplate): raise Exception,"expected template" if self.parms or other.parms or self.bindings or other.bindings: raise Exception,"cannot add non bare templates" self_seq = self._getseqnode() other_seq = other._getseqnode() return self.__class__(java_parser.join_seq_nodes(self_seq,other_seq)) def tfree(self): return JavaTemplate(self.fragment,self.parms,{}) def tnaked(self): nt, i = self._getfirstnt() kind = self.fragment._spec[i] fragment = jast.Fragment((kind,),[nt]) return JavaTemplate(fragment,self.parms,self.bindings.copy()) def tbind(self,bindings): new_bindings = self.bindings.copy() new_bindings.update(bindings) return JavaTemplate(self.fragment,self.parms,new_bindings) def texpand(self,bindings,output = None,nindent=0): if output is None: to_string = 1 output = cStringIO.StringIO() else: to_string = 0 pretty = JavaTemplatePretty(output) for i in range(nindent): pretty.indent() self.tvisit(pretty,bindings=bindings) for i in range(nindent): pretty.dedent() if to_string: return output.getvalue() else: return None def __str__(self): return self.texpand({}) def tvisit(self,visitor,args=None,paren=[],bindings=None,expect=None,ctxt=None): before = [] after = [] if expect: kind, node = recast(self.fragment,paren,expect) if isinstance(node,tuple): before,after,node = node else: node = self.fragment try: saved = visitor.bindings new_bindings = self.bindings.copy() if bindings is not None: new_bindings.update(bindings) if args is not None: i = 0 for arg in args: new_bindings[self.parms[i]] = arg i += 1 visitor.bindings = new_bindings for tok,ctl in before: visitor.emit_tok(tok,ctl) r = visitor.visit(node,ctxt) for tok,ctl in after: visitor.emit_tok(tok,ctl) finally: visitor.bindings = saved if expect: return kind,r def texpand(fragment,bindings): output = cStringIO.StringIO() pretty = JavaTemplatePretty(output) pretty.bindings = bindings pretty.visit(fragment) return output.getvalue() class Concat: def tvisit(self,visitor,args=None,paren=[],bindings=None,expect=None,ctxt=None): frags = [] self_eval = 0 for arg in args: dummy, frag = recast(arg.fragment,[],('Identifier',)) if isinstance(frag,jast.Identifier): frag = frag.IDENTIFIER.value elif isinstance(frag,jast.IdentifierPlaceholder): frag = texpand(frag,arg.bindings) if not frag: continue if frag[0] == "`": # !!! self_eval = 1 else: raise Exception,"can't concat into an identifier: %s" % arg frags.append(frag) if not self_eval: frag = ''.join(frags) frag = make_id(frag) else: frag = "`concat`(%s)" % ','.join(frags) return JavaTemplate(frag).tvisit(visitor,paren=paren,expect=expect,ctxt=ctxt) concat = Concat() class Strfy: def tvisit(self,visitor,args=None,paren=[],bindings=None,expect=None,ctxt=None): if len(args) != 1: raise Exception,"strfy expects one arg" self_eval = 0 arg = args[0] dummy, frag = recast(arg.fragment,[],('Identifier',)) if isinstance(frag,jast.Identifier): frag = frag.IDENTIFIER.value elif isinstance(frag,jast.IdentifierPlaceholder): frag = texpand(frag,arg.bindings) if frag and frag[0] == "`": # !!! self_eval = 1 else: raise Exception,"can't recast as identifier for strfy: %s" % arg if not self_eval: frag = '"%s"' % frag frag = make_literal(frag) else: frag = "`strfy`(%s)" % frag return JavaTemplate(frag).tvisit(visitor,paren=paren,expect=expect,ctxt=ctxt) strfy = Strfy() class CSub: def tvisit(self,visitor,args=None,paren=[],bindings=None,expect=None,ctxt=None): if args: raise Exception,"csub expects no arguments" if not paren: raise Exception,"csub expects parenthesis" bindings = visitor.bindings visitor.emit_tok(paren[0],'a', subst=bindings) visitor.emit_tok(paren[1],'i', subst=bindings) return None,0 csub = CSub() def switchgroup(vals,suite): vals = [ jast_make(jast.Primary,Literal=make_literal(str(v))) for v in vals ] groups = [] for prim in vals[:-1]: lbl = jast_make(jast.SwitchLabel,Expression=prim) stmts = jast_make(jast.BlockStatements) groups.append(jast_make(jast.SwitchBlockStatementGroup,lbl,stmts)) groups.append(jast_make(jast.SwitchBlockStatementGroup, jast_make(jast.SwitchLabel,Expression=vals[-1]) ,suite)) return JavaTemplate(jast_make(jast.SwitchBlockStatementGroups,groups)) # - * - def fragments(): proto_parser = java_parser.JavaParser() to_show = [] for rule,name in proto_parser.rule2name.items(): if 'Fragment' in rule[0]: to_show.append(( rule[0], "%-50s [%s]" % ("%s ::= %s" % (rule[0],' '.join(rule[1])),name))) to_show.sort(lambda x,y: cmp(x[0],y[0])) for rule0,txt in to_show: print txt def check(): c = 0 supported = 0 for name in dir(jast): if 'Placeholder' in name: c += 1 if not hasattr(JavaTemplatePretty,'visit_%s' % name): print "missing support for %s" % name else: supported += 1 print "%s/%s" % (supported,c) # - * - def jt(s,parms=''): #print "TEMPL",s return JavaTemplate(s,parms) def gen(vals,nil=''): n = len(vals) cases = [] for i in range(2**n): j = 1 case = [] for c in range(n): if i&j: case.append(vals[c]) else: case.append(nil) j *= 2 cases.append(tuple(case)) return cases def commas(templ): return ','.join(templ.split()) def test4(): jt = JavaTemplate("int `cat`(a,`x);") assert jt.texpand({'cat': concat, 'x': JavaTemplate('b')}) == 'int ab;'; jt = JavaTemplate("int `cat`(a,`cat`(b,`x));") assert jt.texpand({'cat': concat, 'x': JavaTemplate('c')}) == 'int abc;'; jt = JavaTemplate("int `cat`(a,`cat`(b,`x`(c)));") assert jt.texpand({'cat': concat, 'x': JavaTemplate('`y',parms='y')}) == 'int abc;'; jt = JavaTemplate("int `cat`(a,`cat`(b,`x`(c)));") assert jt.texpand({'cat': concat, 'x': JavaTemplate('`cat`(y,`y)', bindings={'cat': concat},parms='y')}) == 'int abyc;'; def test3(): templ=jt("{ `a`([`b],1); }") inner=jt("{ `a(`b); }","a:b") print templ.texpand({'a': inner,'b': jt('foo')}) def test2(): templs = ["`x `y","a `x `y","`x a `y","`x `y a"] def subst(templ,x,y): return (templ.replace("`x",x) .replace("`y",y)) frags = [] for xx,xy,yx,yy in gen(['1','2','3','4']): frags.append((xx,jt(xx),xy,jt(xy),yx,jt(yx),yy,jt(yy))) for top in templs: ttop = jt(subst(commas(top),"`x`([`xx],[`xy])","`y`([`yx],[`yy])")) for x in templs: for y in templs: tx = jt(commas(x),"x:y") ty = jt(commas(y),"x:y") for xx,txx,xy,txy,yx,tyx,yy,tyy in frags: x1 = subst(x,xx,xy) y1 = subst(y,yx,yy) top1 = subst(top,x1,y1) expected = commas(top1) bindings = { 'x': tx, 'y': ty, } res = ttop.texpand(bindings) assert (',' not in (res[0],res[-1]) and ' ' not in res and ',,' not in res) bindings = { 'x': tx, 'y': ty, 'xx': txx, 'xy': txy, 'yx': tyx, 'yy': tyy } res = ttop.texpand(bindings) assert expected == res def test1(): frags = [] for triplet in gen(['1','2','3']): ttriplet = map(jt,triplet) frags.append((triplet,ttriplet)) for fixed in gen(['a','b','c','d']): ex = "%s `a %s `b %s `c %s" % fixed tex = jt(commas(ex)) for triplet,ttriplet in frags: expected = (ex.replace("`a",triplet[0]) .replace("`b",triplet[1]) .replace("`c",triplet[2])) expected = commas(expected) res = tex.texpand(dict(zip(('a','b','c'),ttriplet))) assert expected == res def test(): print jt("{ a(); b(); }").texpand({}) templs = [jt("{ `a; }"),jt("{ x(); `a; }"), jt("{ `a; y(); }"),jt("{ x(); `a; y(); }")] for frag in [jt("{ a(); b(); }"),jt("`b"),jt(""), jt("a();"),jt("a(); b();")]: for templ in templs: print templ.texpand({'a': frag}) templs = [jt("1,2,`x,4"),jt("`x,4"),jt("1,`x"),jt("1,`x,4")] for frag in [jt(""),jt("3"),jt("3,3")]: for tex in templs: print tex.texpand({'x': frag}) tcl = jt("class A { `a; }") tcl1 = jt("class A { `a; static {} }") tintf = jt("interface I { `a; }") tintf1 = jt("interface I { void method(); `a; }") for frag in [jt(""),jt("class { final int A = 2;}"), jt("class { final int A = 2; final int B = 3; }")]: for templ in [tcl,tintf,tcl1,tintf1]: print templ.texpand({'a': frag}) tmh0 = jt("interface I { int m(`x); }") tmh1 = jt("interface I { int m(`x,int b); }") tmh2 = jt("interface I { int m(int a,`x); }") tmh3 = jt("interface I { int m(int a,`x,int b); }") for frag in [jt("void()"),jt("void(int x)"), jt("void(int x,int y)")]: for templ in [tmh0,tmh1,tmh2,tmh3]: print templ.texpand({'x': frag}) with_comments = [] with_comments.append(JavaTemplate(""" { { /* ok */ } } """)) with_comments.append(JavaTemplate(""" { { `csub`( /* %(ok)s */); } } """)) with_comments.append(JavaTemplate(""" { { `csub`( /* %(ok)s */); break; } } """)) with_comments.append(JavaTemplate(""" { { invoke(a,`csub`(/* %(ok)s */),b); break; } } """)) for templ in with_comments: print templ.texpand({'csub': csub, 'ok': "OK"}) print (JavaTemplate("a(); b();")+JavaTemplate("c(); d();")).texpand({}) print (JavaTemplate("a,b")+JavaTemplate("c,d")).texpand({}) test1() print 'TEST1' test2() print 'TEST2' test3() print 'TEST3' test4() print 'TEST4'
pydal/helpers/regex.py
mbelletti/pydal
408
11081578
<gh_stars>100-1000 # -*- coding: utf-8 -*- import re REGEX_TYPE = re.compile(r"^(?:list:)?\w+") REGEX_DBNAME = re.compile(r"^(\w+)(:\w+)*") REGEX_W = re.compile(r"^\w+$") REGEX_TABLE_DOT_FIELD = re.compile(r"^(\w+)\.(\w+)$") REGEX_TABLE_DOT_FIELD_OPTIONAL_QUOTES = r'^"?(\w+)"?\."?(\w+)"?$' REGEX_UPLOAD_PATTERN = ( r"(?P<table>\w+)\.(?P<field>\w+)\.(?P<uuidkey>[\w-]+)(\.(?P<name>\S+))?\.\w+$" ) REGEX_UPLOAD_CLEANUP = "['\"\\s;]+" REGEX_UNPACK = r"(?<!\|)\|(?!\|)" REGEX_PYTHON_KEYWORDS = re.compile( "^(False|True|and|as|assert|break|class|" "continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|" "in|is|lambda|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)$" ) REGEX_SELECT_AS_PARSER = r"\s+AS\s+(\S+)$" REGEX_CONST_STRING = '("[^"]*")|' "('[^']*')" REGEX_SEARCH_PATTERN = r"^{[^.]+\.[^.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$" REGEX_SQUARE_BRACKETS = r"^.+\[.+\]$" REGEX_UPLOAD_EXTENSION = r"\.(\w{1,5})$" REGEX_ALPHANUMERIC = r"^[0-9a-zA-Z]\w*$" REGEX_CREDENTIALS = r"(?<=//)[\w.-]+([:/][^@]*)?(?=@)" REGEX_VALID_TB_FLD = re.compile(r"^[a-zA-Z]\w*\Z")
iclass/data.py
WuZifeng/ademxapp
379
11081586
# pylint: skip-file """ file iterator for image classification """ import os import time from PIL import Image import numpy as np from mxnet.io import DataBatch, DataIter from mxnet.ndarray import array from util.io import BatchFetcherGroup #from util.sampler import BalancedSampler_OneClassPerImage as BalancedSampler from util.sampler import FixedSampler, RandomSampler from util.util import as_list def parse_split_file(dataset, split, data_root): split_filename = 'iclass/data/{}/{}.lst'.format(dataset, split) image_list = [] label_list = [] with open(split_filename) as f: for item in f.readlines(): fields = item.strip().split('\t') image_list.append(os.path.join(data_root, fields[2])) label_list.append(int(fields[1])) return image_list, label_list class FileIter(DataIter): """FileIter object for image classification. Parameters ---------- dataset : string dataset split : string data split the list file of images and labels, whose each line is in the format: image_id(0 indexed) \t image_label \t image_file_path data_root : string the root data directory data_name : string the data name used in the network input label_name : string the label name used in SoftmaxOutput sampler: obj how to shuffle the samples per epoch has_gt: bool if there are ground truth labels batch_images : int the number of images per batch transformer : object the transformer for data augmentation prefetch_threads: int the number of prefetchers prefetcher_type: string the type of prefechers, e.g., process/thread """ def __init__(self, dataset, split, data_root, data_name = 'data', label_name = 'softmax_label', sampler = 'fixed', has_gt = True, batch_images = 256, transformer = None, prefetch_threads = 1, prefetcher_type = 'thread',): super(FileIter, self).__init__() self._data_name = data_name self._label_name = label_name self._has_gt = has_gt self._batch_images = batch_images self._transformer = transformer self._image_list, self._label_list = parse_split_file(dataset, split, data_root) self._perm_len = len(self._image_list) if sampler == 'fixed': sampler = FixedSampler(self._perm_len) elif sampler == 'random': sampler = RandomSampler(self._perm_len) data_batch = self.read([0]) self.batch_size = self._batch_images * data_batch[1].shape[0] self._data = list({self._data_name: data_batch[0]}.items()) self._label = list({self._label_name: data_batch[1]}.items()) self._fetcher = BatchFetcherGroup(self, sampler, batch_images, prefetch_threads, prefetch_threads*2, prefetcher_type) def read(self, db_inds): outputs = [[], [],] for db_ind in db_inds: # load an image rim = Image.open(self._image_list[db_ind]).convert('RGB') data = np.array(rim, np.uint8) # jitter if self._transformer is not None: data = self._transformer(data) data_list = as_list(data) for datum in data_list: outputs[0].append(datum.transpose(2, 0, 1)[np.newaxis]) if self._has_gt: outputs[1].append([self._label_list[db_ind]] * len(data_list)) for i, output in enumerate(outputs): outputs[i] = np.concatenate(output, axis=0) return tuple(outputs) @property def batch_images(self): return self._batch_images @property def batches_per_epoch(self): return self._perm_len // self._batch_images @property def provide_data(self): """The name and shape of data provided by this iterator""" return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._data] @property def provide_label(self): """The name and shape of label provided by this iterator""" return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._label] def reset(self): self._fetcher.reset() def next(self): if self._fetcher.iter_next(): tic = time.time() data_batch = self._fetcher.get() print 'Waited for {} seconds'.format(time.time() - tic) else: raise StopIteration return DataBatch(data=[array(data_batch[0])], label=[array(data_batch[1])]) def debug(self): for i in xrange(self._perm_len): self.read([i]) print 'Done {}/{}'.format(i+1, self._perm_len) def draw_sample(self, data_batch, meanstd, rgb_scale): import pylab as pl for i in xrange(data_batch.data[0].shape[0]): im = data_batch.data[0][i].asnumpy().transpose(1, 2, 0) im = im * meanstd[0] + meanstd[1] im *= rgb_scale im = np.maximum(0, np.minimum(255, im)) pl.imshow(im.astype(np.uint8)) pl.show()
learntools/deep_learning/exercise_7.py
roannav/learntools
359
11081590
import numpy as np import matplotlib.pyplot as plt from PIL import Image from learntools.core import * from tensorflow import keras import tensorflow as tf print("Using TensorFlow version {}".format(tf.__version__)) class StartSequentialModel(CodingProblem): _vars = ['fashion_model'] def check(self, fashion_model): assert (type(fashion_model) == keras.models.Sequential), \ ("Set fashion model to be a Sequential() model.") _solution = CS( """ fashion_model = Sequential() """ ) class AddFirstLayer(CodingProblem): _vars = ['fashion_model'] def check(self, fashion_model): first_layer = fashion_model.layers[0] desired_input_shape = (None, 28, 28, 1) useful_text = "\nIt is hard to change a layer once you have added it. Recreate `fashion_model` and run `fashion_model` again after fixing this problem" assert (len(fashion_model.layers) == 1), \ ("You should have 1 layer at this point but you have {}".format(len(fashion_model.layers))) assert (first_layer.input_shape == desired_input_shape), \ ("First layer should have shape {} but instead it is {}. ".format(desired_input_shape, first_layer.input_shape) + useful_text) assert (first_layer.activation.__name__ == 'relu'), \ ("You haven't set `relu` as the activation function. " + useful_text) assert (first_layer.kernel_size == (3, 3)), \ ("The kernel size should be (3, 3) but yours is {}.".format(first_layer.kernel_size)) assert (first_layer.filters == 12), \ ("The first layer should have 12 filters but you have {}.".format(first_layer.filters)) _hint = "The `input_shape` argument should be input_shape = (img_rows, img_cols, 1). " \ "The other arguments you need are `units`, `kernel_size` and `" _solution = CS( """ fashion_model.add(Conv2D(12, activation='relu', kernel_size=3, input_shape = (img_rows, img_cols, 1))) """ ) class AddMoreLayers(CodingProblem): _vars = ['fashion_model'] def check(self, fashion_model): layers = fashion_model.layers last_layer = fashion_model.layers[-1] useful_text = "Use `fashion_model.summary()` to see your current model architecture." assert (len(layers) == 6), \ ("You should have 6 layers, but actually have {}. ".format(useful_text) + useful_text) assert (type(fashion_model.layers[3]) == keras.layers.Flatten), \ ("You should have a Flatten layer as the 4th layer. " + useful_text) assert (last_layer.activation.__name__ == 'softmax'), \ ("Your last layer's activation function should be softmax" "but it is {}. Fix this in your code and rebuild the model by rerunning all model-building cells".format(last_layer.activation.__name__)) assert (last_layer.output_shape == (None, 10)), \ ("The number of nodes in your layer doesn't match the number of prediction categories. " "Last layer shape should be (None, 10) but it is ".format(last_layer.output_shape) + \ ". Fix this in your code and re-run all model-building cells.") _solution = CS( """ fashion_model.add(Conv2D(20, activation='relu', kernel_size=3)) fashion_model.add(Conv2D(20, activation='relu', kernel_size=3)) fashion_model.add(Flatten()) fashion_model.add(Dense(100, activation='relu')) fashion_model.add(Dense(10, activation='softmax')) """ ) class CompileModel(CodingProblem): _vars = ['fashion_model'] def check(self, fashion_model): # TODO: Re-enable everything in this check after we are settled on TF 2.x # Ran into a bunch of problems here while API's were changing # assert (fashion_model.optimizer is not None), \ # ("You don't have an optimizer set. Did you run `fashion_model.compile` with an optimizer argument") # optimizer_name = fashion_model.optimizer._tf_api_names[0] # correct_optimizer_name = 'keras.optimizers.Adam' # assert(optimizer_name == correct_optimizer_name), \ # ("You didn't get the optimizer set correctly. It should be `adam`") # n_metrics = len(fashion_model.metrics) # assert (n_metrics == 1), \ # ("You should have a list with 1 item for the metric argument. You had {}".format(n_metrics)) # metric = fashion_model.metrics[0] # First criterion is for older versions of tf. Second is for later versions # assert ((metric == 'accuracy') or (metric._name == 'acc')), \ # ("You need to set metrics=['accuracy']") pass _solution = CS( """ fashion_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) """ ) class FitFullDLModel(CodingProblem): _vars = ['fashion_model'] def check(self, fashion_model): assert('history' in dir(fashion_model)), \ ('You have not fit the model yet.') assert("val_loss" in fashion_model.history.history), \ ('The way you fit the model did not generate validation scores. Specify `validation_split`') _solution = CS( """ fashion_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2) """ ) class CreateNewDLModelFromScratch(CodingProblem): _vars = ['second_fashion_model'] def check(self, second_fashion_model): print("Model summary from second_fashion_model.summary()") print(second_fashion_model.summary()) first_layer = second_fashion_model.layers[0] last_layer = second_fashion_model.layers[-1] desired_input_shape = (None, 28, 28, 1) assert (first_layer.input_shape == desired_input_shape), \ ("First layer should have shape {} but instead it is {}. ".format(desired_input_shape, first_layer.input_shape) + useful_text) assert (len(second_fashion_model.layers) > 1), \ ("Use more than 1 layer for a more accurate model.") assert (last_layer.output_shape == (None, 10)), \ ("The number of nodes in your layer doesn't match the number of prediction categories.") assert (second_fashion_model.optimizer is not None), \ ("You don't have an optimizer set. Did you run `second_fashion_model.compile`") assert('history' in dir(second_fashion_model)), \ ('You have not fit the model yet.') assert("val_acc" in second_fashion_model.history.history), \ ('The way you fit the model did not generate validation accuracy. Specify `validation_split` and compile with metrics=["accuracy"]') model_val_acc = second_fashion_model.history.history['val_acc'][-1] assert(model_val_acc > 0.75), \ ('You have completed all the model building steps correctly, but your validation accuracy ' 'of {} can be improved. Try changing the model to see if you can get a better score'.format(model_val_acc)) _hint = "Start by copying the code from `fashion_model` and then change layers as you choose. " + \ "You'll develop intuition for what changes are worth making with practice. The next lesson " + \ "gives a good strategy of building large model and using a special techniques to reduce overfitting." _solution = CS( """ second_fashion_model = Sequential() second_fashion_model.add(Conv2D(12, activation='relu', kernel_size=3, input_shape = (img_rows, img_cols, 1))) # Changed kernel sizes to be 2 second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2)) second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2)) # added an addition Conv2D layer second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2)) second_fashion_model.add(Flatten()) second_fashion_model.add(Dense(100, activation='relu')) # It is important not to change the last layer. First argument matches number of classes. Softmax guarantees we get reasonable probabilities second_fashion_model.add(Dense(10, activation='softmax')) second_fashion_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) second_fashion_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2) """ ) qvars = bind_exercises(globals(), [ StartSequentialModel, AddFirstLayer, AddMoreLayers, CompileModel, FitFullDLModel, CreateNewDLModelFromScratch, ], var_format='q_{n}', ) __all__ = list(qvars)
DEQ-Sequence/utils/adaptive_embedding.py
ashwinipokle/deq
548
11081624
import torch from torch import nn import torch.nn.functional as F class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super(AdaptiveEmbedding, self).__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ModuleList() if div_val == 1: self.emb_layers.append( nn.Embedding(n_token, d_embed, sparse=sample_softmax>0) ) if d_proj != d_embed: self.emb_projs.append(nn.Linear(d_embed, d_proj, bias=False)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i)) self.emb_projs.append(nn.Linear(d_emb_i, d_proj, bias=False)) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0].weight) else: inp_flat = inp.contiguous().view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj]) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = F.linear(emb_i, self.emb_projs[i].weight) emb_flat.index_copy_(0, indices_i, emb_i) embed = emb_flat.view(*inp.size(), self.d_proj) embed.mul_(self.emb_scale) return embed
src/nsupdate/utils/_tests/test_ddns_client.py
mirzazulfan/nsupdate.info
774
11081627
<reponame>mirzazulfan/nsupdate.info """ Tests for ddns_client module. """ import pytest from ..ddns_client import dyndns2_update, Timeout, ConnectionError # see also conftest.py BASEDOMAIN = 'nsupdate.info' HOSTNAME = 'nsupdate-ddns-client-unittest.' + BASEDOMAIN INVALID_HOSTNAME = 'nsupdate-ddns-client-nohost.' + BASEDOMAIN USER, PASSWORD = HOSTNAME, '<PASSWORD>' # no problem, is only used for this unit test SERVER = 'ipv4.' + BASEDOMAIN SECURE = False # TLS/SNI support on python 2.x sucks :( class TestDynDns2Client(object): def test_timeout(self): with pytest.raises(Timeout): # this assumes that the service can't respond in 1us and thus times out dyndns2_update('wrong', 'wrong', SERVER, hostname='wrong', myip='172.16.31.10', secure=SECURE, timeout=0.000001) def test_connrefused(self): with pytest.raises(ConnectionError): # this assumes that there is no service running on 127.0.0.42 dyndns2_update('wrong', 'wrong', '127.0.0.42', hostname='wrong', myip='172.16.31.10', secure=SECURE, timeout=2.0) def test_notfqdn(self): status, text = dyndns2_update('wrongdomainnotfqdn', 'wrongpassword', SERVER, hostname=HOSTNAME, myip='172.16.31.10', secure=SECURE) assert status == 200 assert text == 'notfqdn' def test_badauth(self): status, text = dyndns2_update(USER, 'wrongpassword', SERVER, hostname=HOSTNAME, myip='172.16.31.10', secure=SECURE) assert status == 401 def test_nohost(self): status, text = dyndns2_update(USER, PASSWORD, SERVER, hostname=INVALID_HOSTNAME, myip='172.16.31.10', secure=SECURE) assert status == 200 assert text == 'nohost' def test_success(self): ip = '172.16.31.10' status, text = dyndns2_update(USER, PASSWORD, SERVER, hostname=HOSTNAME, myip=ip, secure=SECURE) assert status == 200 assert text in ["good %s" % ip, "nochg %s" % ip]
tests/spot/market/test_ticker_price.py
Banging12/binance-connector-python
512
11081635
from binance.spot import Spot as Client import responses from tests.util import mock_http_response mock_item = {"key_1": "value_1", "key_2": "value_2"} @mock_http_response(responses.GET, "/api/v3/ticker/price", mock_item, 200) def test_ticker_price_without_pair(): """Tests the API endpoint to get price ticker from all pairs""" api = Client() response = api.ticker_price() response.should.equal(mock_item) @mock_http_response( responses.GET, "/api/v3/ticker/price\\?symbol=BTCUSDT", mock_item, 200 ) def test_ticker_price(): """Tests the API endpoint to get price ticker from one pair""" api = Client() response = api.ticker_price("BTCUSDT") response.should.equal(mock_item)
lib/loss/lovasz_loss.py
littleSunlxy/contrastive-seg-lin
398
11081671
from itertools import filterfalse as ifilterfalse import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from torch.nn import BCELoss from lib.loss.aaf import losses as lossx from lib.utils.tools.logger import Logger as Log # weights # ATR training # [0.85978634, 1.19630769, 1.02639146, 1.30664970, 0.97220603, 1.04885815, # 1.01745278, 1.01481690, 1.27155077, 1.12947663, 1.13016390, 1.06514227, # 1.08384483, 1.08506841, 1.09560942, 1.09565198, 1.07504567, 1.20411509] # CCF # [0.82073458, 1.23651165, 1.0366326, 0.97076566, 1.2802332, 0.98860602, # 1.29035071, 1.03882453, 0.96725283, 1.05142434, 1.0075884, 0.98630539, # 1.06208869, 1.0160915, 1.1613597, 1.17624919, 1.1701143, 1.24720215] # PPSS # [0.89680465, 1.14352656, 1.20982646, 0.99269248, # 1.17911144, 1.00641032, 1.47017195, 1.16447113] # Pascal # [0.82877791, 0.95688253, 0.94921949, 1.00538108, 1.0201687, 1.01665831, 1.05470914] # Lip # [0.7602572, 0.94236198, 0.85644457, 1.04346266, 1.10627293, 0.80980162, # 0.95168713, 0.8403769, 1.05798412, 0.85746254, 1.01274366, 1.05854692, # 1.03430773, 0.84867818, 0.88027721, 0.87580925, 0.98747462, 0.9876475, # 1.00016535, 1.00108882] class ABRLovaszLoss(nn.Module): """Lovasz loss for Alpha process""" def __init__(self, ignore_index=None, only_present=True): super(ABRLovaszLoss, self).__init__() self.ignore_index = ignore_index self.only_present = only_present # self.weight = torch.FloatTensor([0.80777327, 1.00125961, 0.90997236, 1.10867908, 1.17541499, # 0.86041422, 1.01116758, 0.89290045, 1.12410812, 0.91105395, # 1.07604013, 1.12470610, 1.09895196, 0.90172057, 0.93529453, # 0.93054733, 1.04919178, 1.04937547, 1.06267568, 1.06365688]) self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index) def forward(self, preds, targets): h, w = targets[0].size(1), targets[0].size(2) # seg loss pred = F.interpolate(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) pred = F.softmax(input=pred, dim=1) loss = lovasz_softmax_flat(*flatten_probas(pred, targets[0], self.ignore_index), only_present=self.only_present) # dsn loss pred_dsn = F.interpolate(input=preds[-1], size=(h, w), mode='bilinear', align_corners=True) loss_dsn = self.criterion(pred_dsn, targets[0]) return loss + 0.4 * loss_dsn class SegmentationLoss(nn.Module): """Lovasz loss for Alpha process""" def __init__(self, ignore_index=None, only_present=True): super(SegmentationLoss, self).__init__() self.ignore_index = ignore_index self.only_present = only_present self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index) def forward(self, preds, targets): h, w = targets.size(1), targets.size(2) # seg loss pred = F.interpolate(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) loss_ce = self.criterion(pred, targets) # dsn loss pred_dsn = F.interpolate(input=preds[-1], size=(h, w), mode='bilinear', align_corners=True) loss_dsn = self.criterion(pred_dsn, targets) total_loss = loss_ce + 0.4 * loss_dsn return total_loss class ABRLovaszCELoss(nn.Module): """Lovasz loss for Alpha process""" def __init__(self, ignore_index=None, only_present=True): super(ABRLovaszCELoss, self).__init__() self.ignore_index = ignore_index self.only_present = only_present self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index) def forward(self, preds, targets): h, w = targets.size(1), targets.size(2) # seg loss pred = F.interpolate(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) loss_ce = self.criterion(pred, targets) pred = F.softmax(input=pred, dim=1) loss = lovasz_softmax_flat(*flatten_probas(pred, targets, self.ignore_index), only_present=self.only_present) # dsn loss pred_dsn = F.interpolate(input=preds[-1], size=(h, w), mode='bilinear', align_corners=True) loss_dsn = self.criterion(pred_dsn, targets) total_loss = loss_ce + loss + 0.4 * loss_dsn return total_loss class LovaszSoftmaxLoss(nn.Module): """Lovasz loss for Deep Supervision""" def __init__(self, ignore_index=None, only_present=False, per_image=False): super(LovaszSoftmaxLoss, self).__init__() self.ignore_index = ignore_index self.only_present = only_present self.per_image = per_image self.weight = torch.FloatTensor([0.80777327, 1.00125961, 0.90997236, 1.10867908, 1.17541499, 0.86041422, 1.01116758, 0.89290045, 1.12410812, 0.91105395, 1.07604013, 1.12470610, 1.09895196, 0.90172057, 0.93529453, 0.93054733, 1.04919178, 1.04937547, 1.06267568, 1.06365688]) self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, weight=self.weight) def forward(self, preds, targets): h, w = targets.size(1), targets.size(2) # seg loss pred = F.interpolate(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) pred = F.softmax(input=pred, dim=1) if self.per_image: loss = mean(lovasz_softmax_flat(*flatten_probas(pre.unsqueeze(0), tar.unsqueeze(0), self.ignore_index), only_present=self.only_present) for pre, tar in zip(pred, targets)) else: loss = lovasz_softmax_flat(*flatten_probas(pred, targets, self.ignore_index), only_present=self.only_present) # dsn loss pred_dsn = F.interpolate(input=preds[1], size=(h, w), mode='bilinear', align_corners=True) loss_dsn = self.criterion(pred_dsn, targets) return loss + 0.4 * loss_dsn def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None): """ Multi-class Lovasz-Softmax loss probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. per_image: compute the loss per image instead of per batch ignore: void class labels """ if per_image: loss = mean( lovasz_softmax_flat_ori(*flatten_probas_ori(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes) for prob, lab in zip(probas, labels)) else: loss = lovasz_softmax_flat_ori(*flatten_probas_ori(probas, labels, ignore), classes=classes) return loss def lovasz_softmax_flat_ori(probas, labels, classes='present'): """ Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. """ if probas.numel() == 0: # only void pixels, the gradients should be 0 return probas * 0. C = probas.size(1) losses = [] class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes for c in class_to_sum: fg = (labels == c).float() # foreground for class c if (classes is 'present' and fg.sum() == 0): continue if C == 1: if len(classes) > 1: raise ValueError('Sigmoid output possible only with 1 class') class_pred = probas[:, 0] else: class_pred = probas[:, c] errors = (Variable(fg) - class_pred).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) return mean(losses) def flatten_probas_ori(probas, labels, ignore=None): """ Flattens predictions in the batch """ if probas.dim() == 3: # assumes output of a sigmoid layer B, H, W = probas.size() probas = probas.view(B, 1, H, W) B, C, H, W = probas.size() probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C labels = labels.view(-1) if ignore is None: return probas, labels valid = (labels != ignore) vprobas = probas[valid.nonzero().squeeze()] vlabels = labels[valid] return vprobas, vlabels def lovasz_softmax_flat(preds, targets, only_present=False): """ Multi-class Lovasz-Softmax loss :param preds: [P, C] Variable, class probabilities at each prediction (between 0 and 1) :param targets: [P] Tensor, ground truth labels (between 0 and C - 1) :param only_present: average only on classes present in ground truth """ if preds.numel() == 0: # only void pixels, the gradients should be 0 return preds * 0. C = preds.size(1) losses = [] for c in range(C): fg = (targets == c).float() # foreground for class c if only_present and fg.sum() == 0: continue errors = (Variable(fg) - preds[:, c]).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) return mean(losses) def lovasz_grad(gt_sorted): """ Computes gradient of the Lovasz extension w.r.t sorted errors """ p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts - gt_sorted.float().cumsum(0) union = gts + (1 - gt_sorted).float().cumsum(0) jaccard = 1. - intersection / union if p > 1: # cover 1-pixel case jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] return jaccard def flatten_probas(preds, targets, ignore=None): """ Flattens predictions in the batch """ B, C, H, W = preds.size() preds = preds.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C targets = targets.view(-1) if ignore is None: return preds, targets valid = (targets != ignore) vprobas = preds[valid.nonzero().squeeze()] vlabels = targets[valid] return vprobas, vlabels # --------------------------- BINARY LOSSES --------------------------- def lovasz_hinge(logits, labels, per_image=True, ignore=None): """ Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id """ if per_image: loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) for log, lab in zip(logits, labels)) else: loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) return loss def lovasz_hinge_flat(logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\infty and +\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: # only void pixels, the gradients should be 0 return logits.sum() * 0. signs = 2. * labels.float() - 1. errors = (1. - logits * Variable(signs)) errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] grad = lovasz_grad(gt_sorted) loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss def flatten_binary_scores(scores, labels, ignore=None): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = (labels != ignore) vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels def mean(l, ignore_nan=True, empty=0): """ nan mean compatible with generators. """ l = iter(l) if ignore_nan: l = ifilterfalse(isnan, l) try: n = 1 acc = next(l) except StopIteration: if empty == 'raise': raise ValueError('Empty mean') return empty for n, v in enumerate(l, 2): acc += v if n == 1: return acc return acc / n def isnan(x): return x != x class AAF_Loss(nn.Module): """ Loss function for multiple outputs """ def __init__(self, ignore_index=255, num_classes=7): super(AAF_Loss, self).__init__() self.ignore_index = ignore_index self.num_classes = num_classes self.kld_margin = 3.0 self.kld_lambda_1 = 1.0 self.kld_lambda_2 = 1.0 # self.dec = 1e-3 self.dec = 1e-2 self.softmax = nn.Softmax(dim=1) self.w_edge = torch.zeros(1, 1, 1, self.num_classes, 1, 3) self.w_edge_softmax = nn.Softmax(dim=-1) self.w_not_edge = torch.zeros(1, 1, 1, self.num_classes, 1, 3) self.w_not_edge_softmax = nn.Softmax(dim=-1) def forward(self, preds, targets): h, w = targets.size(1), targets.size(2) # seg loss pred = F.interpolate(input=preds, size=(h, w), mode='bilinear', align_corners=True) pred = F.softmax(input=pred, dim=1) # aaf loss labels = targets.unsqueeze(1) one_label = labels.clone() one_label[labels == self.ignore_index] = 0 # one_hot_lab = F.one_hot(one_label, num_classes=self.num_classes) one_hot_lab = torch.zeros(one_label.size(0), self.num_classes, one_label.size(2), one_label.size(3)).cuda() one_hot_lab = one_hot_lab.scatter_(1, one_label.data, 1) targets_p_node_list = list(torch.split(one_hot_lab, 1, dim=1)) for i in range(self.num_classes): # Log.info('{} {}'.format(targets_p_node_list[i].shape, labels.shape)) targets_p_node_list[i] = targets_p_node_list[i].squeeze(-1) targets_p_node_list[i][labels == self.ignore_index] = self.ignore_index one_hot_lab = torch.cat(targets_p_node_list, dim=1).permute(0, 2, 3, 1) prob = pred w_edge = self.w_edge_softmax(self.w_edge).cuda() w_not_edge = self.w_not_edge_softmax(self.w_not_edge).cuda() # Log.info('{} {} {} {}'.format(one_hot_lab.shape, labels.shape, w_edge.shape, w_not_edge.shape)) # w_edge_shape=list(w_edge.shape) # Apply AAF on 3x3 patch. eloss_1, neloss_1 = lossx.adaptive_affinity_loss(labels, one_hot_lab, prob, 1, self.num_classes, self.kld_margin, w_edge[..., 0], w_not_edge[..., 0]) # Apply AAF on 5x5 patch. # eloss_2, neloss_2 = lossx.adaptive_affinity_loss(labels, # one_hot_lab, # prob, # 2, # self.num_classes, # self.kld_margin, # w_edge[..., 1], # w_not_edge[..., 1]) # # Apply AAF on 7x7 patch. # eloss_3, neloss_3 = lossx.adaptive_affinity_loss(labels, # one_hot_lab, # prob, # 3, # self.num_classes, # self.kld_margin, # w_edge[..., 2], # w_not_edge[..., 2]) dec = self.dec aaf_loss = torch.mean(eloss_1) * self.kld_lambda_1 * dec # aaf_loss += torch.mean(eloss_2) * self.kld_lambda_1*dec # aaf_loss += torch.mean(eloss_3) * self.kld_lambda_1*dec aaf_loss += torch.mean(neloss_1) * self.kld_lambda_2 * dec # aaf_loss += torch.mean(neloss_2) * self.kld_lambda_2*dec # aaf_loss += torch.mean(neloss_3) * self.kld_lambda_2*dec return aaf_loss
pyjswidgets/pyjamas/ui/DecoratorPanel.py
takipsizad/pyjs
739
11081691
<reponame>takipsizad/pyjs<filename>pyjswidgets/pyjamas/ui/DecoratorPanel.py # Copyright (C) 2006-2008 Google Inc. # Copyright (C) 2009 <NAME> <<EMAIL>> from pyjamas import DOM from pyjamas.ui.SimplePanel import SimplePanel from pyjamas import Factory from pyjamas.ui.TabPanel import TabPanel from pyjamas.ui.TabBar import TabBar """ A {@link SimplePanel} that wraps its contents in stylized boxes, which can be used to add rounded corners to a {@link Widget}. Wrapping a {@link Widget} in a "9-box" allows users to specify images in each of the corners and along the four borders. This method allows the content within the {@link DecoratorPanel} to resize without disrupting the look of the border. In addition, rounded corners can generally be combined into a single image file, which reduces the number of downloaded files at startup. This class also simplifies the process of using AlphaImageLoaders to support 8-bit transparencies (anti-aliasing and shadows) in ie6, which does not support them normally. CSS Style Rules .gwt-DecoratorPanel { the panel } .gwt-DecoratorPanel .top { the top row } .gwt-DecoratorPanel .topLeft { the top left cell } .gwt-DecoratorPanel .topLeftInner { the inner element of the cell } .gwt-DecoratorPanel .topCenter { the top center cell } .gwt-DecoratorPanel .topCenterInner { the inner element of the cell } .gwt-DecoratorPanel .topRight { the top right cell } .gwt-DecoratorPanel .topRightInner { the inner element of the cell } .gwt-DecoratorPanel .middle { the middle row } .gwt-DecoratorPanel .middleLeft { the middle left cell } .gwt-DecoratorPanel .middleLeftInner { the inner element of the cell } .gwt-DecoratorPanel .middleCenter { the middle center cell } .gwt-DecoratorPanel .middleCenterInner { the inner element of the cell } .gwt-DecoratorPanel .middleRight { the middle right cell } .gwt-DecoratorPanel .middleRightInner { the inner element of the cell } .gwt-DecoratorPanel .bottom { the bottom row } .gwt-DecoratorPanel .bottomLeft { the bottom left cell } .gwt-DecoratorPanel .bottomLeftInner { the inner element of the cell } .gwt-DecoratorPanel .bottomCenter { the bottom center cell } .gwt-DecoratorPanel .bottomCenterInner { the inner element of the cell } .gwt-DecoratorPanel .bottomRight { the bottom right cell } .gwt-DecoratorPanel .bottomRightInner { the inner element of the cell } """ class DecoratorPanel(SimplePanel): #The default style name. DEFAULT_STYLENAME = "gwt-DecoratorPanel" #The default styles applied to each row. DEFAULT_ROW_STYLENAMES = [ "top", "middle", "bottom" ] def __init__(self, rowStyles=None, containerIndex=1, **kwargs): """ Creates a new panel using the specified style names to apply to each row. Each row will contain three cells (Left, Center, and Right). The Center cell in the containerIndex row will contain the {@link Widget}. @param rowStyles: an array of style names to apply to each row @param containerIndex the index of the container row """ if rowStyles is None: rowStyles = self.DEFAULT_ROW_STYLENAMES if kwargs.has_key('Element'): self.table = kwargs.pop('Element') fc = DOM.getFirstChild(self.table) if fc: self.tbody = fc else: self.tbody = DOM.createTBody() DOM.appendChild(self.table, self.tbody) else: # Add a tbody self.table = DOM.createTable() self.tbody = DOM.createTBody() DOM.appendChild(self.table, self.tbody) DOM.setAttribute(self.table, "cellSpacing", "0") DOM.setAttribute(self.table, "cellPadding", "0") if not kwargs.has_key('StyleName'): kwargs['StyleName']=self.DEFAULT_STYLENAME SimplePanel.__init__(self, self.table, **kwargs) # Add each row for i in range(len(rowStyles)): row = self.createTR(rowStyles[i]) DOM.appendChild(self.tbody, row) if i == containerIndex: self.containerElem = DOM.getFirstChild(DOM.getChild(row, 1)) def createTR(self, styleName) : """ Create a new row with a specific style name. The row will contain three cells (Left, Center, and Right), each prefixed with the specified style name. This method allows Widgets to reuse the code on a DOM level, without creating a DecoratorPanel Widget. @param styleName: the style name @return the new row {@link Element} """ trElem = DOM.createTR() self.setStyleName(trElem, styleName) DOM.appendChild(trElem, self.createTD(styleName + "Left")) DOM.appendChild(trElem, self.createTD(styleName + "Center")) DOM.appendChild(trElem, self.createTD(styleName + "Right")) return trElem def createTD(self, styleName): """ Create a new table cell with a specific style name. @param styleName: the style name @return the new cell {@link Element} """ tdElem = DOM.createTD() inner = DOM.createDiv() DOM.appendChild(tdElem, inner) self.setStyleName(tdElem, styleName) self.setStyleName(inner, styleName + "Inner") return tdElem def getCellElement(self, row, cell): """ Get a specific Element from the panel. @param row: the row index @param cell: the cell index @return the Element at the given row and cell """ tr = DOM.getChild(self.tbody, row) td = DOM.getChild(tr, cell) return DOM.getFirstChild(td) def getContainerElement(self): return self.containerElem Factory.registerClass('pyjamas.ui.DecoratorPanel', 'DecoratorPanel', DecoratorPanel) class DecoratedTabBar(TabBar): TAB_ROW_STYLES = ["tabTop", "tabMiddle"] STYLENAME_DEFAULT = "gwt-DecoratedTabBar" def __init__(self, **kwargs): """ Creates an empty {@link DecoratedTabBar}. """ TabBar.__init__(self, **kwargs) def createTabTextWrapper(self): return DecoratorPanel(self.TAB_ROW_STYLES, 1) Factory.registerClass('pyjamas.ui.DecoratorPanel', 'DecoratedTabBar', DecoratedTabBar) class DecoratedTabPanel(TabPanel): DEFAULT_STYLENAME = "gwt-DecoratedTabPanel" def __init__(self, **kwargs): if not kwargs.has_key('StyleName'): kwargs['StyleName']=self.DEFAULT_STYLENAME if kwargs.has_key('TabBar'): tabbar = kwargs.pop('TabBar') else: tabbar = DecoratedTabBar() TabPanel.__init__(self, tabbar, **kwargs) self.getTabBar().setStyleName(DecoratedTabBar.STYLENAME_DEFAULT) def createTabTextWrapper(self): return DecoratorPanel(DecoratedTabBar.TAB_ROW_STYLES, 1) Factory.registerClass('pyjamas.ui.DecoratorPanel', 'DecoratedTabPanel', DecoratedTabPanel) class DecoratorTitledPanel(DecoratorPanel): def __init__(self, title, titleStyle=None, imgStyle=None, rowStyles=None, containerIndex=2, titleIndex=1, **kwargs) : if rowStyles is None: rowStyles = ["top", "top2", "middle", "bottom"] if titleStyle is None: titleStyle = "title" DecoratorPanel.__init__(self, rowStyles, containerIndex, **kwargs) inner = self.getCellElement(titleIndex, 1) if imgStyle: img = DOM.createDiv() DOM.setAttribute(img, "className", imgStyle) DOM.appendChild(inner, img) tdiv = DOM.createDiv() DOM.setAttribute(tdiv, "className", titleStyle) DOM.setInnerText(tdiv, title) DOM.appendChild(inner, tdiv) Factory.registerClass('pyjamas.ui.DecoratorPanel', 'DecoratorTitledPanel', DecoratorTitledPanel)
tests/blessclient/user_ip_test.py
mwpeterson/python-blessclient
115
11081705
<reponame>mwpeterson/python-blessclient import time import pytest from blessclient.user_ip import UserIP from blessclient.bless_cache import BlessCache IP_URLS = ['http://checkip.amazonaws.com', 'http://api.ipify.org'] def test_getIP_fresh(): user_ip = UserIP(None, 10, IP_URLS) user_ip.fresh = True user_ip.currentIP = '1.1.1.1' assert user_ip.getIP() == '1.1.1.1' def test_fixed_ip(): user_ip = UserIP(None, 10, IP_URLS, '1.2.3.4') assert user_ip.getIP() == '1.2.3.4' def test_getIP_cached(): bc = BlessCache(None, None, BlessCache.CACHEMODE_ENABLED) bc.cache = {} bc.set('lastip', '1.1.1.1') bc.set('lastipchecktime', time.time()) user_ip = UserIP(bc, 10, IP_URLS) assert user_ip.getIP() == '1.1.1.1' def test_getIP_fetched(mocker): bc = BlessCache(None, None, BlessCache.CACHEMODE_ENABLED) bc.cache = {} mocker.patch.object(bc, 'save') user_ip = UserIP(bc, 10, IP_URLS) mocker.patch.object(user_ip, '_fetchIP') user_ip._fetchIP.return_value = '1.1.1.1' assert user_ip.getIP() == '1.1.1.1' user_ip._fetchIP.assert_called_once() bc.save.assert_called_once() def test_getIP_fetched_fail(mocker): bc = BlessCache(None, None, BlessCache.CACHEMODE_ENABLED) bc.cache = {} mocker.patch.object(bc, 'save') user_ip = UserIP(bc, 10, IP_URLS) mocker.patch.object(user_ip, '_fetchIP') user_ip._fetchIP.return_value = None with pytest.raises(Exception): user_ip.getIP() user_ip._fetchIP.assert_called()
test_arena_rllib_env.py
YuhangSong/Arena-Baselines
135
11081733
from __future__ import absolute_import from __future__ import division from __future__ import print_function """Simple example of using ArenaRllibEnv, which is a interface that convert a arena environment to a MultiAgentEnv (see: https://ray.readthedocs.io/en/latest/rllib-env.html#multi-agent-and-hierarchical) interface by rllib. """ import yaml import cv2 import logging import arena import numpy as np from copy import deepcopy as dcopy np.set_printoptions(edgeitems=1) logger = logging.getLogger(__name__) def run(args, parser): with open(args.config_file) as f: experiments = yaml.safe_load(f) env = arena.get_one_from_grid_search( dcopy(experiments["Arena-Benchmark"]["env"]) ) env_config = dcopy(experiments["Arena-Benchmark"]["config"]["env_config"]) env_config["sensors"] = arena.get_one_from_grid_search( env_config["sensors"] ) env_config["multi_agent_obs"] = arena.get_one_from_grid_search( env_config["multi_agent_obs"] ) env_config["train_mode"] = False logger.info(env) # Tennis-Sparse-2T1P-Discrete logger.info(env_config) # {'is_shuffle_agents': True, 'train_mode': True, 'sensors': 'visual_FP'} env = arena.ArenaRllibEnv( env=env, env_config=env_config, ) logger.info(env.observation_space) logger.info(env.action_space) obs_rllib = env.reset() logger.info("obs_rllib: {}".format(obs_rllib)) episode_video = {} while True: # Actions should be provided for each agent that returned an observation. obs_rllib, rewards_rllib, dones_rllib, infos_rllib = env.step( # actions={"agent_0": 0, "agent_1": 7} actions_rllib={ "agent_0": 0, "agent_1": 5, "agent_2": 6, "agent_3": 3, } ) logger.info("obs_rllib: {}".format(obs_rllib)) logger.info("rewards_rllib: {}".format(rewards_rllib)) logger.info("dones_rllib: {}".format(dones_rllib)) logger.info("infos_rllib: {}".format(infos_rllib)) if dones_rllib["__all__"]: for episode_video_key in episode_video.keys(): # initialize video writer fourcc = cv2.VideoWriter_fourcc( 'M', 'J', 'P', 'G' ) fps = 15 video_filename = "../{}.avi".format( episode_video_key, ) video_size = ( np.shape(episode_video[episode_video_key])[2], np.shape(episode_video[episode_video_key])[1] ) video_writer = cv2.VideoWriter( video_filename, fourcc, fps, video_size ) for frame_i in range(np.shape(episode_video[episode_video_key])[0]): video_writer.write( episode_video[episode_video_key][frame_i] ) video_writer.release() episode_video = {} input('episode end, keep going?') else: for agent_id in obs_rllib.keys(): obs_each_agent = obs_rllib[agent_id] if isinstance(obs_each_agent, dict): obs_keys = obs_each_agent.keys() else: obs_keys = ["default_own_obs"] for obs_key in obs_keys: if isinstance(obs_each_agent, dict): obs_each_key = obs_each_agent[obs_key] else: obs_each_key = obs_each_agent obs_each_channel = {} if len(np.shape(obs_each_key)) == 1: # vector observation obs_each_channel["default_channel"] = arena.get_img_from_fig( arena.plot_feature( obs_each_key ) ) elif len(np.shape(obs_each_key)) == 3: # visual observation for channel_i in range(np.shape(obs_each_key)[2]): gray = obs_each_key[ :, :, channel_i ] rgb = cv2.merge([gray, gray, gray]) rgb = (rgb * 255.0).astype(np.uint8) obs_each_channel["{}_channel".format( channel_i )] = rgb else: raise NotImplementedError for channel_key in obs_each_channel.keys(): temp = np.expand_dims( obs_each_channel[channel_key], 0 ) episode_video_key = "agent_{}-obs_{}-channel-{}".format( agent_id, obs_key, channel_key, ) if episode_video_key not in episode_video.keys(): episode_video[episode_video_key] = temp else: episode_video[episode_video_key] = np.concatenate( (episode_video[episode_video_key], temp) ) if __name__ == "__main__": parser = arena.create_parser() args = parser.parse_args() run(args, parser)
fuzz_lightyear/supplements/include.py
bbhunter/fuzz-lightyear
169
11081760
<gh_stars>100-1000 from typing import Callable from typing import Iterable from fuzz_lightyear.datastore import get_included_tags def tags(func: Callable[[], Iterable[str]]) -> Callable: """Allows developers to specify Swagger tags which should be fuzzed. Example: Only fuzz operations with the 'user_account' tag. >>> @fuzz_lightyear.include.tags ... def a(): ... return ['user_account'] """ tags_to_include = func() if tags_to_include: get_included_tags().update(tags_to_include) return func
phobos/io/entities/__init__.py
hawkina/phobos
323
11081813
#!/usr/bin/python3 # coding=utf-8 # ------------------------------------------------------------------------------- # This file is part of Phobos, a Blender Add-On to edit robot models. # Copyright (C) 2020 University of Bremen & DFKI GmbH Robotics Innovation Center # # You should have received a copy of the 3-Clause BSD License in the LICENSE file. # If not, see <https://opensource.org/licenses/BSD-3-Clause>. # ------------------------------------------------------------------------------- """ Parses the model entities on import and provides them as :data:`entity_types`. """ import os import importlib.util entity_types = dict() # this creates a dict entry for every python file in this subfolder for filename in os.listdir(os.path.dirname(__file__)): mod_name, file_ext = os.path.splitext(os.path.split(filename)[-1]) # only take .py files and ignore this __init__ file if (filename != os.path.split(__file__)[-1]) and (file_ext.lower() == '.py'): modpath = os.path.join(os.path.dirname(__file__), filename) # load the module from file and source it to access its data spec = importlib.util.spec_from_file_location(filename, modpath) py_mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(py_mod) # try reading the entity dictionary and add it to the existing entities if hasattr(py_mod, 'entity_type_dict'): entity_types.update(py_mod.entity_type_dict.copy()) print('Registered entity plugin:', list(py_mod.entity_type_dict.keys())) else: print( 'ERROR in entities/__init__: "' + filename + '" has no valid entity plugin interface.' )
Alignment/TrackerAlignment/test/mcMisalignmentScaler_cfg.py
ckamtsikis/cmssw
852
11081817
<filename>Alignment/TrackerAlignment/test/mcMisalignmentScaler_cfg.py from __future__ import print_function import FWCore.ParameterSet.Config as cms process = cms.Process("MCMisalignmentScaler") process.load("Configuration.StandardSequences.MagneticField_cff") # B-field map process.load("Configuration.Geometry.GeometryRecoDB_cff") # Ideal geometry and interface process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") # Global tag from Configuration.AlCa.GlobalTag import GlobalTag process.siStripQualityESProducer.ListOfRecordToMerge = cms.VPSet( cms.PSet(record = cms.string("RunInfoRcd"), tag = cms.string("")), cms.PSet(record = cms.string("SiStripBadChannelRcd"), tag = cms.string("")), cms.PSet(record = cms.string("SiStripBadFiberRcd"), tag = cms.string("")), cms.PSet(record = cms.string("SiStripBadModuleRcd"), tag = cms.string("")), cms.PSet(record = cms.string("SiStripBadStripRcd"), tag = cms.string("")), cms.PSet(record = cms.string("SiStripDetCablingRcd"), tag = cms.string("")), ) process.load("Alignment.TrackerAlignment.mcMisalignmentScaler_cfi") ################################################################################ # parameters to configure: process.GlobalTag = GlobalTag(process.GlobalTag, "auto:phase1_2017_realistic") process.mcMisalignmentScaler.scalers.append( cms.PSet( subDetector = cms.untracked.string("Tracker"), factor = cms.untracked.double(0.2) ) ) process.mcMisalignmentScaler.pullBadModulesToIdeal = False process.mcMisalignmentScaler.outlierPullToIdealCut = 0.1 ################################################################################ usedGlobalTag = process.GlobalTag.globaltag.value() print("Using Global Tag:", usedGlobalTag) from CondCore.CondDB.CondDB_cfi import CondDB process.PoolDBOutputService = cms.Service("PoolDBOutputService", CondDB, timetype = cms.untracked.string("runnumber"), toPut = cms.VPSet( cms.PSet( record = cms.string("TrackerAlignmentRcd"), tag = cms.string("Alignments") ), ) ) process.PoolDBOutputService.connect = "sqlite_file:misalignment_rescaled0p2.db" process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1)) process.source = cms.Source("EmptySource") process.p = cms.Path(process.mcMisalignmentScaler)
bcbio/structural/titancna.py
lbeltrame/bcbio-nextgen
418
11081826
"""TitanCNA: Subclonal CNV calling and loss of heterogeneity in cancer. https://github.com/gavinha/TitanCNA """ import csv import glob import os import shutil import pandas as pd from bcbio import utils from bcbio.bam import ref from bcbio.distributed.transaction import file_transaction, tx_tmpdir from bcbio.heterogeneity import chromhacks, loh from bcbio.log import logger from bcbio.pipeline import datadict as dd from bcbio.provenance import do from bcbio.variation import effects, vcfutils from bcbio.structural import cnvkit def run(items): from bcbio import heterogeneity paired = vcfutils.get_paired(items) if not paired: logger.info("Skipping TitanCNA; no somatic tumor calls in batch: %s" % " ".join([dd.get_sample_name(d) for d in items])) return items work_dir = _sv_workdir(paired.tumor_data) cn_file = _titan_cn_file(dd.get_normalized_depth(paired.tumor_data), work_dir, paired.tumor_data) het_file = _titan_het_file(heterogeneity.get_variants(paired.tumor_data), work_dir, paired) if _should_run(het_file): ploidy_outdirs = [] for ploidy in [2, 3, 4]: for num_clusters in [1, 2, 3]: out_dir = _run_titancna(cn_file, het_file, ploidy, num_clusters, work_dir, paired.tumor_data) ploidy_outdirs.append((ploidy, out_dir)) solution_file = _run_select_solution(ploidy_outdirs, work_dir, paired.tumor_data) else: logger.info("Skipping TitanCNA; not enough input data: %s" % " ".join([dd.get_sample_name(d) for d in items])) return items out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append(_finalize_sv(solution_file, paired.tumor_data)) out.append(paired.tumor_data) return out def _finalize_sv(solution_file, data): """Add output files from TitanCNA calling optional solution. """ out = {"variantcaller": "titancna"} with open(solution_file) as in_handle: solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"), in_handle.readline().strip("\r\n").split("\t"))) if solution.get("path"): out["purity"] = solution["purity"] out["ploidy"] = solution["ploidy"] out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")] base = os.path.basename(solution["path"]) out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"), ("cf", "/%s_CF.pdf" % base), ("cna", "/%s_CNA.pdf" % base), ("loh", "/%s_LOH.pdf" % base)] if os.path.exists(solution["path"] + ext)]) out["subclones"] = "%s.segs.txt" % solution["path"] out["hetsummary"] = solution_file out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data) out["lohsummary"] = loh.summary_status(out, data) return out def _should_run(het_file): """Check for enough input data to proceed with analysis. """ has_hets = False with open(het_file) as in_handle: for i, line in enumerate(in_handle): if i > 1: has_hets = True break return has_hets def _run_select_solution(ploidy_outdirs, work_dir, data): """Select optimal """ out_file = os.path.join(work_dir, "optimalClusters.txt") if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: ploidy_inputs = " ".join(["--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs]) cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}" do.run(cmd.format(**locals()), "TitanCNA: select optimal solution") return out_file def _run_titancna(cn_file, het_file, ploidy, num_clusters, work_dir, data): """Run titanCNA wrapper script on given ploidy and clusters. """ sample = dd.get_sample_name(data) cores = dd.get_num_cores(data) export_cmd = utils.get_R_exports() ploidy_dir = utils.safe_makedir(os.path.join(work_dir, "run_ploidy%s" % ploidy)) cluster_dir = "%s_cluster%02d" % (sample, num_clusters) out_dir = os.path.join(ploidy_dir, cluster_dir) if not utils.file_uptodate(out_dir + ".titan.txt", cn_file): with tx_tmpdir(data) as tmp_dir: with utils.chdir(tmp_dir): cmd = ("{export_cmd} && titanCNA.R --id {sample} --hetFile {het_file} --cnFile {cn_file} " "--numClusters {num_clusters} --ploidy {ploidy} --numCores {cores} --outDir {tmp_dir} " "--libdir None") chroms = ["'%s'" % c.name.replace("chr", "") for c in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_autosomal_or_x(c.name)] if "'X'" not in chroms: chroms += ["'X'"] # Use UCSC style naming for human builds to support BSgenome genome_build = ("hg19" if dd.get_genome_build(data) in ["GRCh37", "hg19"] else dd.get_genome_build(data)) cmd += """ --chrs "c(%s)" """ % ",".join(chroms) cmd += " --genomeBuild {genome_build}" if data["genome_build"] in ("hg19", "hg38"): cmd += " --genomeStyle UCSC" if data["genome_build"] in ["hg38"]: data_dir = os.path.normpath(os.path.join( os.path.dirname(os.path.realpath(os.path.join( os.path.dirname(utils.Rscript_cmd()), "titanCNA.R"))), os.pardir, os.pardir, "data")) cytoband_file = os.path.join(data_dir, "cytoBand_hg38.txt") assert os.path.exists(cytoband_file), cytoband_file cmd += " --cytobandFile %s" % cytoband_file # TitanCNA's model is influenced by the variance in read coverage data # and data type: set reasonable defaults for non-WGS runs # (see https://github.com/gavinha/TitanCNA/tree/master/scripts/R_scripts) if dd.get_coverage_interval(data) != "genome": cmd += " --alphaK=2500 --alphaKHigh=2500" do.run(cmd.format(**locals()), "TitanCNA CNV detection: ploidy %s, cluster %s" % (ploidy, num_clusters)) for fname in glob.glob(os.path.join(tmp_dir, cluster_dir + "*")): shutil.move(fname, ploidy_dir) if os.path.exists(os.path.join(tmp_dir, "Rplots.pdf")): shutil.move(os.path.join(tmp_dir, "Rplots.pdf"), os.path.join(ploidy_dir, "%s.Rplots.pdf" % cluster_dir)) return ploidy_dir def _sv_workdir(data): return utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", dd.get_sample_name(data), "titancna")) def _titan_het_file(vrn_files, work_dir, paired): assert vrn_files, "Did not find compatible variant calling files for TitanCNA inputs" from bcbio.heterogeneity import bubbletree class OutWriter: def __init__(self, out_handle): self.writer = csv.writer(out_handle, dialect="excel-tab") def write_header(self): self.writer.writerow(["Chr", "Position", "Ref", "RefCount", "Nref", "NrefCount", "NormQuality"]) def write_row(self, rec, stats): if rec.qual and float(rec.qual) > 0: self.writer.writerow([rec.chrom, rec.pos, rec.ref, stats["tumor"]["depth"] - stats["tumor"]["alt"], rec.alts[0], stats["tumor"]["alt"], rec.qual]) return bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"], work_dir, paired, OutWriter) def _titan_cn_file(cnr_file, work_dir, data): """Convert CNVkit or GATK4 normalized input into TitanCNA ready format. """ out_file = os.path.join(work_dir, "%s.cn" % (utils.splitext_plus(os.path.basename(cnr_file))[0])) support_cols = {"cnvkit": ["chromosome", "start", "end", "log2"], "gatk-cnv": ["CONTIG", "START", "END", "LOG2_COPY_RATIO"]} cols = support_cols[cnvkit.bin_approach(data)] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: iterator = pd.read_csv(cnr_file, sep="\t", iterator=True, header=0, comment="@") with open(tx_out_file, "w") as handle: for chunk in iterator: chunk = chunk[cols] chunk.columns = ["chrom", "start", "end", "logR"] if cnvkit.bin_approach(data) == "cnvkit": chunk['start'] += 1 chunk.to_csv(handle, mode="a", sep="\t", index=False) return out_file # ## VCF converstion _vcf_header = """##fileformat=VCFv4.2 ##source={caller} ##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record"> ##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles"> ##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant"> ##INFO=<ID=FOLD_CHANGE_LOG,Number=1,Type=Float,Description="Log fold change"> ##INFO=<ID=PROBES,Number=1,Type=Integer,Description="Number of probes in CNV"> ##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy Number: Overall"> ##INFO=<ID=MajorCN,Number=1,Type=Integer,Description="Copy Number: Major allele"> ##INFO=<ID=MinorCN,Number=1,Type=Integer,Description="Copy Number: Minor allele"> ##ALT=<ID=DEL,Description="Deletion"> ##ALT=<ID=DUP,Description="Duplication"> ##ALT=<ID=LOH,Description="Loss of heterozygosity"> ##ALT=<ID=CNV,Description="Copy number variable region"> ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype"> """ def _get_header(in_handle): return in_handle.readline().strip().split("\t"), in_handle def _seg_to_vcf(cur): svtype = _get_svtype(cur["TITAN_call"]) info = ["SVTYPE=%s" % svtype, "END=%s" % cur["End_Position.bp."], "SVLEN=%s" % (int(cur["End_Position.bp."]) - int(cur["Start_Position.bp."])), "CN=%s" % cur["Copy_Number"], "MajorCN=%s" % cur["MajorCN"], "MinorCN=%s" % cur["MinorCN"], "FOLD_CHANGE_LOG=%s" % cur["Median_logR"]] return [cur["Chromosome"], cur["Start_Position.bp."], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"] def to_vcf(in_file, caller, header_fn, vcf_fn, data, sep="\t"): """Convert output TitanCNA segs file into bgzipped VCF.""" out_file = "%s.vcf" % utils.splitext_plus(in_file)[0] out_file_gz = out_file + ".gz" if not utils.file_exists(out_file + ".gz") and not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(_vcf_header.format(caller=caller)) out_handle.write("\t".join(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", dd.get_sample_name(data)]) + "\n") header, in_handle = header_fn(in_handle) for line in in_handle: out = vcf_fn(dict(zip(header, line.strip().split(sep)))) if out: out_handle.write("\t".join(out) + "\n") # also does bgzip and index out_file_prep_vcf_gz = vcfutils.sort_by_ref(out_file, data) shutil.move(out_file_prep_vcf_gz, out_file_gz) shutil.move(out_file_prep_vcf_gz + ".tbi", out_file_gz + ".tbi") effects_vcf, _ = effects.add_to_vcf(out_file_gz, data, "snpeff") return effects_vcf or out_file_gz def _get_svtype(call): """Retrieve structural variant type from current TitanCNA events. homozygous deletion (HOMD), hemizygous deletion LOH (DLOH), copy neutral LOH (NLOH), diploid heterozygous (HET), amplified LOH (ALOH), gain/duplication of 1 allele (GAIN), allele-specific copy number amplification (ASCNA), balanced copy number amplification (BCNA), unbalanced copy number amplification (UBCNA) """ if call in set(["HOMD", "DLOH"]): return "DEL" elif call in set(["ALOH", "GAIN", "ASCNA", "BCNA", "UBCNA"]): return "DUP" elif call in set(["NLOH"]): return "LOH" else: return "CNV"
pytorch/ocnn/octree_conv.py
enpelonio/O-CNN
299
11081827
import torch from torch import nn from torch.autograd import Function import ocnn def resize_with_last_val(list_in, num=3): assert (type(list_in) is list and len(list_in) < num + 1) for i in range(len(list_in), num): list_in.append(list_in[-1]) return list_in class OctreeConvFunction(Function): @staticmethod def forward(ctx, data_in, weights, octree, depth, channel_out, kernel_size, stride, nempty): data_in = data_in.contiguous() ctx.save_for_backward(data_in, weights, octree) ctx.depth = depth ctx.channel_out = channel_out ctx.kernel_size = resize_with_last_val(kernel_size) ctx.stride = stride ctx.nempty = nempty data_out = ocnn.nn.octree_conv( data_in, weights, octree, depth, channel_out, kernel_size, stride, nempty) return data_out @staticmethod def backward(ctx, grad_in): grad_in = grad_in.contiguous() data_in, weights, octree = ctx.saved_tensors grad_out, grad_w = ocnn.nn.octree_conv_grad( data_in, weights, octree, grad_in, ctx.depth, ctx.channel_out, ctx.kernel_size, ctx.stride, ctx.nempty) return (grad_out, grad_w) + (None,) * 6 class OctreeDeconvFunction(Function): @staticmethod def forward(ctx, data_in, weights, octree, depth, channel_out, kernel_size, stride, nempty): data_in = data_in.contiguous() ctx.save_for_backward(data_in, weights, octree) ctx.depth = depth ctx.channel_out = channel_out ctx.kernel_size = resize_with_last_val(kernel_size) ctx.stride = stride ctx.nempty = nempty data_out = ocnn.nn.octree_deconv( data_in, weights, octree, depth, channel_out, kernel_size, stride, nempty) return data_out @staticmethod def backward(ctx, grad_in): grad_in = grad_in.contiguous() data_in, weights, octree = ctx.saved_tensors grad_out, grad_w = ocnn.nn.octree_deconv_grad( data_in, weights, octree, grad_in, ctx.depth, ctx.channel_out, ctx.kernel_size, ctx.stride, ctx.nempty) return (grad_out, grad_w) + (None,) * 6 # alias octree_conv = OctreeConvFunction.apply octree_deconv = OctreeDeconvFunction.apply # module class OctreeConvBase(nn.Module): def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1, nempty=False): super(OctreeConvBase, self).__init__() self.depth = depth self.channel_out = channel_out self.kernel_size = resize_with_last_val(kernel_size) self.stride = stride self.channel_in = channel_in self.nempty = nempty self.kdim = self.kernel_size[0] * self.kernel_size[1] * self.kernel_size[2] conv_in = channel_in if self.is_conv_layer() else channel_out conv_out = channel_out if self.is_conv_layer() else channel_in self.cdim = conv_in * self.kdim self.weights = nn.Parameter(torch.Tensor(conv_out, self.cdim)) nn.init.xavier_uniform_(self.weights) def is_conv_layer(): raise NotImplementedError def extra_repr(self) -> str: return ('depth={}, channel_in={}, channel_out={}, kernel_size={}, ' 'stride={}, nempty={}').format(self.depth, self.channel_in, self.channel_out, self.kernel_size, self.stride, self.nempty) class OctreeConv(OctreeConvBase): def is_conv_layer(self): return True def forward(self, data, octree): assert data.size(1) == self.channel_in conv = octree_conv( data, self.weights, octree, self.depth, self.channel_out, self.kernel_size, self.stride, self.nempty) if self.stride == 2 and not self.nempty: conv = ocnn.octree_pad(conv, octree, self.depth-1) return conv class OctreeDeconv(OctreeConvBase): def is_conv_layer(self): return False def forward(self, data, octree): assert data.size(1) == self.channel_in if self.stride == 2 and not self.nempty: data = ocnn.octree_depad(data, octree, self.depth) deconv = octree_deconv( data, self.weights, octree, self.depth, self.channel_out, self.kernel_size, self.stride, self.nempty) return deconv class OctreeConvFast(OctreeConvBase): def is_conv_layer(self): return True def forward(self, data, octree): depth = self.depth col = ocnn.octree2col(data, octree, depth, self.kernel_size, self.stride, False) col = col.view([self.cdim, -1]) conv = torch.mm(self.weights, col) conv = torch.unsqueeze(torch.unsqueeze(conv, 0), -1) # [C,H] -> [1,C,H,1] if self.stride == 2: conv = ocnn.octree_pad(conv, octree, depth-1) return conv class OctreeDeconvFast(OctreeConvBase): def is_conv_layer(self): return False def forward(self, data, octree): depth = self.depth if self.stride == 2: data = ocnn.octree_depad(data, octree, depth) depth = depth + 1 data = torch.squeeze(torch.squeeze(data, dim=0), dim=-1) col = torch.mm(self.weights.t(), data) col = col.view(self.channel_out, self.kdim, -1) deconv = ocnn.col2octree(col, octree, depth, self.kernel_size, self.stride, False) return deconv
tests/test_stress_sorteddict.py
hinamimi/python-sortedcontainers
1,510
11081846
# -*- coding: utf-8 -*- from __future__ import print_function from sys import hexversion import random from .context import sortedcontainers from sortedcontainers import SortedDict from functools import wraps if hexversion < 0x03000000: from itertools import izip as zip range = xrange random.seed(0) actions = [] def actor(func): actions.append(func) return func def test_init(): sdict = SortedDict() sdict._check() sdict = SortedDict() sdict._reset(17) sdict._check() sdict = SortedDict((val, -val) for val in range(10000)) sdict._check() assert all(key == -val for key, val in sdict.items()) sdict.clear() sdict._check() assert len(sdict) == 0 sdict = SortedDict.fromkeys(range(1000), None) assert all(sdict[key] == None for key in range(1000)) @actor def stress_contains(sdict): keys = list(sdict) assert all((key in sdict for key in keys)) @actor def stress_delitem(sdict): keys = list(sdict) for rpt in range(100): pos = random.randrange(0, len(sdict)) del sdict[keys[pos]] del keys[pos] @actor def stress_getitem(sdict): items = list(sdict.items()) assert all(sdict[key] == value for key, value in items) @actor def stress_eq(sdict): that = dict((key, value) for key, value in sdict.items()) assert sdict == that @actor def stress_setitem_len(sdict): start_len = len(sdict) keys = list(range(100)) missing = sum(1 for val in keys if val not in sdict) for val in keys: sdict[val] = -val end_len = len(sdict) assert (start_len + missing) == end_len @actor def stress_copy(sdict): that = sdict.copy() @actor def stress_get(sdict): keys = list(range(100)) for key in keys: if key in sdict: assert sdict.get(key, 1) == -key else: assert sdict.get(key, 1) == 1 @actor def stress_has_key(sdict): if hexversion > 0x03000000: return keys = list(range(100)) for key in keys: assert all((key in sdict) == (sdict.has_key(key)) for key in sdict) @actor def stress_items_keys_values(sdict): items = sdict.items() keys = sdict.keys() values = sdict.values() assert list(items) == list(zip(keys, values)) @actor def stress_iter_items_keys_values(sdict): it = zip(sdict.items(), sdict.keys(), sdict.values()) assert all(tup[0] == (tup[1], tup[2]) for tup in it) @actor def stress_pop(sdict): keys = list(range(200)) for key in keys: if key in sdict: val = sdict[key] assert sdict.pop(key, 1) == val else: assert sdict.pop(key, 1) == 1 @actor def stress_popitem(sdict): items = [sdict.popitem() for rpt in range(100)] keys = [item[0] for item in items] assert all(keys[pos - 1] > keys[pos] for pos in range(1, len(keys))) assert all(key == -value for key, value in items) @actor def stress_setdefault(sdict): keys = list(range(200)) for key in keys: if key in sdict: assert sdict.setdefault(key) == -key else: sdict.setdefault(key) assert sdict[key] == None del sdict[key] def test_stress(repeat=1000): sdict = SortedDict((val, -val) for val in range(1000)) for rpt in range(repeat): action = random.choice(actions) action(sdict) try: sdict._check() except AssertionError: print(action) raise start_len = len(sdict) while len(sdict) < 500: key = random.randrange(0, 2000) sdict[key] = -key while len(sdict) > 2000: key = random.randrange(0, 2000) if key in sdict: del sdict[key] if start_len != len(sdict): sdict._check() if __name__ == '__main__': import sys from datetime import datetime start = datetime.now() print('Python', sys.version_info) try: num = int(sys.argv[1]) print('Setting iterations to', num) except: print('Setting iterations to 1000 (default)') num = 1000 try: pea = int(sys.argv[2]) random.seed(pea) print('Setting seed to', pea) except: print('Setting seed to 0 (default)') random.seed(0) try: test_stress(num) except: raise finally: print('Exiting after', (datetime.now() - start))
raiden/tests/integration/network/proxies/test_service_registry.py
tirkarthi/raiden
2,101
11081849
from unittest.mock import Mock, patch import pytest from eth_utils import to_canonical_address from raiden.blockchain.decode import update_service_addresses_from_event from raiden.blockchain.events import decode_raiden_event_to_internal from raiden.blockchain.filters import RaidenContractFilter from raiden.constants import BLOCK_ID_LATEST from raiden.exceptions import BrokenPreconditionError from raiden.network.pathfinding import get_random_pfs, get_valid_pfs_url from raiden.settings import RAIDEN_CONTRACT_VERSION from raiden.tests.utils.factories import HOP1 from raiden.tests.utils.smartcontracts import deploy_service_registry_and_set_urls from raiden.transfer.events import UpdateServicesAddresses from raiden.utils.keys import privatekey_to_address from raiden.utils.typing import FeeAmount, TokenNetworkRegistryAddress from raiden_contracts.contract_manager import ContractManager, contracts_precompiled_path token_network_registry_address_test_default = TokenNetworkRegistryAddress( to_canonical_address("0xB9633dd9a9a71F22C933bF121d7a22008f66B908") ) def test_service_registry_set_url(service_registry_address, private_keys, web3, contract_manager): c1_service_proxy, _ = deploy_service_registry_and_set_urls( private_keys=private_keys, web3=web3, contract_manager=contract_manager, service_registry_address=service_registry_address, ) with pytest.raises(BrokenPreconditionError): c1_service_proxy.set_url("") with pytest.raises(BrokenPreconditionError): c1_service_proxy.set_url("raiden-network.com") def test_service_registry_random_pfs( service_registry_address, private_keys, web3, contract_manager ): addresses = [privatekey_to_address(key) for key in private_keys] c1_service_proxy, urls = deploy_service_registry_and_set_urls( private_keys=private_keys, web3=web3, contract_manager=contract_manager, service_registry_address=service_registry_address, ) assert c1_service_proxy.ever_made_deposits_len(BLOCK_ID_LATEST) == 3 # Test that getting the url for each service address works for idx, address in enumerate(addresses): assert c1_service_proxy.get_service_url(BLOCK_ID_LATEST, address) == urls[idx] # Test that getting the url for a non-existing service address returns None assert c1_service_proxy.get_service_url(BLOCK_ID_LATEST, HOP1) is None # Test that get_service_address by index works for idx, address in enumerate(addresses): assert c1_service_proxy.ever_made_deposits(BLOCK_ID_LATEST, idx) == address # Test that getting the address for an index out of bounds returns None assert not c1_service_proxy.ever_made_deposits(BLOCK_ID_LATEST, 9999) mock_get_pfs_info = Mock() mock_get_pfs_info.return_value.price = 100 with patch("raiden.network.pathfinding.get_pfs_info", mock_get_pfs_info): # Make sure that too expensive PFSes are not considered valid assert not get_valid_pfs_url( c1_service_proxy, 0, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(99) ) # ...but ones with the expected price are fine assert ( get_valid_pfs_url( c1_service_proxy, 0, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(100) ) == urls[0] ) # Test that getting a random service from the proxy works assert ( get_random_pfs(c1_service_proxy, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(100)) in urls ) def test_service_registry_events(service_registry_address, private_keys, web3, contract_manager): """ - Test that `RaidenContractFilter` successfully matches on `RegisteredService`. - Test that blockchain-event, state_change and raiden-event decoding methods work. """ c1_service_proxy, _ = deploy_service_registry_and_set_urls( private_keys=private_keys, web3=web3, contract_manager=contract_manager, service_registry_address=service_registry_address, ) assert c1_service_proxy.ever_made_deposits_len(BLOCK_ID_LATEST) == 3 # register filter event_filter = RaidenContractFilter(service_registry=c1_service_proxy) manager = ContractManager(contracts_precompiled_path(RAIDEN_CONTRACT_VERSION)) flt = event_filter.to_web3_filters( contract_manager=manager, from_block=0, to_block="latest", node_address="0x6666666666666666666666666666666666666666", ) web3 = c1_service_proxy.client.web3 flt[0].pop("_name") events = web3.eth.getLogs(flt[0]) assert len(events) == 3 ABI = c1_service_proxy.proxy.abi decoded_events = [decode_raiden_event_to_internal(ABI, 0, ev) for ev in events] assert len(decoded_events) == 3 state_changes = [update_service_addresses_from_event(ev) for ev in decoded_events] assert len(state_changes) == 3 raiden_events = [UpdateServicesAddresses.from_state_change(sc) for sc in state_changes] assert len(raiden_events) == 3
modoboa/lib/migrations/0003_rename_parameters.py
HarshCasper/modoboa
1,602
11081851
from django.db import models, migrations APPS = ( ("modoboa_admin", "admin"), ("modoboa_admin_limits", "limits"), ("modoboa_admin_relaydomains", "relaydomains"), ) def rename_parameters(apps, schema_editor): """Rename old parameters.""" Parameter = apps.get_model("lib", "Parameter") UserParameter = apps.get_model("lib", "UserParameter") for oldapp, newapp in APPS: for p in Parameter.objects.filter(name__startswith=oldapp): p.name = p.name.replace(oldapp, newapp) p.save() for p in UserParameter.objects.filter(name__startswith=oldapp): p.name = p.name.replace(oldapp, newapp) p.save() class Migration(migrations.Migration): dependencies = [ ('lib', '0002_rename_parameters'), ('admin', '0001_initial'), ] operations = [ migrations.RunPython(rename_parameters) ]
Alignment/OfflineValidation/python/runGCPTkAlMap.py
ckamtsikis/cmssw
852
11081862
<filename>Alignment/OfflineValidation/python/runGCPTkAlMap.py import sys import copy import time from Alignment.OfflineValidation.TkAlMap import TkAlMap ''' Script for plotting TkAlMaps How to run: python runGCPTkAlMap.py -b inFile=<file_path> compAl=<c_alignment_name> refAl=<r_alignment_name> TkVersion=<phase> outDir=<out_dir> colPal=<col_int> defRanges=<range_str> TkautoVersion= <tk_version_bool> savePNG=<png_bool> savePDF=<pdf_bool> do4sigCap=<4sig_bool> doDefRange=<drange_bool> doFullRange=<frange_bool> doFull=<full_bool> doPixel=<pixel_bool> doStrips=<strips_bool> Explanation: - Mandatory arguments: inFile=<file_path> path to root file containing geometry comparison tree "alignTree" compAl=<c_alignment_name> name of alignment beeing compared (for title) refAl=<r_alignment_name> name of reference alignment (for title) - Optional arguments: TkVersion=<phase> tracker version valid options: phase0, phase1 outDir=<out_dir> directory where to store the images colPal=<col_int> color palette: 1 is rainbow palette, 2 is diverging color palette (blue to red) defRanges=<range_str> string containing changes to default range in format "<var>_range=[<min>,<max>];<var2>_..." example: "dr_range=[-10,10];rdphi_range=[-2.02,120];" TkautoVersion=<tk_version_bool> string boolean telling wheter or not to auto detect TkVersion (will override the TkVersion=<phase> selection) savePNG=<png_bool> string boolean to save or not save as png savePDF=<pdf_bool> string boolean to save or not save as pdf do4sigCap=<4sig_bool> string boolean to plot 4sigma capped plots or not doDefRange=<drange_bool> string boolean to plot default range capped plots or not doFullRange=<frange_bool> string boolean to plot un-capped plots or not doFull=<full_bool> string boolean to plot full detector or not doPixel=<pixel_bool> string boolean to plot separate pixel detector or not doStrips=<strips_bool> string boolean to plot separate strips detector or not ''' print('*---------------------------------------*') print('| GCP TkAlMap |') print('*---------------------------------------*') #var_list = ['dr'] #var_list = ['dx', 'dy', 'dz'] var_list = ['dr', 'dx', 'dy', 'dz', 'rdphi', 'dphi', 'dalpha', 'dbeta', 'dgamma', 'du', 'dv', 'dw', 'da', 'db', 'dg'] var_ranges = {} for var in var_list: var_ranges[var] = [None, None] # Our own parser print('Reading arguments') arguments = sys.argv al_ref = 'Reference Alignment' al_comp = 'Compared Alignment' out_dir = '.' phase_str = '' auto_tk_str = '' palette_str = '' range_str = '' save_png = False save_pdf = True do_4scap = False do_drange = False do_frange = True do_full = True do_pixel = False do_strips = False save_png_str = '' save_pdf_str = '' do_4scap_str = '' do_drange_str = '' do_frange_str = '' do_full_str = '' do_pixel_str = '' do_strips_str = '' for arg in arguments: if 'inFile=' in arg : in_file = arg.replace('inFile=', '') if 'refAl=' in arg : al_ref = arg.replace('refAl=', '') if 'compAl=' in arg : al_comp = arg.replace('compAl=', '') if 'outDir=' in arg : out_dir = arg.replace('outDir=', '') if 'TkVersion='in arg : phase_str = arg.replace('TkVersion=', '') if 'TkautoVersion='in arg: auto_tk_str = arg.replace('TkautoVersion=', '') if 'colPal='in arg : palette_str = arg.replace('colPal=', '') if 'defRanges=' in arg : range_str = arg.replace('defRanges=', '') # Limit outputs if 'savePNG=' in arg : save_png_str = arg.replace('savePNG=', '') if 'savePDF=' in arg : save_pdf_str = arg.replace('savePDF=', '') if 'do4sigCap=' in arg : do_4scap_str = arg.replace('do4sigCap=', '') if 'doDefRange=' in arg : do_drange_str = arg.replace('doDefRange=', '') if 'doFullRange=' in arg : do_frange_str = arg.replace('doFullRange=', '') if 'doFull=' in arg : do_full_str = arg.replace('doFull=', '') if 'doPixel=' in arg : do_pixel_str = arg.replace('doPixel=', '') if 'doStrips=' in arg : do_strips_str = arg.replace('doStrips=', '') # Digest arguments phase = 1 title = al_comp + ' - ' + al_ref auto_tk = True if 'FALSE' in auto_tk_str.upper(): auto_tk = False if 'PHASE0' in phase_str.upper() : phase = 0 geometry_file = 'TkAlMapDesign_phase1_cfg.py' if phase == 1: geometry_file = 'TkAlMapDesign_phase0_cfg.py' palette = 2 if '1' in palette_str: palette = 1 if 'TRUE' in save_png_str .upper(): save_png = True if 'TRUE' in save_pdf_str .upper(): save_pdf = True if 'TRUE' in do_4scap_str .upper(): do_4scap = True if 'TRUE' in do_drange_str.upper(): do_drange = True if 'TRUE' in do_frange_str.upper(): do_frange = True if 'TRUE' in do_full_str .upper(): do_full = True if 'TRUE' in do_pixel_str .upper(): do_pixel = True if 'TRUE' in do_strips_str.upper(): do_strips = True if 'FALSE' in save_png_str .upper(): save_png = False if 'FALSE' in save_pdf_str .upper(): save_pdf = False if 'FALSE' in do_4scap_str .upper(): do_4scap = False if 'FALSE' in do_drange_str.upper(): do_drange = False if 'FALSE' in do_frange_str.upper(): do_frange = False if 'FALSE' in do_full_str .upper(): do_full = False if 'FALSE' in do_pixel_str .upper(): do_pixel = False if 'FALSE' in do_strips_str.upper(): do_strips = False range_str_splt = range_str.split(';') for var_range_str in range_str_splt: cur_var = var_range_str.split('=')[0] if cur_var == '': continue cur_range = eval(var_range_str.split('=')[1]) for var in var_ranges: if var+'_range' == cur_var: if cur_range[0] != -99999: var_ranges[var][0] = cur_range[0] if cur_range[1] != -99999: var_ranges[var][1] = cur_range[1] #max_val = float(var_range_str.split('=')[1].split(',')[0].replace('[', '')) #min_val = float(var_range_str.split('=')[1].split(',')[1].replace(']', '')) print('Current setup:') print(' - reference alingment : '+al_ref) print(' - compared alingment : '+al_comp) print(' - tracker version : phase '+str(phase)) print(' - auto detect tracker version : '+str(auto_tk)) print(' - color palette : '+str(palette)) print(' - input root file : '+in_file) print(' - output directory : '+out_dir) print(' - saving as png : '+str(save_png)) print(' - saving as pdf : '+str(save_pdf)) print('') print('Active plots:') print(' - plot 4 sigma capped values : '+str(do_4scap)) print(' - plot default range capped values : '+str(do_drange)) print(' - plot un-capped values : '+str(do_frange)) print(' - plot full detector : '+str(do_full)) print(' - plot pixel detector : '+str(do_pixel)) print(' - plot strips detector : '+str(do_strips)) print('') print('Changed default ranges:') for var in var_ranges: if var_ranges[var][0] is None and var_ranges[var][1] is None: continue prt_srt = ' - '+var+'\t: [ ' if var_ranges[var][0] is None: prt_srt += 'default' else: prt_srt += str(var_ranges[var][0]) prt_srt += '\t, ' if var_ranges[var][1] is None: prt_srt += 'default' else: prt_srt += str(var_ranges[var][1]) prt_srt += '\t]' print(prt_srt) # Load maps for different configurations print('Loading maps') TkMap_full = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=False, GEO_file=geometry_file, tracker='full', palette=palette, check_tracker=auto_tk) TkMap_pixel = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=False, GEO_file=geometry_file, tracker='pixel', palette=palette, check_tracker=auto_tk) TkMap_strips = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=False, GEO_file=geometry_file, tracker='strips', palette=palette, check_tracker=auto_tk) TkMap_cap_full = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=True, GEO_file=geometry_file, tracker='full', palette=palette, check_tracker=auto_tk) TkMap_cap_pixel = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=True, GEO_file=geometry_file, tracker='pixel', palette=palette, check_tracker=auto_tk) TkMap_cap_strips = TkAlMap('test', title, in_file, use_default_range=False, two_sigma_cap=True, GEO_file=geometry_file, tracker='strips', palette=palette, check_tracker=auto_tk) TkMap_drange_full = TkAlMap('test', title, in_file, use_default_range=True, two_sigma_cap=False, GEO_file=geometry_file, tracker='full', palette=palette, check_tracker=auto_tk) TkMap_drange_pixel = TkAlMap('test', title, in_file, use_default_range=True, two_sigma_cap=False, GEO_file=geometry_file, tracker='pixel', palette=palette, check_tracker=auto_tk) TkMap_drange_strips = TkAlMap('test', title, in_file, use_default_range=True, two_sigma_cap=False, GEO_file=geometry_file, tracker='strips', palette=palette, check_tracker=auto_tk) ts_start = time.time() for var in var_list: print('----- Evaluating variable: '+var) # Usual setup if do_frange: if do_full: tmp_full = TkMap_full tmp_full.set_var(var) tmp_full.analyse() if save_pdf: tmp_full.save(out_dir=out_dir) if save_png: tmp_full.save(out_dir=out_dir, extension='png') tmp_full.plot_variable_distribution(out_dir=out_dir) if do_pixel: tmp_pixel = TkMap_pixel tmp_pixel.set_var(var) tmp_pixel.analyse() if save_pdf: tmp_pixel.save(out_dir=out_dir) if save_png: tmp_pixel.save(out_dir=out_dir, extension='png') tmp_pixel.plot_variable_distribution(out_dir=out_dir) if do_strips: tmp_strips = TkMap_strips tmp_strips.set_var(var) tmp_strips.analyse() if save_pdf: tmp_strips.save(out_dir=out_dir) if save_png: tmp_strips.save(out_dir=out_dir, extension='png') tmp_strips.plot_variable_distribution(out_dir=out_dir) # 4 sigma capping if do_4scap: if do_full: tmp_cap_full = TkMap_cap_full tmp_cap_full.set_var(var) tmp_cap_full.analyse() if save_pdf: tmp_cap_full.save(out_dir=out_dir) if save_png: tmp_cap_full.save(out_dir=out_dir, extension='png') if do_pixel: tmp_cap_pixel = TkMap_cap_pixel tmp_cap_pixel.set_var(var) tmp_cap_pixel.analyse() if save_pdf: tmp_cap_pixel.save(out_dir=out_dir) if save_png: tmp_cap_pixel.save(out_dir=out_dir, extension='png') if do_strips: tmp_cap_strips = TkMap_cap_strips tmp_cap_strips.set_var(var) tmp_cap_strips.analyse() if save_pdf: tmp_cap_strips.save(out_dir=out_dir) if save_png: tmp_cap_strips.save(out_dir=out_dir, extension='png') # default ranges if do_drange: if do_full: tmp_drange_full = TkMap_drange_full tmp_drange_full.set_var(var, var_ranges[var]) tmp_drange_full.analyse() if save_pdf: tmp_drange_full.save(out_dir=out_dir) if save_png: tmp_drange_full.save(out_dir=out_dir, extension='png') if do_pixel: tmp_drange_pixel = TkMap_drange_pixel tmp_drange_pixel.set_var(var, var_ranges[var]) tmp_drange_pixel.analyse() if save_pdf: tmp_drange_pixel.save(out_dir=out_dir) if save_png: tmp_drange_pixel.save(out_dir=out_dir, extension='png') if do_strips: tmp_drange_strips = TkMap_drange_strips tmp_drange_strips.set_var(var, var_ranges[var]) tmp_drange_strips.analyse() if save_pdf: tmp_drange_strips.save(out_dir=out_dir) if save_png: tmp_drange_strips.save(out_dir=out_dir, extension='png') TkMap_full.clean_up() TkMap_pixel.clean_up() TkMap_strips.clean_up() TkMap_cap_full.clean_up() TkMap_cap_pixel.clean_up() TkMap_cap_strips.clean_up() TkMap_drange_full.clean_up() TkMap_drange_pixel.clean_up() TkMap_drange_strips.clean_up() print('TOOK: '+str(time.time()-ts_start)+' s')
gen_pb_graph.py
semaralawa/Maix_Toolbox
140
11081864
<reponame>semaralawa/Maix_Toolbox #!/usr/bin/python3 import tensorflow as tf import sys import os model = sys.argv[1] graph = tf.get_default_graph() graph_def = graph.as_graph_def() graph_def.ParseFromString(tf.gfile.FastGFile(model, 'rb').read()) tf.import_graph_def(graph_def, name='graph') os.system('rm -f log/*') summaryWriter = tf.summary.FileWriter('log/', graph) os.system('tensorboard --logdir log/')
app/auth/forms.py
Joey-Wondersign/Staffjoy-suite-Joey
890
11081875
from flask.ext.wtf import Form, RecaptchaField from wtforms import StringField, PasswordField, BooleanField, \ SubmitField, SelectField, HiddenField from wtforms.fields.html5 import EmailField from wtforms.validators import Required, Length, Email, Regexp, EqualTo from wtforms import ValidationError from app.models import User from app.plans import plans class SignUpForm(Form): # WTF doesn"t have default "placeholder", so we use "label" for that name = StringField( "Name", validators=[Required(), Length(1, 256)], description="<NAME>") email = StringField( "Email", validators=[Required(), Length(1, 256), Email()], description="<EMAIL>") username = StringField( "Username", validators=[ Required(), Length(1, 64), Regexp("^[A-Za-z][A-Za-z0-9_.]*$", 0, "Usernames must have only letters, " "numbers, dots or underscores") ], description="7Bridges") password = PasswordField( "Password", validators=[Length(8, 256), Required()], description="??????") password2 = PasswordField( "<PASSWORD>", validators=[ Required(), EqualTo("password", message="Passwords must match") ], description="??????") recaptcha = RecaptchaField() submit = SubmitField("Submit") def validate_email(self, field): if User.query.filter_by(email=field.data.lower().strip()).first(): raise ValidationError("Email already registered.") def validate_username(self, field): if User.query.filter_by(username=field.data.lower().strip()).first(): raise ValidationError("Username already in use.") class FreeTrialForm(Form): # WTF doesn"t have default "placeholder", so we use "label" for that name = StringField( "Your Name", validators=[Required(), Length(1, 256)], description="<NAME>") email = StringField( "Your Email", validators=[Required(), Length(1, 256), Email()], description="<EMAIL>") password = PasswordField( "Password", validators=[Length(8, 256), Required()], description="??????") company_name = StringField( "Name of your company", validators=[Required(), Length(1, 256)], description="7 Bridges Coffee") plan = SelectField( "Type of Workers", choices=[(key, value["for"]) for key, value in plans.iteritems() if value["active"]], validators=[Required()], ) enterprise_access = SelectField( u"Are you planning on scheduling more than 40 workers?", choices=[ ("no", "No"), ("yes", "Yes"), ], default="no", validators=[Required()], ) day_week_starts = SelectField( u"Day of the week on which your schedules begin", choices=[ ("monday", "Monday"), ("tuesday", "Tuesday"), ("wednesday", "Wednesday"), ("thursday", "Thursday"), ("friday", "Friday"), ("saturday", "Saturday"), ("Sunday", "Sunday"), ], validators=[Required()], ) timezone = HiddenField() tos = BooleanField( "I agree to the <a href=\"/terms/\">Terms and Conditions</a> and the <a href=\"/privacy-policy/\">Privacy Policy</a>.", validators=[ Required( message="You must agree to these terms to create or activate an account." ) ]) submit = SubmitField("Submit") def validate_email(self, field): if User.query.filter_by(email=field.data.lower().strip()).first(): raise ValidationError("Email already registered.") class LoginForm(Form): email = StringField( "Email", validators=[Required(), Length(1, 256)], description="<EMAIL>") password = PasswordField( "Password", validators=[Required(), Length(1, 256)], description="??????") remember_me = BooleanField("Keep me logged in") submit = SubmitField("Submit") class NativeLoginForm(Form): email = EmailField( "Email", validators=[Required(), Length(1, 256)], description="<EMAIL>") password = PasswordField( "Password", validators=[Required(), Length(1, 256)], description="??????") submit = SubmitField("Submit") class RequestPasswordResetForm(Form): email = StringField( "Email", validators=[Required(), Length(1, 64), Email()], description="<EMAIL>") recaptcha = RecaptchaField() submit = SubmitField("Request Reset") class PasswordResetForm(Form): email = StringField( "Email", validators=[Required(), Length(1, 64), Email()], description="<EMAIL>") password = PasswordField( "Password", validators=[Length(8, 256), Required()], description="??????") password2 = PasswordField("Confirm password", validators=[Required(), \ EqualTo("password", message="Passwords must match")], description="??????") submit = SubmitField("Reset Password") def validate_email(self, field): if User.query.filter_by( email=field.data.lower().strip()).first() is None: raise ValidationError("Unknown email address.") class ChangePasswordForm(Form): old_password = PasswordField( "Current Password", validators=[Length(1, 256), Required()], description="??????") password = PasswordField( "<PASSWORD>", validators=[Length(8, 256), Required()], description="??????") password2 = PasswordField("Confirm password", validators=[Required(), \ EqualTo("password", message="Passwords must match")], description="??????") submit = SubmitField("Change") class ChangeNameForm(Form): name = StringField( "Name", validators=[Required(), Length(1, 256)], description="<NAME>") submit = SubmitField("Update") class ChangeEmailForm(Form): email = StringField( "Email", validators=[Required(), Length(1, 256), Email()], description="<EMAIL>") submit = SubmitField("Update") def __init__(self, user, *args, **kwargs): super(ChangeEmailForm, self).__init__(*args, **kwargs) self.user = user def validate_email(self, field): match = User.query.filter_by(email=field.data.lower().strip()).first() if match is not None and match.id != self.user.id: raise ValidationError("Email already registered.") class ChangeUsernameForm(Form): username = StringField( "Username", validators=[ Required(), Length(1, 64), Regexp("^[A-Za-z][A-Za-z0-9_.]*$", 0, "Usernames must have only letters, " "numbers, dots or underscores") ], description="7Bridges") submit = SubmitField("Update") def __init__(self, user, *args, **kwargs): super(ChangeUsernameForm, self).__init__(*args, **kwargs) self.user = user def validate_username(self, field): match = User.query.filter_by( username=field.data.lower().strip()).first() if match is not None and match.id != self.user.id: raise ValidationError("Username already in use.") class ActivateForm(Form): name = StringField( "Name", validators=[Required(), Length(1, 256)], description="<NAME>") username = StringField( "Username", validators=[ Required(), Length(1, 64), Regexp("^[A-Za-z][A-Za-z0-9_.]*$", 0, "Usernames must have only letters, " "numbers, dots or underscores") ], description="7Bridges") password = PasswordField( "Password", validators=[Length(8, 256), Required()], description="??????") password2 = PasswordField("Confirm password", validators=[Required(), \ EqualTo("password", message="Passwords must match")], description="??????") tos = BooleanField( "I agree to the <a href=\"/terms/\">Terms and Conditions</a> and the <a href=\"/privacy-policy/\">Privacy Policy</a>.", validators=[ Required( message="You must agree to these terms to create or activate an account." ) ]) recaptcha = RecaptchaField() submit = SubmitField("Submit") def validate_username(self, field): if User.query.filter_by(username=field.data.lower().strip()).first(): raise ValidationError("Username already in use.") class ApiKeyForm(Form): name = StringField( "Key Label", validators=[Required(), Length(1, 256)], description="Which program will use this key?") submit = SubmitField("Issue") class SessionsForm(Form): submit = SubmitField("Logout All Sessions") class ChangeNotificationsForm(Form): enable_notification_emails = BooleanField( "Send Email Alerts", ) enable_timeclock_notification_sms = BooleanField( "Send SMS Timeclock Notifications") submit = SubmitField("Save") class NewPhoneNumberForm(Form): def __init__(self, country_code_choices, *args, **kwargs): super(NewPhoneNumberForm, self).__init__(*args, **kwargs) self.phone_country_code.choices = country_code_choices phone_country_code = SelectField( "Country Code", choices=[], validators=[Required()], ) phone_national_number = StringField( "National Phone Number", validators=[Required(), Length(1, 256)], description="443-578-3359") submit = SubmitField("Save") class VerifyPhoneNumberForm(Form): pin = StringField( "Verification Pin", validators=[Required(), Length(1, 256)], description="") submit = SubmitField("Confirm") class RemovePhoneNumberForm(Form): submit = SubmitField("Remove phone number")
projects/data_generation/synthetic_multi_view_facial_image_generation/SyntheticDataGeneration.py
ESOGU-SRLAB/opendr
217
11081878
# Copyright 2020-2022 OpenDR European Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # MIT License # # Copyright (c) 2019 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # !/usr/bin/env python3.7 # coding: utf-8 from tqdm import tqdm from shutil import copyfile import cv2 import os from algorithm.DDFA import preprocessing_1 from algorithm.DDFA import preprocessing_2 from algorithm.Rotate_and_Render import test_multipose class MultiviewDataGeneration(): def __init__(self, args): self.path_in = args.path_in self.key = str(args.path_3ddfa + "/example/Images/") self.key1 = str(args.path_3ddfa + "/example/") self.key2 = str(args.path_3ddfa + "/results/") self.save_path = args.save_path self.val_yaw = args.val_yaw self.val_pitch = args.val_pitch self.args = args def eval(self): # STAGE No1 : detect faces and fitting to 3d mesh by main.py execution list_im = [] print("START") a = open("file_list.txt", "w") for subdir, dirs, files in os.walk(self.path_in): current_directory_path = os.path.abspath(subdir) for file in files: name, ext = os.path.splitext(file) if ext == ".jpg": current_image_path = os.path.join(current_directory_path, file) current_image = cv2.imread(current_image_path) list_im.append(current_image_path) a.write(str(file) + os.linesep) cv2.imwrite(os.path.join(self.key, file), current_image) self.args.files = list_im.copy() list_im.clear() preprocessing_1.main(self.args) a.close() # STAGE No2: Landmarks Output with inference.py execution im_list2 = [] d = open(os.path.join(self.key1, 'realign_lmk'), "w") for subdir, dirs, files in os.walk(self.path_in): current_directory_path = os.path.abspath(subdir) self.args.img_prefix = current_directory_path self.args.save_dir = os.path.abspath(self.key2) self.args.save_lmk_dir = os.path.abspath(self.key1) if not os.path.exists(self.args.save_dir): os.mkdir(self.args.save_dir) if not os.path.exists(self.args.save_lmk_dir): os.mkdir(self.args.save_lmk_dir) list_lfw_batch = './file_list.txt' dst = os.path.join(self.args.save_lmk_dir, "file_list.txt") copyfile(list_lfw_batch, dst) b = open("txt_name_batch.txt", "w") for file in files: with open(list_lfw_batch) as f: img_list = [x.strip() for x in f.readlines()] for img_idx, img_fp in enumerate(tqdm(img_list)): if img_fp == str(file): im_list2.append(str(file)) b.write(str(file) + os.linesep) self.args.img_list = './txt_name_batch.txt' b.close() self.args.dump_lmk = 'true' im_list2.clear() preprocessing_2.main(self.args) with open(os.path.join(self.args.save_lmk_dir, 'realign_lmk_')) as f: img_list = [x.strip() for x in f.readlines()] for img_idx, img_fp in enumerate(tqdm(img_list)): d.write(img_fp + os.linesep) d.close() # STAGE No3: Generate Facial Images in specific pitch and yaw angles test_multipose.main(self.save_path, self.val_yaw, self.val_pitch) def fit(self): raise NotImplementedError() def infer(self): raise NotImplementedError() def load(self): raise NotImplementedError() def optimize(self): raise NotImplementedError() def reset(self): raise NotImplementedError() def save(self): raise NotImplementedError()
telegram_gcloner/utils/restricted.py
freepg0099/CloneBot_Heroku
194
11081913
<filename>telegram_gcloner/utils/restricted.py #!/usr/bin/python3 # -*- coding: utf-8 -*- import logging from functools import wraps from utils.callback import callback_delete_message from utils.config_loader import config logger = logging.getLogger(__name__) def restricted(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id ban_list = context.bot_data.get('ban', []) # access control. comment out one or the other as you wish. otherwise you can use any of the following examples. # if user_id in ban_list: if user_id in ban_list or user_id not in config.USER_IDS: logger.info('Unauthorized access denied for {} {}.' .format(update.effective_user.full_name, user_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_private(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id chat_id = update.effective_chat.id ban_list = context.bot_data.get('ban', []) if user_id in ban_list or chat_id < 0: logger.info('Unauthorized access denied for private messages {} {}.' .format(update.effective_user.full_name, user_id)) if chat_id < 0: rsp = update.message.reply_text('Private chat only!') rsp.done.wait(timeout=60) message_id = rsp.result().message_id context.job_queue.run_once(callback_delete_message, config.TIMER_TO_DELETE_MESSAGE, context=(update.message.chat_id, message_id)) context.job_queue.run_once(callback_delete_message, config.TIMER_TO_DELETE_MESSAGE, context=(update.message.chat_id, update.message.message_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_private_and_group(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id chat_id = update.effective_chat.id ban_list = context.bot_data.get('ban', []) if user_id in ban_list or (chat_id < 0 or chat_id not in config.GROUP_IDS): logger.info('Unauthorized access denied for private and group messages{} {}.' .format(update.effective_user.full_name, user_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_group_only(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id chat_id = update.effective_chat.id ban_list = context.bot_data.get('ban', []) if user_id not in config.USER_IDS and (user_id in ban_list or chat_id > 0 or chat_id not in config.GROUP_IDS): logger.info('Unauthorized access denied for group only messages {} {}.' .format(update.effective_user.full_name, user_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_group_and_its_members_in_private(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id chat_id = update.effective_chat.id ban_list = context.bot_data.get('ban', []) allow = False if user_id in config.USER_IDS: allow = True elif user_id not in ban_list: if chat_id < 0: if chat_id in config.GROUP_IDS: allow = True else: for group_id in config.GROUP_IDS: info = context.bot.get_chat_member(chat_id=group_id, user_id=update.effective_user.id) if info.status in ['creator', 'administrator', 'member']: allow = True break if allow is False: logger.info('Unauthorized access denied for group and its members messages{} {}.' .format(update.effective_user.full_name, user_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_user_ids(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id if user_id not in config.USER_IDS: logger.info('Unauthorized access denied for {} {}.' .format(update.effective_user.full_name, user_id)) return return func(update, context, *args, **kwargs) return wrapped def restricted_admin(func): @wraps(func) def wrapped(update, context, *args, **kwargs): if not update.effective_user: return user_id = update.effective_user.id chat_id = update.effective_chat.id if user_id != config.USER_IDS[0]: logger.info("Unauthorized admin access denied for {} {}.".format(update.effective_user.full_name, user_id)) return if chat_id < 0: return return func(update, context, *args, **kwargs) return wrapped
pygimli/frameworks/methodManager.py
JuliusHen/gimli
224
11081921
<filename>pygimli/frameworks/methodManager.py #!/usr/bin/env python # -*- coding: utf-8 -*- """Method Manager Provide the end user interface for method (geophysical) dependent modelling and inversion as well as data and model visualization. """ import numpy as np import pygimli as pg from pygimli.utils import prettyFloat as pf def fit(funct, data, err=None, **kwargs): """Generic function fitter. Fit data to a given function. TODO ---- * Dictionary support for funct to submit user data.. Parameters ---------- funct: callable Function with the first argmument as data space, e.g., x, t, f, Nr. .. Any following arguments are the parameters to be fit. Except if a verbose flag if used. data: iterable (float) Data values err: iterable (float) [None] Data error values in %/100. Default is 1% if None are given. Other Parameters ---------------- *dataSpace*: iterable Keyword argument of the data space of len(data). The name need to fit the first argument of funct. Returns ------- model: array Fitted model parameter. response: array Model response. Example ------- >>> import pygimli as pg >>> >>> func = lambda t, a, b: a*np.exp(b*t) >>> t = np.linspace(1, 2, 20) >>> data = func(t, 1.1, 2.2) >>> model, response = pg.frameworks.fit(func, data, t=t) >>> print(pg.core.round(model, 1e-5)) 2 [1.1, 2.2] >>> _ = pg.plt.plot(t, data, 'o', label='data') >>> _ = pg.plt.plot(t, response, label='response') >>> _ = pg.plt.legend() """ mgr = ParameterInversionManager(funct, **kwargs) model = mgr.invert(data, err, **kwargs) return model, mgr.fw.response # TG: harmonicFit does not really belong here as it is no curve fit # We should rather use a class Decomposition # Discuss .. rename to Framework or InversionFramework since he only manages # the union of Inversion/Modelling and RegionManager(later) class MethodManager(object): """General manager to maintenance a measurement method. Method Manager are the interface to end-user interaction and can be seen as simple but complete application classes which manage all tasks of geophysical data processing. The method manager holds one instance of a forward operator and an appropriate inversion framework to handle modelling and data inversion. Method Manager also helps with data import and export, handle measurement data error estimation as well as model and data visualization. Attributes ---------- verbose : bool Give verbose output. debug : bool Give debug output. fop : :py:mod:`pygimli.frameworks.Modelling` Forward Operator instance .. knows the physics. fop is initialized by :py:mod:`pygimli.manager.MethodManager.initForwardOperator` and calls a valid :py:mod:`pygimli.manager.MethodManager.createForwardOperator` method in any derived classes. inv : :py:mod:`pygimli.frameworks.Inversion`. Inversion framework instance .. knows the reconstruction approach. The attribute inv is initialized by default but can be changed overwriting :py:mod:`pygimli.manager.MethodManager.initInversionFramework` """ def __init__(self, fop=None, fw=None, data=None, **kwargs): """Constructor.""" self._fop = fop self._fw = fw # we hold our own copy of the data self._verbose = kwargs.pop('verbose', False) self._debug = kwargs.pop('debug', False) self.data = None if data is not None: if isinstance(data, str): self.load(data) else: self.data = data # The inversion framework self._initInversionFramework(verbose=self._verbose, debug=self._debug) # The forward operator is stored in self._fw self._initForwardOperator(verbose=self._verbose, **kwargs) # maybe obsolete self.figs = {} self.errIsAbsolute = False def __hash__(self): """Create a hash for Method Manager.""" return pg.utils.strHash(str(type(self))) ^ hash(self.fop) @property def verbose(self): return self._verbose @verbose.setter def verbose(self, v): self._verbose = v self.fw.verbose = self._verbose @property def debug(self): return self._debug @debug.setter def debug(self, v): self._debug = v self.fw.debug = self._debug @property def fw(self): return self._fw @property def fop(self): return self.fw.fop @property def inv(self): return self.fw @property def model(self): return self.fw.model def reinitForwardOperator(self, **kwargs): """Reinitialize the forward operator. Sometimes it can be useful to reinitialize the forward operator. Keyword arguments will be forwarded to 'self.createForwardOperator'. """ self._initForwardOperator(**kwargs) def _initForwardOperator(self, **kwargs): """Initialize or re-initialize the forward operator. Called once in the constructor to force the manager to create the necessary forward operator member. Can be recalled if you need to changed the mangers own forward operator object. If you want an own instance of a valid FOP call createForwardOperator. """ if self._fop is not None: fop = self._fop else: fop = self.createForwardOperator(**kwargs) if fop is None: pg.critical("It seems that createForwardOperator method " "does not return a valid forward operator.") if self.fw is not None: self.fw.reset() self.fw.setForwardOperator(fop) else: pg.critical("No inversion framework defined.") def createForwardOperator(self, **kwargs): """Mandatory interface for derived classes. Here you need to specify which kind of forward operator FOP you want to use. This is called by any initForwardOperator() call. Parameters ---------- **kwargs Any arguments that are necessary for your FOP creation. Returns ------- Modelling Instance of any kind of :py:mod:`pygimli.framework.Modelling`. """ pg.critical("No forward operator defined, either give one or " "overwrite in derived class") def _initInversionFramework(self, **kwargs): """Initialize or re-initialize the inversion framework. Called once in the constructor to force the manager to create the necessary Framework instance. """ self._fw = self.createInversionFramework(**kwargs) if self.fw is None: pg.critical("createInversionFramework does not return " "valid inversion framework.") def createInversionFramework(self, **kwargs): """Create default Inversion framework. Derived classes may overwrite this method. Parameters ---------- **kwargs Any arguments that are necessary for your creation. Returns ------- Inversion Instance of any kind of :py:mod:`pygimli.framework.Inversion`. """ if self._fw is None: return pg.frameworks.Inversion(**kwargs) else: return self._fw def load(self, fileName): """API, overwrite in derived classes.""" pg.critical('API, overwrite in derived classes', fileName) def estimateError(self, data, errLevel=0.01, absError=None): # TODO check, rel or abs in return. """Estimate data error. Create an error of estimated measurement error. On default it returns an array of constant relative errors. More sophisticated error estimation should be done in specialized derived classes. Parameters ---------- data : iterable Data values for which the errors should be estimated. errLevel : float (0.01) Error level in percent/100 (i.e., 3% = 0.03). absError : float (None) Absolute error in the unit of the data. Returns ------- err : array Returning array of size len(data) """ if absError is not None: return absError + data * errLevel return np.ones(len(data)) * errLevel def simulate(self, model, **kwargs): # """Run a simulation aka the forward task.""" ra = self.fop.response(par=model) noiseLevel = kwargs.pop('noiseLevel', 0.0) if noiseLevel > 0: err = self.estimateError(ra, errLevel=noiseLevel) ra *= 1. + pg.randn(ra.size(), seed=kwargs.pop('seed', None)) * err return ra, err return ra def setData(self, data): """Set a data and distribute it to the forward operator""" self.data = data self.applyData(data) def applyData(self, data): """ """ self.fop.data = data def checkData(self, data): """Overwrite for special checks to return data values""" # if self._dataToken == 'nan': # pg.critical('self._dataToken nan, should be set in class', self) # return data(self._dataToken) return data def _ensureData(self, data): """Check data validity""" if data is None: data = self.fw.dataVals vals = self.checkData(data) if vals is None: pg.critical("There are no data values.") if abs(min(vals)) < 1e-12: print(min(vals), max(vals)) pg.critical("There are zero data values.") return vals def checkError(self, err, dataVals=None): """Return relative error. Default we assume 'err' are relative values. Overwrite is derived class if needed. """ if isinstance(err, pg.DataContainer): if not err.haveData('err'): pg.error('Datacontainer have no "err" values. ' 'Fallback set to 0.01') return err['err'] return err def _ensureError(self, err, dataVals=None): """Check error validity""" if err is None: err = self.fw.errorVals vals = self.checkError(err, dataVals) if vals is None: pg.warn('No data error given, set Fallback set to 1%') vals = np.ones(len(dataVals)) * 0.01 try: if min(vals) <= 0: pg.critical("All error values need to be larger then 0. Either" " give and err argument or fill dataContainer " " with a valid 'err' ", min(vals), max(vals)) except ValueError: pg.critical("Can't estimate data error") return vals def preRun(self, *args, **kwargs): """Called just before the inversion run starts.""" pass def postRun(self, *args, **kwargs): """Called just after the inversion run.""" pass def invert(self, data=None, err=None, **kwargs): """Invert the data. Invert the data by calling self.inv.run() with mandatory data and error values. TODO *need dataVals mandatory? what about already loaded data Parameters ---------- dataVals : iterable Data values to be inverted. errVals : iterable | float Error value for the given data. If errVals is float we assume this means to be a global relative error and force self.estimateError to be called. """ if data is not None: self.data = data else: data = self.data dataVals = self._ensureData(data) errVals = self._ensureError(err, dataVals) self.preRun(**kwargs) self.fw.run(dataVals, errVals, **kwargs) self.postRun(**kwargs) return self.fw.model def showModel(self, model, ax=None, **kwargs): """Show a model. Draw model into a given axes or show inversion result from last run. Forwards on default to the self.fop.drawModel function of the modelling operator. If there is no function given, you have to override this method. Parameters ---------- ax : mpl axes Axes object to draw into. Create a new if its not given. model : iterable Model data to be draw. Returns ------- ax, cbar """ if ax is None: fig, ax = pg.plt.subplots() ax, cBar = self.fop.drawModel(ax, model, **kwargs) return ax, cBar def showData(self, data=None, ax=None, **kwargs): """Show the data. Draw data values into a given axes or show the data values from the last run. Forwards on default to the self.fop.drawData function of the modelling operator. If there is no given function given, you have to override this method. Parameters ---------- ax : mpl axes Axes object to draw into. Create a new if its not given. data : iterable | pg.DataContainer Data values to be draw. Returns ------- ax, cbar """ if ax is None: fig, ax = pg.plt.subplots() if data is None: data = self.data return self.fop.drawData(ax, data, **kwargs), None def showResult(self, model=None, ax=None, **kwargs): """Show the last inversion result. TODO ---- DRY: decide showModel or showResult Parameters ---------- ax : mpl axes Axes object to draw into. Create a new if its not given. model : iterable [None] Model values to be draw. Default is self.model from the last run Returns ------- ax, cbar """ if model is None: model = self.model return self.showModel(model, ax=ax, **kwargs) def showFit(self, ax=None, **kwargs): """Show the last inversion data and response.""" ax, cBar = self.showData(data=self.inv.dataVals, error=self.inv.errorVals, label='Data', ax=ax, **kwargs) ax, cBar = self.showData(data=self.inv.response, label='Response', ax=ax, **kwargs) if not kwargs.pop('hideFittingAnnotation', False): fittext = r"rrms: {0}, $\chi^2$: {1}".format( pf(self.fw.inv.relrms()), pf(self.fw.inv.chi2())) ax.text(0.99, 0.005, fittext, transform=ax.transAxes, horizontalalignment='right', verticalalignment='bottom', fontsize=8) if not kwargs.pop('hideLegend', False): ax.legend() return ax, cBar def showResultAndFit(self, **kwargs): """Calls showResults and showFit.""" fig = pg.plt.figure() ax = fig.add_subplot(1, 2, 1) self.showResult(ax=ax, model=self.model, **kwargs) ax1 = fig.add_subplot(2, 2, 2) ax2 = fig.add_subplot(2, 2, 4) self.showFit(axs=[ax1, ax2], **kwargs) fig.tight_layout() return fig @staticmethod def createArgParser(dataSuffix='dat'): """Create default argument parser. TODO move this to some kind of app class Create default argument parser for the following options: -Q, --quiet -R, --robustData: options.robustData -B, --blockyModel: options.blockyModel -l, --lambda: options.lam -i, --maxIter: options.maxIter --depth: options.depth """ import argparse parser = argparse.ArgumentParser( description="usage: %prog [options] *." + dataSuffix) parser.add_argument("-Q", "--quiet", dest="quiet", action="store_true", default=False, help="Be verbose.") # parser.add_argument("-R", "--robustData", dest="robustData", # action="store_true", default=False, # help="Robust data (L1 norm) minimization.") # parser.add_argument("-B", "--blockyModel", dest="blockyModel", # action="store_true", default=False, # help="Blocky model (L1 norm) regularization.") parser.add_argument('-l', "--lambda", dest="lam", type=float, default=100, help="Regularization strength.") parser.add_argument('-i', "--maxIter", dest="maxIter", type=int, default=20, help="Maximum iteration count.") # parser.add_argument("--depth", dest="depth", type=float, # default=None, # help="Depth of inversion domain. [None=auto].") parser.add_argument('dataFileName') return parser class ParameterInversionManager(MethodManager): """Framework to invert unconstrained parameters.""" def __init__(self, funct=None, fop=None, **kwargs): """Constructor.""" if fop is not None: if not isinstance(fop, pg.frameworks.ParameterModelling): pg.critical("We need a fop if type ", pg.frameworks.ParameterModelling) elif funct is not None: fop = pg.frameworks.ParameterModelling(funct) else: pg.critical("you should either give a valid fop or a function so " "I can create the fop for you") super(ParameterInversionManager, self).__init__(fop, **kwargs) def createInversionFramework(self, **kwargs): """ """ return pg.frameworks.MarquardtInversion(**kwargs) def invert(self, data=None, err=None, **kwargs): """ Parameters ---------- limits: {str: [min, max]} Set limits for parameter by parameter name. startModel: {str: startModel} Set the start value for parameter by parameter name. """ dataSpace = kwargs.pop(self.fop.dataSpaceName, None) if dataSpace is not None: self.fop.dataSpace = dataSpace limits = kwargs.pop('limits', {}) for k, v in limits.items(): self.fop.setRegionProperties(k, limits=v) startModel = kwargs.pop('startModel', {}) if isinstance(startModel, dict): for k, v in startModel.items(): self.fop.setRegionProperties(k, startModel=v) else: kwargs['startModel'] = startModel return super(ParameterInversionManager, self).invert(data=data, err=err, **kwargs) class MethodManager1d(MethodManager): """Method Manager base class for managers on a 1d discretization.""" def __init__(self, fop=None, **kwargs): """Constructor.""" super(MethodManager1d, self).__init__(fop, **kwargs) def createInversionFramework(self, **kwargs): """ """ return pg.frameworks.Block1DInversion(**kwargs) def invert(self, data=None, err=None, **kwargs): """ """ return super(MethodManager1d, self).invert(data=data, err=err, **kwargs) class MeshMethodManager(MethodManager): def __init__(self, **kwargs): """Constructor. Attribute --------- mesh: pg.Mesh Copy of the main mesh to be distributed to inversion and the fop. You can overwrite it with invert(mesh=mesh). """ super(MeshMethodManager, self).__init__(**kwargs) self.mesh = None @property def paraDomain(self): return self.fop.paraDomain def paraModel(self, model=None): """Give the model parameter regarding the parameter mesh.""" if model is None: model = self.fw.model return self.fop.paraModel(model) def createMesh(self, data=None, **kwargs): """API, implement in derived classes.""" pg.critical('no default mesh generation defined .. implement in ' 'derived class') def setMesh(self, mesh, **kwargs): """Set a mesh and distribute it to the forward operator""" self.mesh = mesh self.applyMesh(mesh, **kwargs) def applyMesh(self, mesh, ignoreRegionManager=False, **kwargs): """ """ if ignoreRegionManager: mesh = self.fop.createRefinedFwdMesh(mesh, **kwargs) self.fop.setMesh(mesh, ignoreRegionManager=ignoreRegionManager) def invert(self, data=None, mesh=None, zWeight=1.0, startModel=None, **kwargs): """Run the full inversion. Parameters ---------- data : pg.DataContainer mesh : pg.Mesh [None] zWeight : float [1.0] startModel : float | iterable [None] If set to None fop.createDefaultStartModel(dataValues) is called. Keyword Arguments ----------------- forwarded to Inversion.run Returns ------- model : array Model mapped for match the paraDomain Cell markers. The calculated model is in self.fw.model. """ if data is None: data = self.data if data is None: pg.critical('No data given for inversion') self.applyData(data) # no mesh given and there is no mesh known .. we create them if mesh is None and self.mesh is None: mesh = self.createMesh(data, **kwargs) # a mesh was given or created so we forward it to the fop if mesh is not None: self.setMesh(mesh) # remove unused keyword argument .. need better kwargfs self.fop._refineP2 = kwargs.pop('refineP2', False) dataVals = self._ensureData(self.fop.data) errorVals = self._ensureError(self.fop.data, dataVals) if self.fop.mesh() is None: pg.critical('Please provide a mesh') # inversion will call this itsself as default behaviour # if startModel is None: # startModel = self.fop.createStartModel(dataVals) # pg._g('invert-dats', dataVals) # pg._g('invert-err', errVals) # pg._g('invert-sm', startModel) kwargs['startModel'] = startModel self.fop.setRegionProperties('*', zWeight=zWeight) # Limits is no mesh related argument here or base?? limits = kwargs.pop('limits', None) if limits is not None: self.fop.setRegionProperties('*', limits=limits) self.preRun(**kwargs) self.fw.run(dataVals, errorVals, **kwargs) self.postRun(**kwargs) return self.paraModel(self.fw.model) def showFit(self, axs=None, **kwargs): """Show data and the inversion result model response.""" orientation = 'vertical' if axs is None: fig, axs = pg.plt.subplots(nrows=1, ncols=2) orientation = 'horizontal' self.showData(data=self.inv.dataVals, orientation=orientation, ax=axs[0], **kwargs) axs[0].text(0.0, 1.03, "Data", transform=axs[0].transAxes, horizontalalignment='left', verticalalignment='center') resp = None data = None if 'model' in kwargs: resp = self.fop.response(kwargs['model']) data = self._ensureData(self.fop.data) else: resp = self.inv.response data = self.fw.dataVals self.showData(data=resp, orientation=orientation, ax=axs[1], **kwargs) axs[1].text(0.0, 1.03, "Response", transform=axs[1].transAxes, horizontalalignment='left', verticalalignment='center') fittext = r"rrms: {0}%, $\chi^2$: {1}".format( pg.pf(pg.utils.rrms(data, resp)*100), pg.pf(self.fw.chi2History[-1])) axs[1].text(1.0, 1.03, fittext, transform=axs[1].transAxes, horizontalalignment='right', verticalalignment='center') # if not kwargs.pop('hideFittingAnnotation', False): # axs[0].text(0.01, 1.0025, "rrms: {0}, $\chi^2$: {1}" # .format(pg.utils.prettyFloat(self.fw.inv.relrms()), # pg.utils.prettyFloat(self.fw.inv.chi2())), # transform=axs[0].transAxes, # horizontalalignment='left', # verticalalignment='bottom') return axs def coverage(self): """Return coverage vector considering the logarithmic transformation. """ covTrans = pg.core.coverageDCtrans(self.fop.jacobian(), 1.0 / self.inv.response, 1.0 / self.inv.model) nCells = self.fop.paraDomain.cellCount() return np.log10(covTrans[:nCells] / self.fop.paraDomain.cellSizes()) def standardizedCoverage(self, threshhold=0.01): """Return standardized coverage vector (0|1) using thresholding. """ return 1.0*(abs(self.coverage()) > threshhold) class PetroInversionManager(MeshMethodManager): """Class for petrophysical inversion (s. Rücker et al. 2017).""" def __init__(self, petro, mgr=None, **kwargs): """Initialize instance with manager and petrophysical relation.""" petrofop = kwargs.pop('petrofop', None) if petrofop is None: fop = kwargs.pop('fop', None) if fop is None and mgr is not None: # Check! why I can't use mgr.fop # fop = mgr.fop fop = mgr.createForwardOperator() self.checkData = mgr.checkData self.checkError = mgr.checkError if fop is not None: if not isinstance(fop, pg.frameworks.PetroModelling): petrofop = pg.frameworks.PetroModelling(fop, petro) if petrofop is None: print(mgr) print(fop) pg.critical('implement me') super().__init__(fop=petrofop, **kwargs) # Really necessary? Should a combination of petro and joint do the same class JointPetroInversionManager(MeshMethodManager): """Joint inversion targeting at the same parameter through petrophysics.""" def __init__(self, petros, mgrs): """Initialize with lists of managers and transformations""" self.mgrs = mgrs self.fops = [pg.frameworks.PetroModelling(m.fop, p) for p, m in zip(petros, mgrs)] super().__init__(fop=pg.frameworks.JointModelling(self.fops)) # just hold a local copy self.dataTrans = pg.trans.TransCumulative() def checkError(self, err, data=None): """Collect error values.""" if len(err) != len(self.mgrs): pg.critical("Please provide data for all managers") vals = pg.Vector(0) for i, mgr in enumerate(self.mgrs): # we get the data values again or we have to split data dataVals = mgr.checkData(self.fop._data[i]) vals = pg.cat(vals, mgr.checkError(err[i], dataVals)) return vals def checkData(self, data): """Collect data values.""" if len(data) != len(self.mgrs): pg.critical("Please provide data for all managers") self.dataTrans.clear() vals = pg.Vector(0) for i, mgr in enumerate(self.mgrs): self.dataTrans.add(mgr.inv.dataTrans, data[i].size()) vals = pg.cat(vals, mgr.checkData(data[i])) self.inv.dataTrans = self.dataTrans return vals def invert(self, data, **kwargs): """Run inversion""" limits = kwargs.pop('limits', [0., 1.]) self.fop.modelTrans.setLowerBound(limits[0]) self.fop.modelTrans.setUpperBound(limits[1]) kwargs['startModel'] = kwargs.pop('startModel', (limits[1]+limits[0])/2.) return super().invert(data, **kwargs)
build/lib/aiocoap/util/asyncio/recvmsg.py
DylanJPettij/CoapTemperatureSensor
229
11081937
# This file is part of the Python aiocoap library project. # # Copyright (c) 2012-2014 <NAME> <http://sixpinetrees.blogspot.com/>, # 2013-2014 <NAME> <<EMAIL>> # # aiocoap is free software, this file is published under the MIT license as # described in the accompanying LICENSE file. from .. import socknumbers from asyncio import BaseProtocol from asyncio.transports import BaseTransport class RecvmsgDatagramProtocol(BaseProtocol): """Callback interface similar to asyncio.DatagramProtocol, but dealing with recvmsg data.""" def datagram_msg_received(self, data, ancdata, flags, address): """Called when some datagram is received.""" def datagram_errqueue_received(self, data, ancdata, flags, address): """Called when some data is received from the error queue""" def error_received(self, exc): """Called when a send or receive operation raises an OSError.""" def _set_result_unless_cancelled(fut, result): """Helper setting the result only if the future was not cancelled.""" if fut.cancelled(): return fut.set_result(result) class RecvmsgSelectorDatagramTransport(BaseTransport): """A simple loop-independent transport that largely mimicks DatagramTransport but interfaces a RecvmsgSelectorDatagramProtocol. This does not implement any flow control, based on the assumption that it's not needed, for CoAP has its own flow control mechanisms.""" max_size = 4096 # Buffer size passed to recvmsg() -- should suffice for a full MTU package and ample ancdata def __init__(self, loop, sock, protocol, waiter): super().__init__(extra={'socket': sock}) self.__sock = sock # Persisted outside of sock because when GC breaks a reference cycle, # it can happen that the sock gets closed before this; we have to hope # that no new file gets opened and registered in the meantime. self.__sock_fileno = sock.fileno() self._loop = loop self._protocol = protocol loop.call_soon(protocol.connection_made, self) # only start reading when connection_made() has been called import weakref # We could add error handling in here like this: # ``` # self = s() # if self is None or self.__sock is None: # # The read event happened briefly before .close() was called, # # but late enough that the caller of close did not yield to let # # the event out; when remove_reader was then called, the # # pending event was not removed, so it fires now that the # # socket is already closed. (Depending on the GC's whims, self # # may or may not have been GC'd, but if it wasn't yet, the # # closed state is indicated by the lack of a __sock. # # # # Thus, silently (preferably with an ICMP error, but really # # can't do that)... # return # ``` # That was done tentatively while debugging errors flying out of # _read_ready, but it turned out that this was not the actual error # source. Thus, I'm not adding the handler and assuming that close's # remove_reader is not racing against callbacks, and thus that s() is # always valid while the transport is around (and the weakref is really # only used to break up the reference cycles to ensure the GC is not # needed here). rr = lambda s=weakref.ref(self): s()._read_ready() loop.call_soon(loop.add_reader, self.__sock_fileno, rr) loop.call_soon(_set_result_unless_cancelled, waiter, None) def close(self): self._loop.call_soon(self._protocol.connection_lost, None) if self.__sock is None: return self._loop.remove_reader(self.__sock_fileno) self.__sock.close() self.__sock = None self._protocol = None self._loop = None def __del__(self): if self.__sock is not None: self.close() def _read_ready(self): if socknumbers.HAS_RECVERR: try: data, ancdata, flags, addr = self.__sock.recvmsg(self.max_size, 1024, socknumbers.MSG_ERRQUEUE) except (BlockingIOError, InterruptedError): pass except OSError as exc: if repr(exc) == "OSError('received malformed or improperly truncated ancillary data',)": pass # workaround for https://bitbucket.org/pypy/pypy/issues/2649/recvmsg-with-empty-err-queue-raises-odd else: self._protocol.error_received(exc) except Exception as exc: self._fatal_error(exc, 'Fatal read error on datagram transport') else: self._protocol.datagram_errqueue_received(data, ancdata, flags, addr) # copied and modified from _SelectorDatagramTransport try: data, ancdata, flags, addr = self.__sock.recvmsg(self.max_size, 1024) # TODO: find a way for the application to tell the trensport how much data is expected except (BlockingIOError, InterruptedError): pass except OSError as exc: self._protocol.error_received(exc) except Exception as exc: self._fatal_error(exc, 'Fatal read error on datagram transport') else: self._protocol.datagram_msg_received(data, ancdata, flags, addr) def sendmsg(self, data, ancdata, flags, address): try: self.__sock.sendmsg((data,), ancdata, flags, address) return except OSError as exc: self._protocol.error_received(exc) return except Exception as exc: self._fatal_error(exc, 'Fatal write error on datagram transport') return async def create_recvmsg_datagram_endpoint(loop, factory, sock): """Create a datagram connection that uses recvmsg rather than recvfrom, and a RecvmsgDatagramProtocol protocol type. This is used like the create_datagram_endpoint method of an asyncio loop, but implemented in a generic way using the loop's add_reader method; thus, it's not a method of the loop but an independent function. Due to the way it is used in aiocoap, socket is not an optional argument here; it could be were this module ever split off into a standalone package. """ sock.setblocking(False) protocol = factory() waiter = loop.create_future() transport = RecvmsgSelectorDatagramTransport( loop, sock, protocol, waiter) try: await waiter except: # noqa: E722 # see https://github.com/PyCQA/pycodestyle/issues/703 transport.close() raise return transport, protocol
rapidsms/utils/translation.py
catalpainternational/rapidsms
330
11082009
<filename>rapidsms/utils/translation.py from collections import defaultdict from django.db.models.query import QuerySet def group_connections(connections): """ Return a list of (language code, respective connections) pairs, while using Django's translation.override() to set each language. """ grouped_conns = defaultdict(list) if isinstance(connections, QuerySet): languages = connections.values_list('contact__language', flat=True) for language in languages.distinct(): lang_conns = connections.filter(contact__language=language) grouped_conns[language].extend(lang_conns) else: for connection in connections: language = connection.contact.language grouped_conns[language].append(connection) for lang, conns in grouped_conns.items(): yield lang, conns
caserec/utils/process_data.py
madrugado/CaseRecommender
407
11082037
<gh_stars>100-1000 # coding=utf-8 """" These classes are responsible for read, write and process external files and information. """ # © 2019. Case Recommender (MIT License) import pandas as pd from caserec.utils.extra_functions import check_error_file __author__ = '<NAME> <<EMAIL>>' class ReadFile(object): def __init__(self, input_file, sep='\t', header=None, names=None, as_binary=False, binary_col=2): """ ReadFile is responsible to read and process all input files in the Case Recommender We used as default csv files with delimiter '\t'. e.g: user item score\n :param input_file: Input File with at least 2 columns. :type input_file: str :param sep: Delimiter for input files :type sep: str, default '\t' :param header: Skip header line (only work with method: read_with_pandas) :type header: int, default None :param names: Name of columns (only work with method: read_with_pandas) :type names: str, default None :param as_binary: If True, the explicit feedback will be transform to binary :type as_binary: bool, default False :param binary_col: Index of columns to read as binary (only work with method: read_with_pandas) :type binary_col: int, default 2 """ self.input_file = input_file self.sep = sep self.header = header self.names = names self.as_binary = as_binary self.binary_col = binary_col check_error_file(self.input_file) def read(self): """ Method to read files and collect important information. :return: Dictionary with file information :rtype: dict """ list_users = set() list_items = set() list_feedback = [] dict_feedback = {} # To be filled as: {user_id: [item_id_1, item_id_2, ..., item_id_N]} items_unobserved = {} items_seen_by_user = {} users_viewed_item = {} mean_value = 0 number_interactions = 0 with open(self.input_file) as infile: for line in infile: if line.strip(): inline = line.split(self.sep) if len(inline) == 1: raise TypeError("Error: Space type (sep) is invalid!") user, item, value = int(inline[0]), int(inline[1]), float(inline[2]) dict_feedback.setdefault(user, {}).update({item: 1.0 if self.as_binary else value}) items_seen_by_user.setdefault(user, set()).add(item) users_viewed_item.setdefault(item, set()).add(user) list_users.add(user) list_items.add(item) mean_value += 1.0 if self.as_binary else value list_feedback.append(1.0 if self.as_binary else value) number_interactions += 1 mean_value /= float(number_interactions) list_users = sorted(list(list_users)) list_items = sorted(list(list_items)) # Create a dictionary with unobserved items for each user / Map user with its respective id for user in list_users: items_unobserved[user] = list(set(list_items) - set(items_seen_by_user[user])) # Calculate the sparsity of the set: N / (nu * ni) sparsity = (1 - (number_interactions / float(len(list_users) * len(list_items)))) * 100 dict_file = { 'feedback': dict_feedback, 'users': list_users, 'items': list_items, 'sparsity': sparsity, 'number_interactions': number_interactions, 'users_viewed_item': users_viewed_item, 'items_unobserved': items_unobserved, 'items_seen_by_user': items_seen_by_user, 'mean_value': mean_value, 'max_value': max(list_feedback), 'min_value': min(list_feedback), } return dict_file def read_metadata_or_similarity(self): """ Method to read metadata or similarity files. Expects at least 2 columns for metadata file (item metadata or item metadata score) and 3 columns for similarity files (item item similarity) :return: Dictionary with file information :rtype: dict """ dict_values = {} list_col_1 = set() list_col_2 = set() mean_value = 0 number_interactions = 0 with open(self.input_file) as infile: for line in infile: if line.strip(): inline = line.split(self.sep) if len(inline) == 1: raise TypeError("Error: Space type (sep) is invalid!") if len(inline) == 2: attr1, attr2 = int(inline[0]), inline[1] dict_values.setdefault(attr1, {}).update({attr2: 1.0}) list_col_1.add(attr1) list_col_2.add(attr2) number_interactions += 1 else: attr1, attr2, value = int(inline[0]), inline[1], float(inline[2]) dict_values.setdefault(attr1, {}).update({attr2: 1.0 if self.as_binary else value}) list_col_1.add(attr1) list_col_2.add(attr2) mean_value += value number_interactions += 1 dict_file = { 'dict': dict_values, 'col_1': list(list_col_1), 'col_2': list(list_col_2), 'mean_value': mean_value, 'number_interactions': number_interactions } return dict_file def read_like_triple(self): """ Method to return information in the file as a triple. eg. (user, item, value) :return: List with triples in the file :rtype: list """ triple_list = [] with open(self.input_file) as infile: for line in infile: if line.strip(): inline = line.split(self.sep) if len(inline) == 1: raise TypeError("Error: Space type (sep) is invalid!") user, item, value = int(inline[0]), int(inline[1]), float(inline[2]) triple_list.append((user, item, value)) return triple_list def read_with_pandas(self): """ Method to read file with pandas :return DataFrame with file lines """ df = pd.read_csv(self.input_file, sep=self.sep, skiprows=self.header, header=None, names=self.names) if self.header is not None: df.columns = [i for i in range(len(df.columns))] if self.as_binary: df.iloc[:, self.binary_col] = 1 return df.sort_values(by=[0, 1]) def read_item_category(self): list_item_category = [] dict_category = {} set_items = set() dict_item_category = {} with open(self.input_file, 'r', encoding='utf-8') as infile: for line in infile: if line.strip(): inline = line.split(self.sep) item, category = int(inline[0]), inline[1].rstrip() list_item_category.append([item, category]) if category in dict_category: dict_category[category] += 1 else: dict_category[category] = 1 set_items.add(item) if item not in dict_item_category: dict_item_category[item] = [] dict_item_category[item].append(category) else: dict_item_category[item].append(category) return [dict_category, list_item_category, set_items, dict_item_category] class WriteFile(object): def __init__(self, output_file, data=None, sep="\t", mode='w', as_binary=False): """ Class to write predictions and information :param output_file: File with dir to write the information :type output_file: str :param data: Data to be write :type data: list, dict, set :param sep: Delimiter for input files :type sep: str, default '\t' :param mode: Method to write file :type mode: str, default 'w' :param as_binary: If True, write score equals 1 :type as_binary: bool, default False """ self.output_file = output_file self.data = data self.sep = sep self.mode = mode self.as_binary = as_binary def write(self): """ Method to write using data as list. e.g.: [user, item, score] """ with open(self.output_file, self.mode) as infile: for triple in self.data: if self.as_binary: infile.write('%d%s%d%s%f\n' % (triple[0], self.sep, triple[1], self.sep, 1.0)) else: infile.write('%d%s%d%s%f\n' % (triple[0], self.sep, triple[1], self.sep, triple[2])) def write_with_dict(self): """ Method to write using data as dictionary. e.g.: user: {item : score} """ with open(self.output_file, self.mode) as infile: for user in self.data: for pair in self.data[user]: infile.write('%d%s%d%s%f\n' % (user, self.sep, pair[0], self.sep, pair[1])) def write_with_pandas(self, df): """ Method to use a pandas DataFrame as data :param df: Data to write in output file :type df: DataFrame """ df.to_csv(self.output_file, sep=self.sep, mode=self.mode, header=None, index=False)
tests/test_goea_statsmodels.py
flying-sheep/goatools
477
11082050
<filename>tests/test_goea_statsmodels.py import sys import os from goatools.obo_parser import GODag from goatools.go_enrichment import GOEnrichmentStudy from goatools.associations import read_associations """Test Gene Ontology Enrichment Analysis using mutipletest methods in statsmodels.""" ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/" __copyright__ = "Copyright (C) 2010-2018, <NAME> et al., All rights reserved." def test_goea_statsmodels(log=sys.stdout): """Test GOEA with local multipletest correction methods for statsmodels.""" goeaobj = get_goeaobj() study_ids = [line.rstrip() for line in open(ROOT + "small_study")] prt_if = lambda nt: nt.p_uncorrected < 0.0005 ## These will specify to use the statsmodels methods methods_sm0 = ['holm-sidak', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'] # 'fdr_gbs' generates a zerodivision warning # Prepend "sm_" or "statsmodels_" to a method to use that version methods_sm1 = ['sm_bonferroni', 'sm_sidak', 'sm_holm'] methods = methods_sm0 + methods_sm1 for method in methods: log.write("\nSTATSMODELS METHOD: {M}\n".format(M=method)) goea_results = goeaobj.run_study(study_ids, methods=[method]) # Make format_string. Examples: # "{NS} {p_uncorrected:5.3e} {p_fdr_bh:5.3e} {name} ({study_count} gene(s))\n" fmtstr = "".join(["{NS} {p_uncorrected:5.3e} {", "p_{M}:5.3e".format(M=method), "} {name} ({study_count} gene(s))\n"]) fout_xlsx = "goea_statsmodels_{M}.xlsx".format(M=method) fout_tsv = "goea_statsmodels_{M}.tsv".format(M=method) goeaobj.prt_txt(log, goea_results, fmtstr, prt_if=prt_if) goeaobj.wr_xlsx(fout_xlsx, goea_results) goeaobj.wr_tsv(fout_tsv, goea_results) def get_goeaobj(methods=None): """Test GOEA with method, fdr.""" obo_dag = GODag(ROOT + "goslim_generic.obo") fin_assc = ROOT + "slim_association" assoc = read_associations(fin_assc, 'id2gos', no_top=True) popul_ids = [line.rstrip() for line in open(ROOT + "small_population")] goeaobj = GOEnrichmentStudy(popul_ids, assoc, obo_dag, methods=methods) return goeaobj if __name__ == '__main__': test_goea_statsmodels() # Copyright (C) 2010-2018, <NAME>., All rights reserved.
turbinia/lib/text_formatter.py
sa3eed3ed/turbinia
559
11082067
<filename>turbinia/lib/text_formatter.py<gh_stars>100-1000 #!/usr/bin/env python # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for formatting text.""" from __future__ import print_function from __future__ import unicode_literals def bold(text): """Formats text as bold in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '**{0:s}**'.format(text.strip()) def heading1(text): """Formats text as heading 1 in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '# {0:s}'.format(text.strip()) def heading2(text): """Formats text as heading 2 in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '## {0:s}'.format(text.strip()) def heading3(text): """Formats text as heading 3 in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '### {0:s}'.format(text.strip()) def heading4(text): """Formats text as heading 4 in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '#### {0:s}'.format(text.strip()) def heading5(text): """Formats text as heading 5 in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '##### {0:s}'.format(text.strip()) def bullet(text, level=1): """Formats text as a bullet in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '{0:s}* {1:s}'.format(' ' * (level - 1), text.strip()) def code(text): """Formats text as code in Markdown format. Args: text(string): Text to format Return: string: Formatted text. """ return '`{0:s}`'.format(text.strip())
asdl/asdl_.py
Schweinepriester/oil
2,209
11082074
<reponame>Schweinepriester/oil #------------------------------------------------------------------------------- # Parser for ASDL [1] definition files. Reads in an ASDL description and parses # it into an AST that describes it. # # The EBNF we're parsing here: Figure 1 of the paper [1]. Extended to support # modules and attributes after a product. Words starting with Capital letters # are terminals. Literal tokens are in "double quotes". Others are # non-terminals. Id is either TokenId or ConstructorId. # # module ::= "module" Id "{" [definitions] "}" # definitions ::= { TypeId "=" type } # type ::= product | sum # product ::= fields ["attributes" fields] # fields ::= "(" { field, "," } field ")" # field ::= TypeId ["?" | "*"] [Id] # sum ::= constructor { "|" constructor } ["attributes" fields] # constructor ::= ConstructorId [fields] # # [1] "The Zephyr Abstract Syntax Description Language" by Wang, et. al. See # http://asdl.sourceforge.net/ #------------------------------------------------------------------------------- from __future__ import print_function import cStringIO from typing import List # TODO: There should be SimpleSumType(_SumType) and CompoundSumType(_SumType) # That can be determined at compile time with this function. is_simple() # should move to front_end.py. # PATCH: Moved this function from asdl_c.py. def is_simple(sum): """Return True if a sum is a simple. A sum is simple if its types have no fields, e.g. unaryop = Invert | Not | UAdd | USub """ for t in sum.types: if t.fields: return False return True # The following classes are the AST for the ASDL schema, i.e. the "meta-AST". # See the EBNF at the top of the file to understand the logical connection # between the various node types. class AST(object): def Print(self, f, indent): raise NotImplementedError() def __repr__(self): f = cStringIO.StringIO() self.Print(f, 0) return f.getvalue() class Use(AST): def __init__(self, mod_name, type_names): self.mod_name = mod_name self.type_names = type_names def Print(self, f, indent): ind = indent * ' ' f.write('%sUse %s {\n' % (ind, self.mod_name)) f.write(' %s%s\n' % (ind, ', '.join(t for t in self.type_names))) f.write('%s}\n' % ind) class Module(AST): def __init__(self, name, uses, dfns): self.name = name self.uses = uses self.dfns = dfns def Print(self, f, indent): ind = indent * ' ' f.write('%sModule %s {\n' % (ind, self.name)) for u in self.uses: u.Print(f, indent+1) f.write('\n') for d in self.dfns: d.Print(f, indent+1) f.write('\n') f.write('%s}\n' % ind) class TypeDecl(AST): """A binding of name to a Sum or Product type.""" def __init__(self, name, value): self.name = name # type: str self.value = value # type: AST def Print(self, f, indent): ind = indent * ' ' f.write('%sType %s {\n' % (ind, self.name)) self.value.Print(f, indent+1) f.write('%s}\n' % ind) class TypeExpr(AST): """A parameterized type expression, e.g. the type of a field. e.g. map[string, int] map[int, array[string]] self.children is empty if it's a leaf. Note: string* <=> array[string] mytype? <=> maybe[mytype] """ def __init__(self, name, children=None, seq=False, opt=False): self.name = name # type: str self.children = children or [] # type: List[TypeExpr] # mutated by name resolution stage. self.resolved = None # type: AST def Print(self, f, indent): """Printed on one line.""" ind = indent * ' ' f.write('TypeExpr %s' % (self.name)) # printed after field if self.children: f.write(' [ ') for i, child in enumerate(self.children): if i != 0: f.write(', ') child.Print(f, indent+1) f.write(' ]') class Field(AST): def __init__(self, typ, name): # type: (TypeExpr, str) -> None self.typ = typ # type expression self.name = name # variable name # This field is initialized in the name resolution phase. If the field # is 'action x', then we want to know if 'action' is a sum type, simple # type, or product type self.resolved_type = None # type: AST # TODO: It would be nice to have a token for line numbers in name # resolution errors def IsArray(self): return self.typ.name == 'array' def IsMaybe(self): return self.typ.name == 'maybe' def IsMap(self): return self.typ.name == 'map' def TypeName(self): # Compatibility for foo? and foo* if self.typ.name in ('array', 'maybe'): return self.typ.children[0].name else: return self.typ.name def Print(self, f, indent): ind = indent * ' ' f.write('%sField %r ' % (ind, self.name)) self.typ.Print(f, indent) f.write('\n') class _CompoundAST(AST): """Either a Product or Constructor. encode.py and format.py need a reflection API. """ def __init__(self, fields): self.fields = fields or [] class Constructor(_CompoundAST): def __init__(self, name, shared_type=None, fields=None): _CompoundAST.__init__(self, fields) self.name = name self.shared_type = shared_type # for DoubleQuoted %double_quoted def Print(self, f, indent): ind = indent * ' ' f.write('%sConstructor %s' % (ind, self.name)) if self.shared_type: f.write(' %%%s' % self.shared_type) if self.fields: f.write(' {\n') for field in self.fields: field.Print(f, indent+1) f.write('%s}' % ind) f.write('\n') class Sum(AST): def __init__(self, types, attributes=None): self.types = types # type: List[Constructor] self.attributes = attributes or [] def Print(self, f, indent): ind = indent * ' ' f.write('%sSum {\n' % ind) for t in self.types: t.Print(f, indent+1) if self.attributes: f.write('\n') f.write('%s (attributes)\n' % ind) for a in self.attributes: a.Print(f, indent+1) f.write('%s}\n' % ind) class SimpleSum(Sum): pass class Product(_CompoundAST): def __init__(self, fields, attributes=None): _CompoundAST.__init__(self, fields) self.attributes = attributes or [] def Print(self, f, indent): ind = indent * ' ' f.write('%sProduct {\n' % ind) for field in self.fields: field.Print(f, indent+1) if self.attributes: f.write('\n') f.write('%s (attributes)\n' % ind) for a in self.attributes: a.Print(f, indent+1) f.write('%s}\n' % ind) def TypeNameHeuristic(t): # type: (str) -> str """ For 'use'. We don't parse the imported file, so we have a heuristic based on the name! e.g. re_t or BraceGroup """ return '%s_t' % t if t[0].islower() else t
build/security/verify_build/verify_build_test.py
wwjiang007/fuchsia-1
210
11082077
<reponame>wwjiang007/fuchsia-1 #!/usr/bin/env python3.8 # Copyright 2020 The Fuchsia Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. r"""Unit test for verify_build.py. Need to have SCRUTINY and ZBI environmental variables set. To manually run this test: SCRUTINY=~/fuchsia/out/default/host_x64/scrutiny \ ZBI=~/fuchsia/out/default/host_x64/zbi python3 \ verify_build_test.py """ import os import subprocess import sys import tempfile import unittest import unittest.mock as mock import verify_build SUBPROCESS_RUN = subprocess.run class RunVerifyZbiKernelCmdlineTest(unittest.TestCase): def verify_kernel_cmdline(self, golden, actual): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') cmdline_file = os.path.join(test_folder, 'cmdline') scrutiny = os.environ['SCRUTINY'] with open(golden_file, 'w+') as f: f.write(golden) with open(cmdline_file, 'wb+') as f: f.write(actual) # Use ZBI to create a test.zbi that only contains cmdline. subprocess.check_call( [ os.environ['ZBI'], '-o', test_zbi, '-T', 'CMDLINE', cmdline_file ]) os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file, '--stamp', stamp_file ] # Verify the cmdline in the generated ZBI. result = verify_build.main(args) if result == 0: # Verify stamp file is created. self.assertTrue(os.path.isfile(stamp_file)) return result def verify_bootfs_filelist(self, want_filelist, got_files): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') os.mkdir(fuchsia_folder) test_zbi = os.path.join(test_folder, 'test.zbi') with open(golden_file, 'w+') as f: f.write(want_filelist) fake_scrutiny = os.path.join(test_folder, 'fake_scrutiny') with open(fake_scrutiny, 'w+') as f: f.write('fake scrutiny') # Create a dummy test.zbi. We are not going to use the real scrutiny # to parse it so its content doesn't matter. with open(test_zbi, 'w+') as f: f.write('test ZBI') zbi_files = {} for file in got_files: zbi_files[os.path.join('bootfs', file)] = 'bootfs file' fake_subprocess = FakeSubprocess(zbi_files, {}) with mock.patch('subprocess.run') as mock_run: mock_run.side_effect = fake_subprocess.run args = [ '--type', 'bootfs_filelist', '--zbi-file', test_zbi, '--scrutiny', fake_scrutiny, '--golden-files', golden_file, '--stamp', stamp_file ] result = verify_build.main(args) if result == 0: # Verify stamp file is created. self.assertTrue(os.path.isfile(stamp_file)) return result def verify_static_pkgs( self, want_pkgs, zbi_files, blobfs_files, system_image_files): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') depfile = os.path.join(test_folder, 'depfile') fuchsia_folder = os.path.join(test_folder, 'fuchsia') os.mkdir(fuchsia_folder) test_zbi = os.path.join(test_folder, 'test.zbi') test_blobfs = os.path.join(test_folder, 'test.blob') with open(golden_file, 'w+') as f: f.write(want_pkgs) fake_scrutiny = os.path.join(test_folder, 'fake_scrutiny') with open(fake_scrutiny, 'w+') as f: f.write('fake scrutiny') fake_far = os.path.join(test_folder, 'fake_far') with open(fake_scrutiny, 'w+') as f: f.write('fake far') # Create a dummy test.zbi. We are not going to use the real scrutiny # to parse it so its content doesn't matter. with open(test_zbi, 'w+') as f: f.write('test ZBI') blobs_folder = os.path.join(test_folder, 'blobs') os.mkdir(blobs_folder) blobfs_manifest = os.path.join(blobs_folder, 'blobs.manifest') with open(blobfs_manifest, 'w+') as bf: for blobfs_file in blobfs_files: # We use the blob merkle as the blob content file name here. with open(os.path.join(blobs_folder, blobfs_file), 'w+') as f: f.write(blobfs_files[blobfs_file]) bf.write(blobfs_file + '=' + blobfs_file + '\n') fake_subprocess = FakeSubprocess(zbi_files, system_image_files) with mock.patch('subprocess.run') as mock_run: mock_run.side_effect = fake_subprocess.run args = [ '--type', 'static_pkgs', '--zbi-file', test_zbi, '--blobfs-manifest', blobfs_manifest, '--scrutiny', fake_scrutiny, '--far', fake_far, '--golden-files', golden_file, '--stamp', stamp_file, '--depfile', depfile, ] result = verify_build.main(args) if result == 0: # Verify stamp file is created. self.assertTrue(os.path.isfile(stamp_file)) # Verify depfile is created. self.assertTrue(os.path.isfile(depfile)) return result def test_verify_kernel_cmdline_success_normal_case(self): self.assertEqual( 0, self.verify_kernel_cmdline( 'key1=v1\n# comments are ignored\nkey2=v2\nkey3=v3', b'key1=v1 key2=v2 key3=v3')) def test_verify_kernel_cmdline_success_order_diff(self): self.assertEqual( 0, self.verify_kernel_cmdline( 'key1=v1\nkey2=v2\nkey3=v3', b'key2=v2 key1=v1 key3=v3')) def test_verify_kernel_cmdline_success_no_value_option(self): self.assertEqual( 0, self.verify_kernel_cmdline('option1\noption2', b'option1 option2')) def test_verify_kernel_cmdline_success_transitional(self): # ? at start of line marks it optional cmdline_golden = 'key1=v1\nkey2=v2\n?key3=v3' self.assertEqual( 0, self.verify_kernel_cmdline( cmdline_golden, b'key1=v1 key2=v2 key3=v3')) self.assertEqual( 0, self.verify_kernel_cmdline( cmdline_golden, b'key1=v1 key2=v2')) def test_verify_kernel_cmdline_fail_golden_empty(self): self.assertEqual( 1, self.verify_kernel_cmdline('', b'key2=v2 key1=v1 key3=v3')) def test_verify_kernel_cmdline_fail_missing_key2(self): self.assertEqual( 1, self.verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1')) def test_verify_kernel_cmdline_fail_key1_mismatch(self): self.assertEqual( 1, self.verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v2 key2=v2')) def test_verify_kernel_cmdline_fail_key2_mismatch(self): self.assertEqual( 1, self.verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1 key2=v1')) def test_verify_kernel_cmdline_fail_additional_key3(self): self.assertEqual( 1, self.verify_kernel_cmdline( 'key1=v1\nkey2=v2', b'key1=v1 key2=v2 key3=v3')) def test_verify_kernel_cmdline_fail_invalid_format(self): self.assertEqual( 1, self.verify_kernel_cmdline('key1=v1\nkey2=v2', b'invalid=format=1')) def test_verify_kernel_cmdline_fail_option1_missing(self): self.assertEqual( 1, self.verify_kernel_cmdline('option1\noption2', b'option2')) def test_verify_kernel_cmdline_fail_additional_option3(self): self.assertEqual( 1, self.verify_kernel_cmdline( 'option1\noption2', b'option1 option2 option3')) def test_verify_kernel_cmdline_zbi_not_found(self): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') scrutiny = os.environ['SCRUTINY'] with open(golden_file, 'w+') as f: f.write('option1') # Do not create test_zbi os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file, '--stamp', stamp_file ] self.assertEqual(1, verify_build.main(args)) def test_verify_kernel_cmdline_success_no_cmdline_found(self): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') scrutiny = os.environ['SCRUTINY'] # Create an empty golden file with open(golden_file, 'w+') as f: f.write('') # Use ZBI to create a test.zbi with no cmdline. subprocess.check_call([os.environ['ZBI'], '-o', test_zbi]) os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file, '--stamp', stamp_file ] self.assertEqual(0, verify_build.main(args)) def test_verify_kernel_cmdline_fail_golden_empty_cmdline_found(self): self.assertEqual(1, self.verify_kernel_cmdline('', b'option2')) def test_verify_kernel_cmdline_fail_golden_not_empty_cmdline_not_found( self): with tempfile.TemporaryDirectory() as test_folder: golden_file = os.path.join(test_folder, 'golden') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') scrutiny = os.environ['SCRUTINY'] # Create an empty golden file with open(golden_file, 'w+') as f: f.write('option1') # Use ZBI to create a test.zbi with no cmdline. subprocess.check_call([os.environ['ZBI'], '-o', test_zbi]) os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file, '--stamp', stamp_file ] self.assertEqual(1, verify_build.main(args)) def test_verify_kernel_cmdline_multiple_golden_files_one_match(self): with tempfile.TemporaryDirectory() as test_folder: golden_file_1 = os.path.join(test_folder, 'golden_1') golden_file_2 = os.path.join(test_folder, 'golden_2') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') scrutiny = os.environ['SCRUTINY'] cmdline_file = os.path.join(test_folder, 'cmdline') # golden_file_1 does not match. with open(golden_file_1, 'w+') as f: f.write('option1') # golden_file_2 matches. with open(golden_file_2, 'w+') as f: f.write('option1 option2') with open(cmdline_file, 'wb+') as f: f.write(b'option1 option2') # Use ZBI to create a test.zbi that only contains cmdline. subprocess.check_call( [ os.environ['ZBI'], '-o', test_zbi, '-T', 'CMDLINE', cmdline_file ]) os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file_1, golden_file_2, '--stamp', stamp_file ] self.assertEqual(1, verify_build.main(args)) def test_verify_kernel_cmdline_three_golden_files_not_supported(self): with tempfile.TemporaryDirectory() as test_folder: golden_file_1 = os.path.join(test_folder, 'golden_1') golden_file_2 = os.path.join(test_folder, 'golden_2') golden_file_3 = os.path.join(test_folder, 'golden_3') stamp_file = os.path.join(test_folder, 'stamp') fuchsia_folder = os.path.join(test_folder, 'fuchsia') test_zbi = os.path.join(test_folder, 'test.zbi') scrutiny = os.environ['SCRUTINY'] cmdline_file = os.path.join(test_folder, 'cmdline') with open(golden_file_1, 'w+') as f: f.write('option1') with open(golden_file_2, 'w+') as f: f.write('option1') with open(golden_file_3, 'w+') as f: f.write('option1') with open(cmdline_file, 'wb+') as f: f.write(b'option1') # Use ZBI to create a test.zbi that only contains cmdline. subprocess.check_call( [ os.environ['ZBI'], '-o', test_zbi, '-T', 'CMDLINE', cmdline_file ]) os.mkdir(fuchsia_folder) args = [ '--type', 'kernel_cmdline', '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--golden-files', golden_file_1, golden_file_2, golden_file_3, '--stamp', stamp_file ] # We do not support more than two golden files. self.assertEqual(0, verify_build.main(args)) def test_verify_bootfs_filelist_normal_case(self): self.assertEqual( 0, self.verify_bootfs_filelist( 'fileA\nfileB\n# comments are ignored\nfileC', ['fileA', 'fileC', 'fileB'])) def test_verify_bootfs_filelist_sub_dir(self): self.assertEqual( 0, self.verify_bootfs_filelist( 'dir/fileA\ndir/fileC\nfileB', ['dir/fileA', 'dir/fileC', 'fileB'])) def test_verify_bootfs_filelist_mismatch(self): self.assertEqual( 1, self.verify_bootfs_filelist( 'fileA\nfileB\nfileC', ['fileA', 'fileC'])) def test_verify_bootfs_filelist_sub_dir_mismatch(self): self.assertEqual( 1, self.verify_bootfs_filelist( 'dir/fileA\ndir/fileC\nfileB', ['dir1/fileA', 'dir/fileC', 'fileB'])) def test_verify_bootfs_filelist_transitional(self): # ? at start of line permits presence or absence of file for soft # transitions golden_contents = 'fileA\n?fileB\nfileC' self.assertEqual( 0, self.verify_bootfs_filelist( golden_contents, ['fileA', 'fileB', 'fileC'])) self.assertEqual( 0, self.verify_bootfs_filelist(golden_contents, ['fileA', 'fileC'])) def test_verify_static_pkgs_normal_case(self): static_packages = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 0, self.verify_static_pkgs( '# allow comments\npkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_order(self): static_packages = 'pkg2/2=1\npkg1/1=1\npkg0/0=2' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 0, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_transitional(self): static_packages_with_pkg2 = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' static_packages_without_pkg2 = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files_with_pkg2 = {'1234': 'system_image', '2345': static_packages_with_pkg2} blobfs_files_without_pkg2 = {'1234': 'system_image', '2345': static_packages_without_pkg2} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 0, self.verify_static_pkgs( 'pkg0\npkg1\n?pkg2', zbi_files, blobfs_files_with_pkg2, system_image_files)) self.assertEqual( 0, self.verify_static_pkgs( 'pkg0\npkg1\n?pkg2', zbi_files, blobfs_files_without_pkg2, system_image_files)) def test_verify_static_pkgs_mismatch(self): static_packages = 'pkg0/0=1\npkg1/0=1' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_no_devmgr_config(self): static_packages = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = {} blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_invalid_devmgr_config(self): static_packages = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = {'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd'} blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_system_image_blob_not_found(self): static_packages = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_invalid_system_image(self): static_packages = 'pkg0/0=1\npkg1/0=1\npkg2/0=2' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_static_pkgs_blob_not_found(self): zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image'} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) def test_verify_static_pkgs_invalid_static_pkgs_list(self): static_packages = 'pkg0/0' zbi_files = { 'bootfs/config/devmgr': 'zircon.system.pkgfs.cmd=bin/pkgsvr+1234' } blobfs_files = {'1234': 'system_image', '2345': static_packages} system_image_files = {'meta/contents': 'data/static_packages=2345'} self.assertEqual( 1, self.verify_static_pkgs( 'pkg0\npkg1\npkg2', zbi_files, blobfs_files, system_image_files)) class FakeSubprocess(object): def __init__(self, zbi_files, system_image_files): self.zbi_files = zbi_files self.system_image_files = system_image_files def _write_files(files, output): for file in files: dirpath = os.path.dirname(os.path.join(output, file)) if not os.path.exists(dirpath): os.makedirs(dirpath, exist_ok=True) with open(os.path.join(output, file), 'w+') as f: f.write(files[file]) def run(self, *argv, **kwargs): del kwargs command = argv[0] if command[0].endswith('fake_scrutiny'): output = '' input = '' scrutiny_commands = command[2].split(' ') for i in range(0, len(scrutiny_commands) - 1): if scrutiny_commands[i] == '--output': output = scrutiny_commands[i + 1] if scrutiny_commands[i] == '--input': input = scrutiny_commands[i + 1] if not os.path.exists(input): raise subprocess.CalledProcessError( cmd=command, returncode=1, stderr=('input: ' + input + ' not found').encode()) op = scrutiny_commands[0] if op == 'tool.zbi.extract': FakeSubprocess._write_files(self.zbi_files, output) else: raise subprocess.CalledProcessError( cmd=command, returncode=1, stderr=('unknown scrutiny command: ' + op).encode()) return subprocess.CompletedProcess( args=[], returncode=0, stdout=b'{"status":"ok"}') elif command[0].endswith('fake_far'): input = (command[2].split('='))[1] if not os.path.exists(input): raise subprocess.CalledProcessError( cmd=command, returncode=1, stderr=('input: ' + input + ' not found').encode()) output = (command[3].split('='))[1] os.mkdir(output) FakeSubprocess._write_files(self.system_image_files, output) return subprocess.CompletedProcess( args=[], returncode=0, stdout=b'') raise subprocess.CalledProcessError( cmd=command, returncode=1, stderr=b'unsupported command') if __name__ == '__main__': if 'SCRUTINY' not in os.environ or 'ZBI' not in os.environ: print('Please set SCRUTINY and ZBI environmental path') sys.exit(1) unittest.main()
demo/run_pg_on_cartpole.py
simonoso/EasyRL
125
11082085
<gh_stars>100-1000 # Copyright (c) 2019 Alibaba Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import print_function from __future__ import division import gym from easy_rl.agents import agents from easy_rl.utils.window_stat import WindowStat MODEL_CONFIG = dict( # specific type="PG", # common init_lr=1e-3, lr_strategy_spec={ 'type': 'exponential_decay', 'decay_steps': 100, 'decay_rate': 0.9 }, global_norm_clip=40) AGENT_CONFIG = dict( type="Agent", sample_batch_size=64, batch_size=64, # gae gamma=0.99, lambda_=0.5, use_gae=True, ) def main(): env = gym.make("CartPole-v0") agent_class = agents[AGENT_CONFIG["type"]] agent = agent_class( env.observation_space, env.action_space, AGENT_CONFIG, MODEL_CONFIG, distributed_spec={}, export_dir="dump_dir", checkpoint_dir="ckpt_dir") reward_window = WindowStat("reward", 50) length_window = WindowStat("length", 50) loss_window = WindowStat("loss", 50) obs, actions, rewards, next_obs, dones, value_preds = list(), list(), list( ), list(), list(), list() act_count = 0 for i in range(600): ob = env.reset() done = False episode_reward = .0 episode_len = 0 while not done: action, results = agent.act([ob], False) next_ob, reward, done, info = env.step(action[0]) act_count += 1 obs.append(ob) actions.append(action[0]) rewards.append(0.1 * reward) next_obs.append(next_ob) dones.append(done) value_preds.append(results["value_preds"][0]) if agent.ready_to_send: agent.send_experience( obs=obs, actions=actions, rewards=rewards, next_obs=next_obs, dones=dones, value_preds=value_preds) if agent.ready_to_receive: batch_data = agent.receive_experience() res = agent.learn(batch_data) loss_window.push(res['loss']) ob = next_ob episode_reward += reward episode_len += 1 if act_count % 1000 == 0: print("timestep:", act_count, reward_window, length_window) reward_window.push(episode_reward) length_window.push(episode_len) agent.export_saved_model() print("Done.") if __name__ == "__main__": main()
ebcli/containers/pathconfig.py
senstb/aws-elastic-beanstalk-cli
110
11082086
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from ebcli.core import fileoperations class PathConfig(object): COMPOSE_FILENAME = 'docker-compose.yml' DOCKERCFG_FILENAME = '.dockercfg' DOCKERIGNORE_FILENAME = '.dockerignore' DOCKERFILE_FILENAME = 'Dockerfile' DOCKERRUN_FILENAME = 'Dockerrun.aws.json' LOCAL_STATE_FILENAME = '.localstate' NEW_DOCKERFILE_FILENAME = 'Dockerfile.local' ROOT_LOCAL_LOGS_DIRNAME = 'local' @staticmethod def docker_proj_path(): return fileoperations.get_project_root() @classmethod def dockerfile_path(cls): return fileoperations.project_file_path(cls.DOCKERFILE_FILENAME) @classmethod def new_dockerfile_path(cls): return fileoperations.get_eb_file_full_location(cls.NEW_DOCKERFILE_FILENAME) @classmethod def dockerignore_path(cls): return fileoperations.project_file_path(cls.DOCKERIGNORE_FILENAME) @classmethod def logdir_path(cls): return fileoperations.get_logs_location(cls.ROOT_LOCAL_LOGS_DIRNAME) @classmethod def dockerrun_path(cls): return fileoperations.project_file_path(cls.DOCKERRUN_FILENAME) @classmethod def compose_path(cls): return fileoperations.get_eb_file_full_location(cls.COMPOSE_FILENAME) @classmethod def local_state_path(cls): return fileoperations.get_eb_file_full_location(cls.LOCAL_STATE_FILENAME) @classmethod def dockerfile_exists(cls): return fileoperations.project_file_exists(cls.DOCKERFILE_FILENAME) @classmethod def dockerrun_exists(cls): return fileoperations.project_file_exists(cls.DOCKERRUN_FILENAME)
pentest-tool/pentest/libs/password.py
Micr067/pentestdb
686
11082105
<gh_stars>100-1000 #!/usr/bin/env python #-*- coding:utf-8 -*- ''' Pentestdb, a database for penetration test. Copyright (c) 2015 alpha1e0 ================================================================ 社工密码生成器. ''' import time import itertools class PasswdGenerator(object): ''' Password generator. ''' # 常用密码关键数字 _numList = ['123456', '123123', '123123123', '112233', '445566', '456456', '789789', '778899', '321321', '520', '1314', '5201314', '1314520', '147369', '147258', '258', '147', '456', '789', '147258369', '111222', '123', '1234', '12345', '1234567', '12345678', '123456789', '987654321', '87654321', '7654321', '654321', '54321', '4321', '321'] # 常用前缀列表 _prefixList = ['a','qq','yy','aa','abc','qwer','woaini'] # 常用密码 _commonPasswd = ['<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', 'woaini520', 'woaini123', 'woaini521', 'qazwsx', '1qaz2wsx', '1q2w3e4r', '1q2w3e4r5t', '1q2w3e', 'qwertyuiop', 'zxcvbnm'] # 和partner混合的常用前缀列表 partnerPrefixList = ['520','5201314','1314','iloveu','iloveyou'] # 和domian,company组合的前缀列表 domainPrefixList = ['admin','root','manager','system'] def __init__(self, fullname, nickname, englishname, partnername, birthday, phone, qq, \ company, domain, oldpasswd, keywords, keynumbers): ''' Params: Parameters of args: fullname: specified the fullname, format: '<NAME>' '<NAME>' nickname: specified the nickname englishname: specified the english name partnername: specified the partner name birthday: specified the birthday day, format: '2000-1-10' phone: specified the phone number qq: specified the QQ number company: specified the company domain: specified the domain name oldpasswd: specified the oldpassword keywords: specified the keywords, example: 'keyword1 keyword2' keynumbers: specified the keynumbers, example: '123 789' ''' self.fullname = fullname self.nickname = nickname self.englishname = englishname self.partnername = partnername self.birthday = birthday self.phone = phone self.qq = qq self.company = company self.domain = domain self.oldpasswd = oldpasswd self.keywords = keywords self.keynumbers = keynumbers # 常用数字列表,用户和用户名、昵称、英文名、关键字等混合 self.innerNumList = [] # 常用前缀列表,用于和手机号、QQ号混合 self.innerPrefixList = [] # 段名列表,由原始全名生成 self.shortNameList = [] # 全名列表,由原始全名生成 self.fullNameList = [] # 待混合的keyword列表,由于用户名、昵称、英文名、关键字的混合规则一致,因此放到这一个列表内进行混合 self.mixedKeywordList = [] self.result = [] def _genShortNameList(self, fullname=None): fullname = fullname if fullname else self.fullname if not fullname: return [] else: result = [] func = lambda x:[x, x.title(), x[0].lower(), x[0].upper(), x.upper()] nameSplited = fullname.split() if len(nameSplited) == 1: result += func(nameSplited[0]) elif len(nameSplited) == 2: shortName = nameSplited[0][0].lower() + nameSplited[1][0].lower() result += func(shortName) else: shortName = nameSplited[0][0].lower() + nameSplited[1][0].lower() + nameSplited[2][0].lower() result += func(shortName) shortNameRS = nameSplited[1][0].lower() + nameSplited[2][0].lower() + nameSplited[0][0].lower() shortNameR = nameSplited[1][0].lower() + nameSplited[2][0].lower() + nameSplited[0] result += [shortNameR, shortNameRS, shortNameRS.upper()] return result def _genFullNameList(self, fullname=None): fullname = fullname if fullname else self.fullname if not fullname: return [] else: result = [] nameSplited = fullname.split() if len(nameSplited) == 1: result.append(nameSplited[0]) elif len(nameSplited) == 2: result += ["".join(nameSplited), nameSplited[1]+nameSplited[0]] else: result += [nameSplited[0]+nameSplited[1]+nameSplited[2], nameSplited[1]+nameSplited[2]+nameSplited[0]] return result + [x.upper() for x in result] def _genInnerNumList(self): result = self._numList for i in range(0,10): result += [str(i)*x for x in range(1,10)] endyear = int(time.strftime("%Y")) result += [str(x) for x in range(2000, endyear+1)] if self.keynumbers: result += self.keynumbers.split() if self.oldpasswd: result.append(self.oldpasswd) return result def _genDateList(self, date): if not date: return [] else: result = [] dateSplited = date.split("-") if len(dateSplited) == 1: result.append(dateSplited[0]) elif len(dateSplited) == 2: result += [dateSplited[0], dateSplited[0]+dateSplited[1], dateSplited[0][-2:]+dateSplited[1]] else: result += [dateSplited[0], dateSplited[0]+dateSplited[1], dateSplited[0]+dateSplited[1]+dateSplited[2]] result += [dateSplited[0][-2:]+dateSplited[1], dateSplited[0][-2:]+dateSplited[1]+dateSplited[2]] return result def _mixed(self, listA, listB): if not listA and not listB: return [] result = [] for a,b in itertools.product(listA, listB): if len(a+b)>5 and len(a+b)<17: result.append(a+b) result.append(a+"@"+b) return result def _preHandlePhase(self): self.innerNumList = self._genInnerNumList() self.innerPrefixList = self._prefixList + [x.upper() for x in self._prefixList] self.shortNameList = self._genShortNameList() self.fullNameList = self._genFullNameList() self.mixedKeywordList += self.shortNameList self.mixedKeywordList += self.fullNameList if self.nickname: self.mixedKeywordList.append(self.nickname) if self.englishname: self.mixedKeywordList.append(self.englishname) if self.keywords: self.mixedKeywordList += self.keywords.split() def _mixedPhase(self): self.result += self._mixed(self.mixedKeywordList, self.innerNumList) self.result += self._mixed(["520"], self.mixedKeywordList) if self.phone: self.result += self._mixed(self.innerPrefixList+self.mixedKeywordList, [self.phone]) if self.qq: self.result += self._mixed(self.innerPrefixList+self.mixedKeywordList, [self.qq]) if self.partnername: nameList = self._genShortNameList(self.partnername) nameList += self._genFullNameList(self.partnername) self.result += self._mixed(self.partnerPrefixList, nameList) if self.birthday: dateList = self._genDateList(self.birthday) self.result += self._mixed(self.innerPrefixList+self.mixedKeywordList, dateList) if self.domain: self.result += self._mixed(self.domainPrefixList, [self.domain]) if self.company: self.result += self._mixed(self.domainPrefixList, [self.company]) def _lastHandlePhase(self): self.result += self._commonPasswd self.result += [x+"." for x in self.result] def generate(self): self._preHandlePhase() self._mixedPhase() self._lastHandlePhase() return self.result
py_stringmatching/similarity_measure/levenshtein.py
kevalii/py_stringmatching
115
11082114
<reponame>kevalii/py_stringmatching from __future__ import division from py_stringmatching import utils from py_stringmatching.similarity_measure.cython.cython_levenshtein import levenshtein from py_stringmatching.similarity_measure.sequence_similarity_measure import \ SequenceSimilarityMeasure class Levenshtein(SequenceSimilarityMeasure): """Computes Levenshtein measure (also known as edit distance). Levenshtein distance computes the minimum cost of transforming one string into the other. Transforming a string is carried out using a sequence of the following operators: delete a character, insert a character, and substitute one character for another. """ def __init__(self): super(Levenshtein, self).__init__() def get_raw_score(self, string1, string2): """Computes the raw Levenshtein distance between two strings. Args: string1,string2 (str): Input strings. Returns: Levenshtein distance (int). Raises: TypeError : If the inputs are not strings. Examples: >>> lev = Levenshtein() >>> lev.get_raw_score('a', '') 1 >>> lev.get_raw_score('example', 'samples') 3 >>> lev.get_raw_score('levenshtein', 'frankenstein') 6 """ # input validations utils.sim_check_for_none(string1, string2) # convert input to unicode. string1 = utils.convert_to_unicode(string1) string2 = utils.convert_to_unicode(string2) utils.tok_check_for_string_input(string1, string2) if utils.sim_check_for_exact_match(string1, string2): return 0.0 return levenshtein(string1, string2) def get_sim_score(self, string1, string2): """Computes the normalized Levenshtein similarity score between two strings. Args: string1,string2 (str): Input strings. Returns: Normalized Levenshtein similarity (float). Raises: TypeError : If the inputs are not strings. Examples: >>> lev = Levenshtein() >>> lev.get_sim_score('a', '') 0.0 >>> lev.get_sim_score('example', 'samples') 0.5714285714285714 >>> lev.get_sim_score('levenshtein', 'frankenstein') 0.5 """ # convert input strings to unicode. string1 = utils.convert_to_unicode(string1) string2 = utils.convert_to_unicode(string2) raw_score = self.get_raw_score(string1, string2) max_len = max(len(string1), len(string2)) if max_len == 0: return 1.0 return 1 - (raw_score / max_len)
colossalai/context/random/__init__.py
RichardoLuo/ColossalAI
1,630
11082127
from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states, sync_states, moe_set_seed, reset_seeds) __all__ = [ 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states', 'sync_states', 'moe_set_seed', 'reset_seeds' ]
style_transfer/style_transfer/fritz_coreml_converter.py
Sofienne71/style_transfer_python
277
11082133
import coremltools from coremltools.converters.keras._keras2_converter import * from coremltools.converters.keras._keras2_converter import _KERAS_LAYER_REGISTRY from coremltools.converters.keras import _topology2 from coremltools.converters.keras._topology2 import _KERAS_SKIP_LAYERS from coremltools.models.neural_network import NeuralNetworkBuilder as _NeuralNetworkBuilder from coremltools.proto import FeatureTypes_pb2 as _FeatureTypes_pb2 from collections import OrderedDict as _OrderedDict from coremltools.models import datatypes from coremltools.models import MLModel as _MLModel from coremltools.models.utils import save_spec as _save_spec import keras as _keras from coremltools._deps import HAS_KERAS2_TF as _HAS_KERAS2_TF import PIL.Image from six import string_types from coremltools.proto import FeatureTypes_pb2 as ft _IMAGE_SUFFIX = '_image' class FritzCoremlConverter(object): """A class to convert keras models to coreml. This is converter is a modified version of the one that comes packaged with coremltools, but it allows the user to define custom layer mappings from keras to coreml. """ @classmethod def _check_unsupported_layers(cls, model, supported_layers): """Check for any unsupported layers in the keras model. Args: model - a keras model supported_layers - a dictionary of supported layers. Keys are keras layer classes and values are corresponding coreml layer classes. """ for i, layer in enumerate(model.layers): if (isinstance(layer, _keras.models.Sequential) or isinstance(layer, _keras.models.Model)): cls._check_unsupported_layers(layer) else: if type(layer) not in supported_layers: print(supported_layers) raise ValueError( "Keras layer '%s' not supported. " % str(type(layer)) ) if isinstance(layer, _keras.layers.wrappers.TimeDistributed): if type(layer.layer) not in supported_layers: raise ValueError( "Keras layer '%s' not supported. " % str(type(layer.layer)) ) if isinstance(layer, _keras.layers.wrappers.Bidirectional): if not isinstance(layer.layer, _keras.layers.recurrent.LSTM): raise ValueError( 'Keras bi-directional wrapper conversion supports ' 'only LSTM layer at this time. ') @staticmethod def _get_layer_converter_fn(layer, supported_layers): """Get the right converter function for Keras. Args: layer - a keras layer supported_layers - a dictionary of supported layers. Keys are keras layer classes and values are corresponding coreml layer classes. Returns: layer - a coreml layer """ layer_type = type(layer) if layer_type in supported_layers: return supported_layers[layer_type] else: raise TypeError( "Keras layer of type %s is not supported." % type(layer) ) @staticmethod def _convert_multiarray_output_to_image(spec, feature_name, is_bgr=False): """Convert Core ML multiarray output to an image output. This modifies the core ml spec in place. spec - a Core ML spec protobuf object. feature_name - the name of the output feature to convert is_bgr - if true, assume image data is already in BGR mode. Default False """ for output in spec.description.output: if output.name != feature_name: continue if output.type.WhichOneof('Type') != 'multiArrayType': raise ValueError( "{} is not a multiarray type".format(output.name,) ) array_shape = tuple(output.type.multiArrayType.shape) if len(array_shape) == 2: height, width = array_shape output.type.imageType.colorSpace = \ ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE') else: channels, height, width = array_shape if channels == 1: output.type.imageType.colorSpace = \ ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE') elif channels == 3: if is_bgr: output.type.imageType.colorSpace = \ ft.ImageFeatureType.ColorSpace.Value('BGR') else: output.type.imageType.colorSpace = \ ft.ImageFeatureType.ColorSpace.Value('RGB') else: raise ValueError( "Channel Value {} not supported for image inputs" .format(channels,) ) output.type.imageType.width = width output.type.imageType.height = height @classmethod def convert_keras( cls, model, input_names=None, output_names=None, image_input_names=[], image_output_names=[], deprocessing_args={}, is_bgr=False, is_grayscale=False, red_bias=0.0, green_bias=0.0, blue_bias=0.0, gray_bias=0.0, image_scale=1.0, class_labels=None, predicted_feature_name=None, custom_layers=None): """ Convert a Keras model to a Core ML Model. model - a Keras model to convert input_names - names of input layers. Default None output_names - names of output layers. Default None image_input_names - a list of input names that are image datatypes image_output_names - a list of output names that are image datatypes preprocessing_args - a dictionary of arguments for input preprocessing class_labels - Class labels for outputs, predicted_feature_name - name for predicted features, custom_layers - a dictionary of custom layer conversions. Keys are Keras layer classes, values are coreml layer functions Returns: mlmodel - a coreml model object. """ if isinstance(model, string_types): model = _keras.models.load_model(model) elif isinstance(model, tuple): model = _load_keras_model(model[0], model[1]) # Merge the custom layers with the Keras layer registry supported_layers = {} supported_layers.update(_KERAS_LAYER_REGISTRY) if custom_layers: supported_layers.update(custom_layers) # Check valid versions cls._check_unsupported_layers(model, supported_layers) # Build network graph to represent Keras model graph = _topology2.NetGraph(model) graph.build() graph.remove_skip_layers(_KERAS_SKIP_LAYERS) graph.insert_1d_permute_layers() graph.insert_permute_for_spatial_bn() graph.defuse_activation() graph.remove_internal_input_layers() graph.make_output_layers() # The graph should be finalized before executing this graph.generate_blob_names() graph.add_recurrent_optionals() inputs = graph.get_input_layers() outputs = graph.get_output_layers() # check input / output names validity if input_names is not None: if isinstance(input_names, string_types): input_names = [input_names] else: input_names = ['input' + str(i + 1) for i in range(len(inputs))] if output_names is not None: if isinstance(output_names, string_types): output_names = [output_names] else: output_names = ['output' + str(i + 1) for i in range(len(outputs))] if (image_input_names is not None and isinstance(image_input_names, string_types)): image_input_names = [image_input_names] graph.reset_model_input_names(input_names) graph.reset_model_output_names(output_names) # Keras -> Core ML input dimension dictionary # (None, None) -> [1, 1, 1, 1, 1] # (None, D) -> [D] or [D, 1, 1, 1, 1] # (None, Seq, D) -> [Seq, 1, D, 1, 1] # (None, H, W, C) -> [C, H, W] # (D) -> [D] # (Seq, D) -> [Seq, 1, 1, D, 1] # (Batch, Sequence, D) -> [D] # Retrieve input shapes from model if type(model.input_shape) is list: input_dims = [filter(None, x) for x in model.input_shape] unfiltered_shapes = model.input_shape else: input_dims = [filter(None, model.input_shape)] unfiltered_shapes = [model.input_shape] for idx, dim in enumerate(input_dims): unfiltered_shape = unfiltered_shapes[idx] dim = list(dim) if len(dim) == 0: # Used to be [None, None] before filtering; indicating # unknown sequence length input_dims[idx] = tuple([1]) elif len(dim) == 1: s = graph.get_successors(inputs[idx])[0] if isinstance(graph.get_keras_layer(s), _keras.layers.embeddings.Embedding): # Embedding layer's special input (None, D) where D is # actually sequence length input_dims[idx] = (1,) else: input_dims[idx] = dim # dim is just a number elif len(dim) == 2: # [Seq, D] input_dims[idx] = (dim[1],) elif len(dim) == 3: # H,W,C if (len(unfiltered_shape) > 3): # keras uses the reverse notation from us input_dims[idx] = (dim[2], dim[0], dim[1]) else: # keras provided fixed batch and sequence length, so # the input was (batch, sequence, channel) input_dims[idx] = (dim[2],) else: raise ValueError( 'Input' + input_names[idx] + 'has input shape of length' + str(len(dim))) # Retrieve output shapes from model if type(model.output_shape) is list: output_dims = [filter(None, x) for x in model.output_shape] else: output_dims = [filter(None, model.output_shape[1:])] for idx, dim in enumerate(output_dims): dim = list(dim) if len(dim) == 1: output_dims[idx] = dim elif len(dim) == 2: # [Seq, D] output_dims[idx] = (dim[1],) elif len(dim) == 3: output_dims[idx] = (dim[2], dim[0], dim[1]) input_types = [datatypes.Array(*dim) for dim in input_dims] output_types = [datatypes.Array(*dim) for dim in output_dims] # Some of the feature handling is sensitive about string vs unicode input_names = map(str, input_names) output_names = map(str, output_names) is_classifier = class_labels is not None if is_classifier: mode = 'classifier' else: mode = None # assuming these match input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) builder = _NeuralNetworkBuilder( input_features, output_features, mode=mode ) for iter, layer in enumerate(graph.layer_list): keras_layer = graph.keras_layer_map[layer] print("%d : %s, %s" % (iter, layer, keras_layer)) if isinstance(keras_layer, _keras.layers.wrappers.TimeDistributed): keras_layer = keras_layer.layer converter_func = cls._get_layer_converter_fn( keras_layer, supported_layers ) input_names, output_names = graph.get_layer_blobs(layer) converter_func( builder, layer, input_names, output_names, keras_layer ) # Set the right inputs and outputs on the model description (interface) builder.set_input(input_names, input_dims) builder.set_output(output_names, output_dims) # Since we aren't mangling anything the user gave us, we only need to # update the model interface here builder.add_optionals(graph.optional_inputs, graph.optional_outputs) # Add classifier classes (if applicable) if is_classifier: classes_in = class_labels if isinstance(classes_in, string_types): import os if not os.path.isfile(classes_in): raise ValueError( "Path to class labels (%s) does not exist." % classes_in ) with open(classes_in, 'r') as f: classes = f.read() classes = classes.splitlines() elif type(classes_in) is list: # list[int or str] classes = classes_in else: raise ValueError( 'Class labels must be a list of integers / ' 'strings, or a file path' ) if predicted_feature_name is not None: builder.set_class_labels( classes, predicted_feature_name=predicted_feature_name ) else: builder.set_class_labels(classes) # Set pre-processing paramsters builder.set_pre_processing_parameters( image_input_names=image_input_names, is_bgr=is_bgr, red_bias=red_bias, green_bias=green_bias, blue_bias=blue_bias, gray_bias=gray_bias, image_scale=image_scale) # Convert the image outputs to actual image datatypes for output_name in output_names: if output_name in image_output_names: cls._convert_multiarray_output_to_image( builder.spec, output_name, is_bgr=is_bgr ) # Return the protobuf spec spec = builder.spec return _MLModel(spec)
tests/mixology/version_solver/test_dependency_cache.py
zEdS15B3GCwq/poetry
7,258
11082141
from __future__ import annotations from copy import deepcopy from typing import TYPE_CHECKING from poetry.factory import Factory from poetry.mixology.version_solver import DependencyCache from tests.mixology.helpers import add_to_repo if TYPE_CHECKING: from poetry.core.packages.project_package import ProjectPackage from poetry.repositories import Repository from tests.mixology.version_solver.conftest import Provider def test_solver_dependency_cache_respects_source_type( root: ProjectPackage, provider: Provider, repo: Repository ): dependency_pypi = Factory.create_dependency("demo", ">=0.1.0") dependency_git = Factory.create_dependency( "demo", {"git": "https://github.com/demo/demo.git"}, groups=["dev"] ) root.add_dependency(dependency_pypi) root.add_dependency(dependency_git) add_to_repo(repo, "demo", "1.0.0") cache = DependencyCache(provider) cache.search_for.cache_clear() # ensure cache was never hit for both calls cache.search_for(dependency_pypi) cache.search_for(dependency_git) assert not cache.search_for.cache_info().hits # increase test coverage by searching for copies # (when searching for the exact same object, __eq__ is never called) packages_pypi = cache.search_for(deepcopy(dependency_pypi)) packages_git = cache.search_for(deepcopy(dependency_git)) assert cache.search_for.cache_info().hits == 2 assert cache.search_for.cache_info().currsize == 2 assert len(packages_pypi) == len(packages_git) == 1 assert packages_pypi != packages_git package_pypi = packages_pypi[0] package_git = packages_git[0] assert package_pypi.package.name == dependency_pypi.name assert package_pypi.package.version.text == "1.0.0" assert package_git.package.name == dependency_git.name assert package_git.package.version.text == "0.1.2" assert package_git.package.source_type == dependency_git.source_type assert package_git.package.source_url == dependency_git.source_url assert ( package_git.package.source_resolved_reference == "9cf87a285a2d3fbb0b9fa621997b3acc3631ed24" ) def test_solver_dependency_cache_respects_subdirectories( root: ProjectPackage, provider: Provider, repo: Repository ): dependency_one = Factory.create_dependency( "one", { "git": "https://github.com/demo/subdirectories.git", "subdirectory": "one", "platform": "linux", }, ) dependency_one_copy = Factory.create_dependency( "one", { "git": "https://github.com/demo/subdirectories.git", "subdirectory": "one-copy", "platform": "win32", }, ) root.add_dependency(dependency_one) root.add_dependency(dependency_one_copy) cache = DependencyCache(provider) cache.search_for.cache_clear() # ensure cache was never hit for both calls cache.search_for(dependency_one) cache.search_for(dependency_one_copy) assert not cache.search_for.cache_info().hits # increase test coverage by searching for copies # (when searching for the exact same object, __eq__ is never called) packages_one = cache.search_for(deepcopy(dependency_one)) packages_one_copy = cache.search_for(deepcopy(dependency_one_copy)) assert cache.search_for.cache_info().hits == 2 assert cache.search_for.cache_info().currsize == 2 assert len(packages_one) == len(packages_one_copy) == 1 package_one = packages_one[0] package_one_copy = packages_one_copy[0] assert package_one.package.name == package_one_copy.name assert package_one.package.version.text == package_one_copy.package.version.text assert package_one.package.source_type == package_one_copy.source_type == "git" assert ( package_one.package.source_resolved_reference == package_one_copy.source_resolved_reference == "9cf87a285a2d3fbb0b9fa621997b3acc3631ed24" ) assert ( package_one.package.source_subdirectory != package_one_copy.source_subdirectory ) assert package_one.package.source_subdirectory == "one" assert package_one_copy.package.source_subdirectory == "one-copy" assert package_one.dependency.marker.intersect( package_one_copy.dependency.marker ).is_empty()
scripts/artifacts/tileAppDisc.py
xperylabhub/iLEAPP
325
11082165
<gh_stars>100-1000 import glob import os import pathlib import sqlite3 from scripts.artifact_report import ArtifactHtmlReport from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly def get_tileAppDisc(files_found, report_folder, seeker): for file_found in files_found: file_found = str(file_found) if file_found.endswith('tile-DiscoveredTileDB.sqlite'): break db = open_sqlite_db_readonly(file_found) cursor = db.cursor() cursor.execute(''' SELECT datetime(ZLAST_MODIFIED_TIMESTAMP,'unixepoch','31 years'), ZTILE_UUID FROM ZTILENTITY_DISCOVEREDTILE ''') all_rows = cursor.fetchall() usageentries = len(all_rows) data_list = [] if usageentries > 0: for row in all_rows: data_list.append((row[0], row[1])) description = 'Tile IDs seen from other users' report = ArtifactHtmlReport('Tile App - Discovered Tiles') report.start_artifact_report(report_folder, 'Tile App Discovered Tiles', description) report.add_script() data_headers = ('Last Modified Timestamp','Tile UUID') report.write_artifact_data_table(data_headers, data_list, file_found) report.end_artifact_report() tsvname = 'Tile App Discovered Tiles' tsv(report_folder, data_headers, data_list, tsvname) tlactivity = 'Tile Discovered Tiles' timeline(report_folder, tlactivity, data_list, data_headers) else: logfunc('No Tile App Discovered Tiles data available') db.close() return
python/example_code/lookoutvision/inference.py
cfuerst/aws-doc-sdk-examples
5,166
11082183
<gh_stars>1000+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Uses a trained Amazon Lookout for Vision model to detect anomalies in an image. The image can be local or in an S3 bucket. """ import argparse import logging import imghdr import os import boto3 from botocore.exceptions import ClientError logger = logging.getLogger(__name__) class Inference: """ Shows how to detect anomalies in an image using a trained Amazon Lookout for Vision model. """ @staticmethod def detect_anomalies(lookoutvision_client, project_name, model_version, photo): """ Detects anomalies in an image (jpg/png) by using your Amazon Lookout for Vision model. :param lookoutvision_client: An Amazon Lookout for Vision Boto3 client. :param project_name: The name of the project that contains the model that you want to use. :param model_version: The version of the model that you want to use. :param photo: The path and name of the image in which you want to detect anomalies. """ try: image_type = imghdr.what(photo) content_type = "" if image_type == "jpeg": content_type = "image/jpeg" elif image_type == "png": content_type = "image/png" else: logger.info("Invalid image type for %s", photo) raise ValueError( f"Invalid file format. Supply a jpeg or png format file: {photo}" ) # Call detect_anomalies logger.info("Detecting anomalies in %s", photo) with open(photo, "rb") as image: response = lookoutvision_client.detect_anomalies( ProjectName=project_name, ContentType=content_type, # "image/jpeg" or image/png Body=image.read(), ModelVersion=model_version, ) anomalous = response["DetectAnomalyResult"]["IsAnomalous"] confidence = response["DetectAnomalyResult"]["Confidence"] logger.info("Anomalous?: %s", format(anomalous)) logger.info("Confidence: %s", format(confidence)) return anomalous, confidence except FileNotFoundError as err: logger.info("Couldn't find file: %s", photo) raise except ClientError as err: logger.info(format(err)) raise @staticmethod def download_from_s3(s3_resource, photo): """ Downloads an image from an S3 bucket. :param photo: The S3 path of a photo to download. return: The local path to the downloaded file. """ try: bucket, key = photo.replace("s3://", "").split("/", 1) local_file = os.path.basename(photo) except ValueError as err: logger.info("Couldn't get S3 info for %s: %s", photo, format(err)) raise ValueError("Couldn't get S3 info for {}.".format(photo)) from err try: logger.info("Downloading %s", photo) s3_resource.Bucket(bucket).download_file(key,local_file) except ClientError as err: logger.exception("Couldn't download %s from S3.", photo) err.response["Error"]["Message"] = f"Couldn't download {photo} from S3." raise return local_file def add_arguments(parser): """ Adds command line arguments to the parser. :param parser: The command line parser. """ parser.add_argument( "project", help="The project containing the model that you want to use." ) parser.add_argument( "version", help="The version of the model that you want to use." ) parser.add_argument( "image", help="The file that you want to analyze. " "Supply a local file path or a path to an S3 object.", ) def main(): """ Entrypoint for anomaly detection example. """ try: logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") lookoutvision_client = boto3.client("lookoutvision") s3_resource=boto3.resource('s3') parser = argparse.ArgumentParser(usage=argparse.SUPPRESS) add_arguments(parser) args = parser.parse_args() print("Analyzing " + args.image) photo = args.image if args.image.startswith("s3://"): photo = Inference.download_from_s3(s3_resource,args.image) # analyze image anomalous, confidence = Inference.detect_anomalies( lookoutvision_client, args.project, args.version, photo ) # remove local copy of S3 photo if args.image.startswith("s3://"): os.remove(photo) state = "anomalous" if anomalous is False: state = "normal" print( f"Your model is {confidence:.0%} confident that the image is {state}." ) except ClientError as err: print("A service error occured: " + format(err.response["Error"]["Message"])) except FileNotFoundError as err: print("The supplied file couldn't be found: " + err.filename) except ValueError as err: print("A value error occured. " + format(err)) else: print("Successfully completed analysis.") if __name__ == "__main__": main()
tools/write_tfrecords.py
hanyeonjee/CRNN_Tensorflow
946
11082189
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 19-3-13 下午1:31 # @Author : MaybeShewill-CV # @Site : https://github.com/MaybeShewill-CV/CRNN_Tensorflow # @File : write_tfrecords.py # @IDE: PyCharm """ Write tfrecords tools """ import argparse import os import os.path as ops from data_provider import shadownet_data_feed_pipline def init_args(): """ :return: """ parser = argparse.ArgumentParser() parser.add_argument('-d', '--dataset_dir', type=str, help='The origin synth90k dataset_dir') parser.add_argument('-s', '--save_dir', type=str, help='The generated tfrecords save dir') parser.add_argument('-c', '--char_dict_path', type=str, default=None, help='The char dict file path. If it is None the char dict will be' 'generated automatically in folder data/char_dict') parser.add_argument('-o', '--ord_map_dict_path', type=str, default=None, help='The ord map dict file path. If it is None the ord map dict will be' 'generated automatically in folder data/char_dict') return parser.parse_args() def write_tfrecords(dataset_dir, char_dict_path, ord_map_dict_path, save_dir): """ Write tensorflow records for training , testing and validation :param dataset_dir: the root dir of crnn dataset :param char_dict_path: json file path which contains the map relation between ord value and single character :param ord_map_dict_path: json file path which contains the map relation between int index value and char ord value :param save_dir: the root dir of tensorflow records to write into :return: """ assert ops.exists(dataset_dir), '{:s} not exist'.format(dataset_dir) os.makedirs(save_dir, exist_ok=True) # test crnn data producer producer = shadownet_data_feed_pipline.CrnnDataProducer( dataset_dir=dataset_dir, char_dict_path=char_dict_path, ord_map_dict_path=ord_map_dict_path, writer_process_nums=8 ) producer.generate_tfrecords( save_dir=save_dir ) if __name__ == '__main__': """ generate tfrecords """ args = init_args() write_tfrecords( dataset_dir=args.dataset_dir, char_dict_path=args.char_dict_path, ord_map_dict_path=args.ord_map_dict_path, save_dir=args.save_dir )
python/examples/cuda-to-cv.py
imrisaac/jetson-utils
372
11082199
#!/usr/bin/python3 # # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import cv2 import jetson.utils import argparse # parse the command line parser = argparse.ArgumentParser(description='Convert an image from CUDA to OpenCV') parser.add_argument("file_in", type=str, default="images/jellyfish.jpg", nargs='?', help="filename of the input image to process") parser.add_argument("file_out", type=str, default="cuda-to-cv.jpg", nargs='?', help="filename of the output image to save") opt = parser.parse_args() # load the image into CUDA memory rgb_img = jetson.utils.loadImage(opt.file_in) print('RGB image: ') print(rgb_img) # convert to BGR, since that's what OpenCV expects bgr_img = jetson.utils.cudaAllocMapped(width=rgb_img.width, height=rgb_img.height, format='bgr8') jetson.utils.cudaConvertColor(rgb_img, bgr_img) print('BGR image: ') print(bgr_img) # make sure the GPU is done work before we convert to cv2 jetson.utils.cudaDeviceSynchronize() # convert to cv2 image (cv2 images are numpy arrays) cv_img = jetson.utils.cudaToNumpy(bgr_img) print('OpenCV image size: ' + str(cv_img.shape)) print('OpenCV image type: ' + str(cv_img.dtype)) # save the image if opt.file_out is not None: cv2.imwrite(opt.file_out, cv_img) print("saved {:d}x{:d} test image to '{:s}'".format(bgr_img.width, bgr_img.height, opt.file_out))
installer/core/terraform/resources/aws/iam.py
jonico/pacbot
1,165
11082205
from core.terraform.resources import TerraformResource, TerraformData from core.config import Settings from core.providers.aws.boto3 import iam class IAMRoleResource(TerraformResource): """ Base resource class for Terraform AWS IAM role resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_role" available_args = { 'name': {'required': True, 'prefix': True, 'sep': '_'}, 'assume_role_policy': {'required': False}, 'arn': {'required': False}, 'path': {'required': False}, 'unique_id': {'required': False}, 'permissions_boundary': {'required': False}, 'description': {'required': False}, 'force_detach_policies': {'required': False}, 'tags': {'required': False} } description = Settings.RESOURCE_DESCRIPTION def check_exists_before(self, input, tf_outputs): """ Check if the resource is already exists in AWS Args: input (instance): input object tf_outputs (dict): Terraform output dictionary Returns: exists (boolean): True if already exists in AWS else False checked_details (dict): Status of the existence check """ checked_details = {'attr': "name", 'value': self.get_input_attr('name')} exists = False if not self.resource_in_tf_output(tf_outputs): exists = iam.check_role_exists(checked_details['value'], input.AWS_AUTH_CRED) return exists, checked_details class IAMRolePolicyResource(TerraformResource): """ Base resource class for Terraform AWS IAM role policy resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_policy" available_args = { 'name': {'required': True, 'prefix': True, 'sep': '_'}, 'policy': {'required': True}, 'path': {'required': False}, 'arn': {'required': False}, 'description': {'required': False} } description = Settings.RESOURCE_DESCRIPTION def check_exists_before(self, input, tf_outputs): """ Check if the resource is already exists in AWS Args: input (instance): input object tf_outputs (dict): Terraform output dictionary Returns: exists (boolean): True if already exists in AWS else False checked_details (dict): Status of the existence check """ checked_details = {'attr': "name", 'value': self.get_input_attr('name')} exists = False if not self.resource_in_tf_output(tf_outputs): exists = iam.check_policy_exists(checked_details['value'], input.AWS_ACCOUNT_ID, input.AWS_AUTH_CRED) return exists, checked_details class IAMRolePolicyAttachmentResource(TerraformResource): """ Base resource class for Terraform AWS IAM role policy attach resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_role_policy_attachment" available_args = { 'role': {'required': True}, 'policy_arn': {'required': True}, } class IAMInstanceProfileResource(TerraformResource): """ Base resource class for Terraform AWS IAM instance profile resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_instance_profile" available_args = { 'name': {'required': True, 'prefix': True, 'sep': '_'}, 'role': {'required': True}, } def check_exists_before(self, input, tf_outputs): """ Check if the resource is already exists in AWS Args: input (instance): input object tf_outputs (dict): Terraform output dictionary Returns: exists (boolean): True if already exists in AWS else False checked_details (dict): Status of the existence check """ checked_details = {'attr': "name", 'value': self.get_input_attr('name')} exists = False if not self.resource_in_tf_output(tf_outputs): exists = iam.check_instance_profile_exists(checked_details['value'], input.AWS_AUTH_CRED) return exists, checked_details class IAMPolicyDocumentData(TerraformData): """ Base resource class for Terraform Policy document data resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_policy_document" available_args = { 'statement': {'required': True}, } class IamServiceLinkedRole(TerraformResource): """ Base resource class for Terraform AWS Service linked role resource Attributes: resource_instance_name (str): Type of resource instance available_args (dict): Instance configurations """ resource_instance_name = "aws_iam_service_linked_role" available_args = { 'aws_service_name': {'required': True}, 'custom_suffix ': {'required': False}, 'description': {'required': False}, } description = Settings.RESOURCE_DESCRIPTION
Array/41_FirstMissingPositive.py
cls1991/leetcode
180
11082225
# coding: utf8 """ 题目链接: https://leetcode.com/problems/first-missing-positive/description. 题目描述: Given an unsorted integer array, find the first missing positive integer. For example, Given [1,2,0] return 3, and [3,4,-1,1] return 2. Your algorithm should run in O(n) time and uses constant space. """ class Solution(object): def firstMissingPositive(self, nums): """ :type nums: List[int] :rtype: int """ ll = len(nums) for i in range(ll): while 0 < nums[i] <= ll and nums[i] != i + 1: idx = nums[i] - 1 if nums[idx] == nums[i]: break nums[i], nums[idx] = nums[idx], nums[i] for i in range(ll): if nums[i] != i + 1: return i + 1 return ll + 1
lab001/solutions/s005.py
wuhanstudio/fpga_101
225
11082229
#!/usr/bin/env python3 from migen import * from litex.build.generic_platform import * from litex.build.xilinx import XilinxPlatform # IOs ---------------------------------------------------------------------------------------------- _io = [ ("user_led", 0, Pins("H17"), IOStandard("LVCMOS33")), ("user_sw", 0, Pins("J15"), IOStandard("LVCMOS33")), ("user_btn", 0, Pins("N17"), IOStandard("LVCMOS33")), ("clk100", 0, Pins("E3"), IOStandard("LVCMOS33")), ("cpu_reset", 0, Pins("C12"), IOStandard("LVCMOS33")), ("user_rgb_led_r", 0, Pins("N16"), IOStandard("LVCMOS33")), ("user_rgb_led_g", 0, Pins("R11"), IOStandard("LVCMOS33")), ("user_rgb_led_b", 0, Pins("G14"), IOStandard("LVCMOS33")), ] # Platform ----------------------------------------------------------------------------------------- class Platform(XilinxPlatform): default_clk_name = "clk100" default_clk_period = 1e6/100e6 def __init__(self): XilinxPlatform.__init__(self, "xc7a100t-csg324-1", _io, toolchain="vivado") # Design ------------------------------------------------------------------------------------------- # Create our platform (fpga interface) platform = Platform() # Create our module (fpga description) class Blink(Module): def __init__(self, blink_freq, sys_clk_freq, led): counter = Signal(32) # synchronous assignments self.sync += [ counter.eq(counter + 1), If(counter == int((sys_clk_freq/blink_freq)/2 - 1), counter.eq(0), led.eq(~led) ) ] # combinatorial assignements self.comb += [] class RGBBlink(Module): def __init__(self, platform): # submodules blink_r = Blink(1, 100e6, platform.request("user_rgb_led_r")) blink_g = Blink(2, 100e6, platform.request("user_rgb_led_g")) blink_b = Blink(4, 100e6, platform.request("user_rgb_led_b")) self.submodules += blink_r, blink_g, blink_b module = RGBBlink(platform) # Build -------------------------------------------------------------------------------------------- platform.build(module)